From b637ddeb2825f762f0a84342603e4ff969ac7955 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 21 Apr 2024 15:04:00 +0200 Subject: [PATCH 001/443] Refactor and reformat userConfig --- internal/repository/userConfig.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/repository/userConfig.go b/internal/repository/userConfig.go index fb8c3f5..4e5c32d 100644 --- a/internal/repository/userConfig.go +++ b/internal/repository/userConfig.go @@ -24,9 +24,9 @@ var ( type UserCfgRepo struct { DB *sqlx.DB Lookup *sqlx.Stmt - lock sync.RWMutex uiDefaults map[string]interface{} cache *lrucache.Cache + lock sync.RWMutex } func GetUserCfgRepo() *UserCfgRepo { @@ -112,8 +112,8 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{}, // configuration. func (uCfg *UserCfgRepo) UpdateConfig( key, value string, - user *schema.User) error { - + user *schema.User, +) error { if user == nil { var val interface{} if err := json.Unmarshal([]byte(value), &val); err != nil { From cbaeffde2c4013a3a9ff031fd17df74b272c621d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 22 Apr 2024 11:29:31 +0200 Subject: [PATCH 002/443] fix: improve speed of hasNextPage query for infinite scroll --- internal/graph/schema.resolvers.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index a33e041..5f55139 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -243,14 +243,21 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag if !config.Keys.UiDefaults["job_list_usePaging"].(bool) { hasNextPage := false - page.Page += 1 + // page.Page += 1 : Simple, but expensive + // Example Page 4 @ 10 IpP : Does item 41 exist? + // Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists. 
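[Editor's note — not part of the patch; a worked example of the probe arithmetic the comment above describes, with hypothetical values page.Page = 4 and page.ItemsPerPage = 10:]

	// The probe requests page (4 * 10) + 1 = 41 at ItemsPerPage = 1. With
	// 1-based pages that is offset (41 - 1) * 1 = 40 — exactly where page 5
	// at 10 items per page would begin. The database therefore executes the
	// equivalent of "LIMIT 1 OFFSET 40" rather than fetching a full page,
	// and len(nextJobs) == 1 proves that a next page exists.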
+		nextPage := &model.PageRequest{
+			ItemsPerPage: 1,
+			Page:         ((page.Page * page.ItemsPerPage) + 1),
+		}
 
-		nextJobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
+		nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
 		if err != nil {
 			log.Warn("Error while querying next jobs")
 			return nil, err
 		}
-		if len(nextJobs) > 0 {
+
+		if len(nextJobs) == 1 {
 			hasNextPage = true
 		}

From a22340196f388b4403accc8639b0ffa44ac148c1 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Mon, 22 Apr 2024 12:14:40 +0200
Subject: [PATCH 003/443] Fix: Improve jobName query by parsing DB field as JSON

- No DB migration required
- SQLite internal JSON_EXTRACT function used
---
 internal/repository/query.go | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/internal/repository/query.go b/internal/repository/query.go
index eec51a2..94aa742 100644
--- a/internal/repository/query.go
+++ b/internal/repository/query.go
@@ -135,7 +135,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 		query = buildStringCondition("job.project", filter.Project, query)
 	}
 	if filter.JobName != nil {
-		query = buildStringCondition("job.meta_data", filter.JobName, query)
+		query = buildMetaJsonCondition("jobName", filter.JobName, query)
 	}
 	if filter.Cluster != nil {
 		query = buildStringCondition("job.cluster", filter.Cluster, query)
@@ -235,6 +235,25 @@ func buildStringCondition(field string, cond *model.StringInput, query sq.Select
 	return query
 }
 
+func buildMetaJsonCondition(jsonField string, cond *model.StringInput, query sq.SelectBuilder) sq.SelectBuilder {
+	if cond.Eq != nil {
+		return query.Where("JSON_EXTRACT(meta_data, \"$."+jsonField+"\") = ?", *cond.Eq)
+	}
+	if cond.Neq != nil {
+		return query.Where("JSON_EXTRACT(meta_data, \"$."+jsonField+"\") != ?", *cond.Neq)
+	}
+	if cond.StartsWith != nil {
+		return query.Where("JSON_EXTRACT(meta_data, \"$."+jsonField+"\") LIKE ?", fmt.Sprint(*cond.StartsWith, "%"))
+	}
+	if cond.EndsWith != nil {
+		return query.Where("JSON_EXTRACT(meta_data, \"$."+jsonField+"\") LIKE ?", fmt.Sprint("%", *cond.EndsWith))
+	}
+	if cond.Contains != nil {
+		return query.Where("JSON_EXTRACT(meta_data, \"$."+jsonField+"\") LIKE ?", fmt.Sprint("%", *cond.Contains, "%"))
+	}
+	return query
+}
+
 var matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)")
 var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")

From f80123c85dbedeccb8031f9451d323b6d80f68f4 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Wed, 24 Apr 2024 13:47:29 +0200
Subject: [PATCH 004/443] Fix: Add missing nullsafe for admin user table

---
 web/frontend/src/Job.root.svelte                  | 2 +-
 web/frontend/src/config/admin/ShowUsersRow.svelte | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte
index 758cef9..b399f4d 100644
--- a/web/frontend/src/Job.root.svelte
+++ b/web/frontend/src/Job.root.svelte
@@ -361,7 +361,7 @@
- Missing Metrics/Reseources + Missing Metrics/Resources {#if missingMetrics.length > 0} diff --git a/web/frontend/src/config/admin/ShowUsersRow.svelte b/web/frontend/src/config/admin/ShowUsersRow.svelte index 9845241..782ea56 100644 --- a/web/frontend/src/config/admin/ShowUsersRow.svelte +++ b/web/frontend/src/config/admin/ShowUsersRow.svelte @@ -20,7 +20,7 @@ {user.name} {user.projects} {user.email} -{user.roles.join(", ")} +{user?.roles ? user.roles.join(", ") : "No Roles"} {#if !jwt} + + From b66750339ddf5cdb6b7c1d5b9413576cd6341205 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 25 Apr 2024 16:59:04 +0200 Subject: [PATCH 006/443] add default value, remove unused argument --- web/frontend/src/Job.root.svelte | 1 - web/frontend/src/MetricSelection.svelte | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index b399f4d..8d090ac 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -341,7 +341,6 @@ scopes={item.data.map((x) => x.scope)} {width} isShared={$initq.data.job.exclusive != 1} - resources={$initq.data.job.resources} /> {:else} Date: Thu, 25 Apr 2024 16:59:27 +0200 Subject: [PATCH 007/443] fix: fix metricPlot y zoom reset --- web/frontend/src/plots/MetricPlot.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/plots/MetricPlot.svelte b/web/frontend/src/plots/MetricPlot.svelte index bd44675..ba3c294 100644 --- a/web/frontend/src/plots/MetricPlot.svelte +++ b/web/frontend/src/plots/MetricPlot.svelte @@ -461,7 +461,7 @@ }, scales: { x: { time: false }, - y: maxY ? { range: [0, maxY * 1.1] } : {}, + y: maxY ? { min: 0, max: (maxY * 1.1) } : {auto: true}, // Add some space to upper render limit }, legend: { // Display legend until max 12 Y-dataseries From ef51e69ffb53e696eada560aaa0ab0c857b2c0ff Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 26 Apr 2024 11:11:55 +0200 Subject: [PATCH 008/443] feat: Add roofline color scale for time information --- web/frontend/src/plots/Roofline.svelte | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/web/frontend/src/plots/Roofline.svelte b/web/frontend/src/plots/Roofline.svelte index 1e47f6f..11d1d25 100644 --- a/web/frontend/src/plots/Roofline.svelte +++ b/web/frontend/src/plots/Roofline.svelte @@ -298,6 +298,24 @@ // Reset grid lineWidth u.ctx.lineWidth = 0.15; } + if (renderTime) { + // The Color Scale For Time Information + const posX = u.valToPos(0.1, "x", true) + const posXLimit = u.valToPos(100, "x", true) + const posY = u.valToPos(15000.0, "y", true) + u.ctx.fillStyle = 'black' + u.ctx.fillText('Start', posX, posY) + const start = posX + 10 + for (let x = start; x < posXLimit; x += 10) { + let c = (x - start) / (posXLimit - start) + u.ctx.fillStyle = getRGB(c) + u.ctx.beginPath() + u.ctx.arc(x, posY, 3, 0, Math.PI * 2, false) + u.ctx.fill() + } + u.ctx.fillStyle = 'black' + u.ctx.fillText('End', posXLimit + 23, posY) + } }, ], }, From 72557fd0bf28e0c5b456c7030542b66e55f0da2d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 2 May 2024 16:32:01 +0200 Subject: [PATCH 009/443] feat: add statistics series render to job view metric plots --- web/frontend/src/Job.root.svelte | 3 --- web/frontend/src/Metric.svelte | 37 ++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 8d090ac..00f64e2 100644 --- a/web/frontend/src/Job.root.svelte 
+++ b/web/frontend/src/Job.root.svelte @@ -306,9 +306,6 @@ {/if} - diff --git a/web/frontend/src/Metric.svelte b/web/frontend/src/Metric.svelte index 6022ffb..a3bedaa 100644 --- a/web/frontend/src/Metric.svelte +++ b/web/frontend/src/Metric.svelte @@ -33,8 +33,17 @@ error = null; let selectedScope = minScope(scopes); + let statsPattern = /(.*)-stats$/ + let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) + let selectedScopeIndex + $: availableScopes = scopes; - $: selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); + $: patternMatches = statsPattern.exec(selectedScope) + $: if (!patternMatches) { + selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); + } else { + selectedScopeIndex = scopes.findIndex((s) => s == patternMatches[1]); + } $: data = rawData[selectedScopeIndex]; $: series = data?.series.filter( (series) => selectedHost == null || series.hostname == selectedHost, @@ -62,6 +71,7 @@ if (jm.scope != "node") { scopes = [...scopes, jm.scope]; rawData.push(jm.metric); + statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) selectedScope = jm.scope; selectedScopeIndex = scopes.findIndex((s) => s == jm.scope); dispatch("more-loaded", jm); @@ -79,15 +89,18 @@ : "") + (metricConfig?.unit?.base ? metricConfig.unit.base : "")}) {#if job.resources.length > 1} - {#each job.resources as { hostname }} @@ -100,7 +113,7 @@ {:else if error != null} {error.message} - {:else if series != null} + {:else if series != null && !patternMatches} + {:else if statsSeries[selectedScopeIndex] != null && patternMatches} + {/if} {/key} From 597bccc080ad17478a14cb72768655573a80fa62 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 6 May 2024 13:15:15 +0200 Subject: [PATCH 010/443] fix: add SQL JSON validity check to meta_data query --- internal/repository/query.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/repository/query.go b/internal/repository/query.go index 94aa742..5ca98fb 100644 --- a/internal/repository/query.go +++ b/internal/repository/query.go @@ -236,6 +236,9 @@ func buildStringCondition(field string, cond *model.StringInput, query sq.Select } func buildMetaJsonCondition(jsonField string, cond *model.StringInput, query sq.SelectBuilder) sq.SelectBuilder { + // Verify and Search Only in Valid Jsons + query = query.Where("JSON_VALID(meta_data)") + // add "AND" Sql query Block for field match if cond.Eq != nil { return query.Where("JSON_EXTRACT(meta_data, \"$."+jsonField+"\") = ?", *cond.Eq) } From 684cb5a37688b84552b118a000f8d18710f2c3e9 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 8 May 2024 16:17:42 +0200 Subject: [PATCH 011/443] feat: change statistics render of metric plot to min/max/median - #263 --- api/schema.graphqls | 7 ++- internal/graph/generated/generated.go | 72 ++++++++++++++++++++++-- internal/metricdata/metricdata.go | 2 +- internal/util/statistics.go | 41 +++++++++++++- pkg/schema/metrics.go | 46 +++++++++++---- web/frontend/src/JobFootprint.svelte | 2 +- web/frontend/src/Metric.svelte | 4 +- web/frontend/src/joblist/Row.svelte | 2 +- web/frontend/src/plots/MetricPlot.svelte | 18 ++++-- 9 files changed, 163 insertions(+), 31 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 5c5bc2c..c7ade86 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -150,9 +150,10 @@ type MetricStatistics { } type StatsSeries { - mean: [NullableFloat!]! - min: [NullableFloat!]! - max: [NullableFloat!]! + mean: [NullableFloat!]! 
+ median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! } type MetricFootprints { diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 29a2a24..296bbfe 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -251,9 +251,10 @@ type ComplexityRoot struct { } StatsSeries struct { - Max func(childComplexity int) int - Mean func(childComplexity int) int - Min func(childComplexity int) int + Max func(childComplexity int) int + Mean func(childComplexity int) int + Median func(childComplexity int) int + Min func(childComplexity int) int } SubCluster struct { @@ -1344,6 +1345,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.StatsSeries.Mean(childComplexity), true + case "StatsSeries.median": + if e.complexity.StatsSeries.Median == nil { + break + } + + return e.complexity.StatsSeries.Median(childComplexity), true + case "StatsSeries.min": if e.complexity.StatsSeries.Min == nil { break @@ -1867,9 +1875,10 @@ type MetricStatistics { } type StatsSeries { - mean: [NullableFloat!]! - min: [NullableFloat!]! - max: [NullableFloat!]! + mean: [NullableFloat!]! + median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! } type MetricFootprints { @@ -4854,6 +4863,8 @@ func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(ctx context. switch field.Name { case "mean": return ec.fieldContext_StatsSeries_mean(ctx, field) + case "median": + return ec.fieldContext_StatsSeries_median(ctx, field) case "min": return ec.fieldContext_StatsSeries_min(ctx, field) case "max": @@ -8814,6 +8825,50 @@ func (ec *executionContext) fieldContext_StatsSeries_mean(ctx context.Context, f return fc, nil } +func (ec *executionContext) _StatsSeries_median(ctx context.Context, field graphql.CollectedField, obj *schema.StatsSeries) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_StatsSeries_median(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Median, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]schema.Float) + fc.Result = res + return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_StatsSeries_median(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "StatsSeries", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type NullableFloat does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _StatsSeries_min(ctx context.Context, field graphql.CollectedField, obj *schema.StatsSeries) (ret graphql.Marshaler) { fc, err := ec.fieldContext_StatsSeries_min(ctx, field) if err != nil { @@ -14427,6 +14482,11 @@ func (ec *executionContext) _StatsSeries(ctx context.Context, sel ast.SelectionS if 
out.Values[i] == graphql.Null { out.Invalids++ } + case "median": + out.Values[i] = ec._StatsSeries_median(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } case "min": out.Values[i] = ec._StatsSeries_min(ctx, field, obj) if out.Values[i] == graphql.Null { diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index a93b1ac..e6ca6b8 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -263,7 +263,7 @@ func cacheKey( // For /monitoring/job/ and some other places, flops_any and mem_bw need // to be available at the scope 'node'. If a job has a lot of nodes, -// statisticsSeries should be available so that a min/mean/max Graph can be +// statisticsSeries should be available so that a min/median/max Graph can be // used instead of a lot of single lines. func prepareJobData( job *schema.Job, diff --git a/internal/util/statistics.go b/internal/util/statistics.go index 384de58..879c700 100644 --- a/internal/util/statistics.go +++ b/internal/util/statistics.go @@ -4,7 +4,13 @@ // license that can be found in the LICENSE file. package util -import "golang.org/x/exp/constraints" +import ( + "fmt" + "math" + "sort" + + "golang.org/x/exp/constraints" +) func Min[T constraints.Ordered](a, b T) T { if a < b { @@ -19,3 +25,36 @@ func Max[T constraints.Ordered](a, b T) T { } return b } + +func sortedCopy(input []float64) []float64 { + sorted := make([]float64, len(input)) + copy(sorted, input) + sort.Float64s(sorted) + return sorted +} + +func Mean(input []float64) (float64, error) { + if len(input) == 0 { + return math.NaN(), fmt.Errorf("input array is empty: %#v", input) + } + sum := 0.0 + for _, n := range input { + sum += n + } + return sum / float64(len(input)), nil +} + +func Median(input []float64) (median float64, err error) { + c := sortedCopy(input) + // Even numbers: add the two middle numbers, divide by two (use mean function) + // Odd numbers: Use the middle number + l := len(c) + if l == 0 { + return math.NaN(), fmt.Errorf("input array is empty: %#v", input) + } else if l%2 == 0 { + median, _ = Mean(c[l/2-1 : l/2+1]) + } else { + median = c[l/2] + } + return median, nil +} diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go index e340747..08636f1 100644 --- a/pkg/schema/metrics.go +++ b/pkg/schema/metrics.go @@ -10,6 +10,8 @@ import ( "math" "sort" "unsafe" + + "github.com/ClusterCockpit/cc-backend/internal/util" ) type JobData map[string]map[MetricScope]*JobMetric @@ -36,6 +38,7 @@ type MetricStatistics struct { type StatsSeries struct { Mean []Float `json:"mean"` + Median []Float `json:"median"` Min []Float `json:"min"` Max []Float `json:"max"` Percentiles map[int][]Float `json:"percentiles,omitempty"` @@ -120,7 +123,7 @@ func (jd *JobData) Size() int { for _, metric := range scopes { if metric.StatisticsSeries != nil { n += len(metric.StatisticsSeries.Max) - n += len(metric.StatisticsSeries.Mean) + n += len(metric.StatisticsSeries.Median) n += len(metric.StatisticsSeries.Min) } @@ -149,53 +152,74 @@ func (jm *JobMetric) AddStatisticsSeries() { } } - min, mean, max := make([]Float, n), make([]Float, n), make([]Float, n) + // mean := make([]Float, n) + min, median, max := make([]Float, n), make([]Float, n), make([]Float, n) i := 0 for ; i < m; i++ { - smin, ssum, smax := math.MaxFloat32, 0.0, -math.MaxFloat32 + seriesCount := len(jm.Series) + // ssum := 0.0 + smin, smed, smax := math.MaxFloat32, make([]float64, seriesCount), -math.MaxFloat32 notnan := 0 - for j := 0; j < len(jm.Series); j++ { + 
for j := 0; j < seriesCount; j++ { x := float64(jm.Series[j].Data[i]) if math.IsNaN(x) { continue } notnan += 1 - ssum += x + // ssum += x + smed[j] = x smin = math.Min(smin, x) smax = math.Max(smax, x) } if notnan < 3 { min[i] = NaN - mean[i] = NaN + // mean[i] = NaN + median[i] = NaN max[i] = NaN } else { min[i] = Float(smin) - mean[i] = Float(ssum / float64(notnan)) + // mean[i] = Float(ssum / float64(notnan)) max[i] = Float(smax) + + medianRaw, err := util.Median(smed) + if err != nil { + median[i] = NaN + } else { + median[i] = Float(medianRaw) + } } } for ; i < n; i++ { min[i] = NaN - mean[i] = NaN + // mean[i] = NaN + median[i] = NaN max[i] = NaN } if smooth { - for i := 2; i < len(mean)-2; i++ { + for i := 2; i < len(median)-2; i++ { if min[i].IsNaN() { continue } min[i] = (min[i-2] + min[i-1] + min[i] + min[i+1] + min[i+2]) / 5 max[i] = (max[i-2] + max[i-1] + max[i] + max[i+1] + max[i+2]) / 5 - mean[i] = (mean[i-2] + mean[i-1] + mean[i] + mean[i+1] + mean[i+2]) / 5 + // mean[i] = (mean[i-2] + mean[i-1] + mean[i] + mean[i+1] + mean[i+2]) / 5 + // Reduce Median further + smoothRaw := []float64{float64(median[i-2]), float64(median[i-1]), float64(median[i]), float64(median[i+1]), float64(median[i+2])} + smoothMedian, err := util.Median(smoothRaw) + if err != nil { + median[i] = NaN + } else { + median[i] = Float(smoothMedian) + } } } - jm.StatisticsSeries = &StatsSeries{Mean: mean, Min: min, Max: max} + jm.StatisticsSeries = &StatsSeries{Median: median, Min: min, Max: max} // Mean: mean } func (jd *JobData) AddNodeScope(metric string) bool { diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte index 8ed8089..4f85b6c 100644 --- a/web/frontend/src/JobFootprint.svelte +++ b/web/frontend/src/JobFootprint.svelte @@ -101,7 +101,7 @@ // Calculate Avg from jobMetrics const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === "node"); if (jm?.metric?.statisticsSeries) { - mv = round(mean(jm.metric.statisticsSeries.mean), 2); + mv = round(mean(jm.metric.statisticsSeries.median), 2); } else if (jm?.metric?.series?.length > 1) { const avgs = jm.metric.series.map((jms) => jms.statistics.avg); mv = round(mean(avgs), 2); diff --git a/web/frontend/src/Metric.svelte b/web/frontend/src/Metric.svelte index a3bedaa..279df13 100644 --- a/web/frontend/src/Metric.svelte +++ b/web/frontend/src/Metric.svelte @@ -33,7 +33,7 @@ error = null; let selectedScope = minScope(scopes); - let statsPattern = /(.*)-stats$/ + let statsPattern = /(.*)-stat$/ let statsSeries = rawData.map((data) => data?.statisticsSeries ? 
data.statisticsSeries : null) let selectedScopeIndex @@ -92,7 +92,7 @@ {#each availableScopes as scope, index} {#if statsSeries[index]} - + {/if} {/each} {#if availableScopes.length == 1 && metricConfig?.scope != "node"} diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 98d3190..dd92ec4 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -50,7 +50,7 @@ timestep statisticsSeries { min - mean + median max } series { diff --git a/web/frontend/src/plots/MetricPlot.svelte b/web/frontend/src/plots/MetricPlot.svelte index ba3c294..db6f4fd 100644 --- a/web/frontend/src/plots/MetricPlot.svelte +++ b/web/frontend/src/plots/MetricPlot.svelte @@ -216,7 +216,7 @@ // conditional hide series color markers: if ( - useStatsSeries === true || // Min/Max/Avg Self-Explanatory + useStatsSeries === true || // Min/Max/Median Self-Explanatory dataSize === 1 || // Only one Y-Dataseries dataSize > 6 ) { @@ -296,7 +296,7 @@ } const longestSeries = useStatsSeries - ? statisticsSeries.mean.length + ? statisticsSeries.median.length : series.reduce((n, series) => Math.max(n, series.data.length), 0); const maxX = longestSeries * timestep; let maxY = null; @@ -346,13 +346,15 @@ if (useStatsSeries) { plotData.push(statisticsSeries.min); plotData.push(statisticsSeries.max); - plotData.push(statisticsSeries.mean); + plotData.push(statisticsSeries.median); + // plotData.push(statisticsSeries.mean); if (forNode === true) { // timestamp 0 with null value for reversed time axis if (plotData[1].length != 0) plotData[1].push(null); if (plotData[2].length != 0) plotData[2].push(null); if (plotData[3].length != 0) plotData[3].push(null); + // if (plotData[4].length != 0) plotData[4].push(null); } plotSeries.push({ @@ -368,11 +370,17 @@ stroke: "green", }); plotSeries.push({ - label: "mean", + label: "median", scale: "y", width: lineWidth, stroke: "black", }); + // plotSeries.push({ + // label: "mean", + // scale: "y", + // width: lineWidth, + // stroke: "blue", + // }); plotBands = [ { series: [2, 3], fill: "rgba(0,255,0,0.1)" }, @@ -422,7 +430,7 @@ // Draw plot type label: let textl = `${scope}${plotSeries.length > 2 ? "s" : ""}${ useStatsSeries - ? ": min/avg/max" + ? ": min/median/max" : metricConfig != null && scope != metricConfig.scope ? 
` (${metricConfig.aggregation})` : "" From 54f7980162c1f3321e223ebe88477df6529cce28 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 16 May 2024 11:18:57 +0200 Subject: [PATCH 012/443] fix: Add required key to init config file --- cmd/cc-backend/main.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index 9d084f2..ed471ac 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -76,6 +76,9 @@ const configString = ` "kind": "file", "path": "./var/job-archive" }, + "jwts": { + "max-age": "2000h" + }, "clusters": [ { "name": "name", @@ -115,15 +118,15 @@ func initEnv() { os.Exit(0) } - if err := os.WriteFile("config.json", []byte(configString), 0666); err != nil { + if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil { log.Fatalf("Writing config.json failed: %s", err.Error()) } - if err := os.WriteFile(".env", []byte(envString), 0666); err != nil { + if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil { log.Fatalf("Writing .env failed: %s", err.Error()) } - if err := os.Mkdir("var", 0777); err != nil { + if err := os.Mkdir("var", 0o777); err != nil { log.Fatalf("Mkdir var failed: %s", err.Error()) } From b48d1b8ad6904d97e80f9f4753ba0a22240a4225 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 22 May 2024 14:21:54 +0200 Subject: [PATCH 013/443] fix: correct status view columns on mobile displays --- web/frontend/src/Status.root.svelte | 40 +++++++++++++++-------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 4121ead..132cb2e 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -315,20 +315,11 @@ - - + +

Current utilization of cluster "{cluster}"

- - {#if $initq.fetching || $mainQuery.fetching} - - {:else if $initq.error} - {$initq.error.message} - {:else} - - {/if} - - + - + { @@ -347,6 +338,17 @@ />
+ + + {#if $initq.fetching || $mainQuery.fetching} + + {:else if $initq.error} + {$initq.error.message} + {:else} + + {/if} + + {#if $mainQuery.error} @@ -361,8 +363,8 @@ {#if $initq.data && $mainQuery.data} {#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i} - - + + SubCluster "{subCluster.name}" @@ -433,7 +435,7 @@ - +
{#key $mainQuery.data.nodeMetrics} - +

@@ -580,7 +582,7 @@
- +
{#key $mainQuery.data.stats} @@ -610,7 +612,7 @@ {/key} - +
{#key $mainQuery.data.stats} From 061c9f0979d53a873a0acc9c07f405193b6b7997 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 22 May 2024 15:57:22 +0200 Subject: [PATCH 014/443] fix: deselected metrics were marked as missing on new jobview load --- web/frontend/src/Job.root.svelte | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 00f64e2..8cf8f87 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -165,10 +165,11 @@ .find((c) => c.name == job.cluster) .metricConfig.map((mc) => mc.name); - // Metric not found in JobMetrics && Metric not explicitly disabled: Was expected, but is Missing + // Metric not found in JobMetrics && Metric not explicitly disabled in config or deselected: Was expected, but is Missing missingMetrics = metricNames.filter( (metric) => !metrics.some((jm) => jm.name == metric) && + selectedMetrics.includes(metric) && !checkMetricDisabled( metric, $initq.data.job.cluster, From 575753038b981ce093e6852cc8762ecb78319be6 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 22 May 2024 18:22:35 +0200 Subject: [PATCH 015/443] feat: add jobname filter to joblist textfilter - allows combination of filters now including jobname - rename component --- web/frontend/src/Jobs.root.svelte | 4 +- ...UserOrProject.svelte => TextFilter.svelte} | 74 +++++++++---------- 2 files changed, 39 insertions(+), 39 deletions(-) rename web/frontend/src/filters/{UserOrProject.svelte => TextFilter.svelte} (52%) diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index 204a4e3..fb49b39 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -14,7 +14,7 @@ import Refresher from "./joblist/Refresher.svelte"; import Sorting from "./joblist/SortSelection.svelte"; import MetricSelection from "./MetricSelection.svelte"; - import UserOrProject from "./filters/UserOrProject.svelte"; + import TextFilter from "./filters/TextFilter.svelte"; const { query: initq } = init(); @@ -86,7 +86,7 @@ - filterComponent.update(detail)} diff --git a/web/frontend/src/filters/UserOrProject.svelte b/web/frontend/src/filters/TextFilter.svelte similarity index 52% rename from web/frontend/src/filters/UserOrProject.svelte rename to web/frontend/src/filters/TextFilter.svelte index 983192c..80f454a 100644 --- a/web/frontend/src/filters/UserOrProject.svelte +++ b/web/frontend/src/filters/TextFilter.svelte @@ -4,21 +4,25 @@ const dispatch = createEventDispatcher(); - export let user = ""; - export let project = ""; export let authlevel; export let roles; - let mode = "user", - term = ""; + let mode = "user"; + let term = ""; + let user = ""; + let project = ""; + let jobName = ""; const throttle = 500; function modeChanged() { if (mode == "user") { project = term; term = user; - } else { + } else if (mode == "project") { user = term; term = project; + } else { + jobName = term; + term = jobName; } termChanged(0); } @@ -28,7 +32,8 @@ function termChanged(sleep = throttle) { if (authlevel >= roles.manager) { if (mode == "user") user = term; - else project = term; + else if (mode == "project") project = term; + else jobName = term; if (timeoutId != null) clearTimeout(timeoutId); @@ -36,49 +41,44 @@ dispatch("update", { user, project, + jobName }); }, sleep); } else { - project = term; + if (mode == "project") project = term; + else jobName = term; + if (timeoutId != null) clearTimeout(timeoutId); timeoutId = setTimeout(() => { 
dispatch("update", { project, + jobName }); }, sleep); } } -{#if authlevel >= roles.manager} - - + {#if authlevel >= roles.manager} - - - termChanged()} - on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} - placeholder={mode == "user" ? "filter username..." : "filter project..."} - /> - -{:else} - - - termChanged()} - on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} - placeholder="filter project..." - /> - -{/if} + {/if} + + + + termChanged()} + on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} + placeholder={`filter ${mode}...`} + /> + + From ba1658beac1094b4bb387d10fd4a85450ddc6574 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 22 May 2024 18:50:52 +0200 Subject: [PATCH 016/443] fix: correct selectable histogram placement in status view --- web/frontend/src/Status.root.svelte | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 132cb2e..48c3711 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -644,7 +644,7 @@
 {#if metricsInHistograms} - + {#key $mainQuery.data.stats[0].histMetrics}

Date: Thu, 23 May 2024 11:53:23 +0200
Subject: [PATCH 017/443] fix: fix jobname and arrayjobid timeouts by adding
 additional 30d filter

- improve archive worker logs
- add arrayjobid filter to url if used
---
 internal/repository/job.go              |  6 +++---
 internal/routerConfig/routes.go         | 12 ++++++++++--
 web/frontend/src/filters/Filters.svelte |  1 +
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/internal/repository/job.go b/internal/repository/job.go
index ce5e416..b42598d 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -520,7 +520,7 @@ func (r *JobRepository) archivingWorker() {
 			// not using meta data, called to load JobMeta into Cache?
 			// will fail if job meta not in repository
 			if _, err := r.FetchMetadata(job); err != nil {
-				log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+				log.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
 				r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
 				continue
 			}
@@ -529,14 +529,14 @@ func (r *JobRepository) archivingWorker() {
 			// TODO: Maybe use context with cancel/timeout here
 			jobMeta, err := metricdata.ArchiveJob(job, context.Background())
 			if err != nil {
-				log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+				log.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
 				r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
 				continue
 			}
 
 			// Update the jobs database entry one last time:
 			if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil {
-				log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+				log.Errorf("archiving job (dbid: %d) failed at marking archived step: %s", job.ID, err.Error())
 				continue
 			}
 			log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go
index fe374ac..c7a5a0c 100644
--- a/internal/routerConfig/routes.go
+++ b/internal/routerConfig/routes.go
@@ -302,11 +302,19 @@ func HandleSearchBar(rw http.ResponseWriter, r *http.Request, buildInfo web.Buil
 		case "jobId":
 			http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
 		case "jobName":
-			http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
+			// Add Last 30 Days to mitigate timeouts
+			untilTime := strconv.FormatInt(time.Now().Unix(), 10)
+			fromTime := strconv.FormatInt((time.Now().Unix() - int64(30*24*3600)), 10)
+
+			http.Redirect(rw, r, "/monitoring/jobs/?startTime="+fromTime+"-"+untilTime+"&jobName="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
 		case "projectId":
 			http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
 		case "arrayJobId":
-			http.Redirect(rw, r, "/monitoring/jobs/?arrayJobId="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
+			// Add Last 30 Days to mitigate timeouts
+			untilTime := strconv.FormatInt(time.Now().Unix(), 10)
+			fromTime := strconv.FormatInt((time.Now().Unix() - int64(30*24*3600)), 10)
+
+			http.Redirect(rw, r, 
"/monitoring/jobs/?startTime="+fromTime+"-"+untilTime+"&arrayJobId="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery case "username": if user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleManager}) { http.Redirect(rw, r, "/monitoring/users/?user="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) diff --git a/web/frontend/src/filters/Filters.svelte b/web/frontend/src/filters/Filters.svelte index 8e7a8ef..a27c799 100644 --- a/web/frontend/src/filters/Filters.svelte +++ b/web/frontend/src/filters/Filters.svelte @@ -193,6 +193,7 @@ opts.push(`userMatch=${filters.userMatch}`); if (filters.project) opts.push(`project=${filters.project}`); if (filters.jobName) opts.push(`jobName=${filters.jobName}`); + if (filters.arrayJobId) opts.push(`arrayJobId=${filters.arrayJobId}`); if (filters.projectMatch != "contains") opts.push(`projectMatch=${filters.projectMatch}`); From 8d1228c9e8b5fa96f29745250e51c13b335ef247 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 23 May 2024 15:43:09 +0200 Subject: [PATCH 018/443] feat: rework list searchbar, adds project-specific mode, add to user-joblist --- web/frontend/src/Jobs.root.svelte | 2 + web/frontend/src/User.root.svelte | 6 +++ web/frontend/src/filters/Filters.svelte | 2 +- web/frontend/src/filters/TextFilter.svelte | 46 +++++++++++++++------- 4 files changed, 40 insertions(+), 16 deletions(-) diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index fb49b39..f7c99ff 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -38,6 +38,7 @@ ? !!ccconfig[`plot_list_showFootprint:${filterPresets.cluster}`] : !!ccconfig.plot_list_showFootprint; let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null; + let presetProject = filterPresets?.project ? filterPresets.project : "" // The filterPresets are handled by the Filters component, // so we need to wait for it to be ready before we can start a query. 
@@ -87,6 +88,7 @@ filterComponent.update(detail)} diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index c60ea20..41969d9 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -12,6 +12,7 @@ } from "@sveltestrap/sveltestrap"; import { queryStore, gql, getContextClient } from "@urql/svelte"; import Filters from "./filters/Filters.svelte"; + import TextFilter from "./filters/TextFilter.svelte" import JobList from "./joblist/JobList.svelte"; import Sorting from "./joblist/SortSelection.svelte"; import Refresher from "./joblist/Refresher.svelte"; @@ -132,6 +133,11 @@ /> + filterComponent.update(detail)} + /> + + jobList.refresh()} /> diff --git a/web/frontend/src/filters/Filters.svelte b/web/frontend/src/filters/Filters.svelte index a27c799..7253ff7 100644 --- a/web/frontend/src/filters/Filters.svelte +++ b/web/frontend/src/filters/Filters.svelte @@ -194,7 +194,7 @@ if (filters.project) opts.push(`project=${filters.project}`); if (filters.jobName) opts.push(`jobName=${filters.jobName}`); if (filters.arrayJobId) opts.push(`arrayJobId=${filters.arrayJobId}`); - if (filters.projectMatch != "contains") + if (filters.project && filters.projectMatch != "contains") opts.push(`projectMatch=${filters.projectMatch}`); if (opts.length == 0 && window.location.search.length <= 1) return; diff --git a/web/frontend/src/filters/TextFilter.svelte b/web/frontend/src/filters/TextFilter.svelte index 80f454a..b010175 100644 --- a/web/frontend/src/filters/TextFilter.svelte +++ b/web/frontend/src/filters/TextFilter.svelte @@ -1,28 +1,29 @@ @@ -67,10 +76,12 @@ bind:value={mode} on:change={modeChanged} > - {#if authlevel >= roles.manager} + {#if !presetProject} + + {/if} + {#if roles && authlevel >= roles.manager} {/if} - termChanged()} on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} - placeholder={`filter ${mode}...`} + placeholder={presetProject ? 
`Filter ${mode} in ${presetProject} ...` : `Filter ${mode} ...`} /> + {#if presetProject} + + {/if} From 320c87a1dbc41a15db63dac68c9327f9725cba24 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 27 May 2024 11:11:25 +0200 Subject: [PATCH 019/443] fix: add additional 30d fitler to searchbar fallback handling --- internal/routerConfig/routes.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index c7a5a0c..1dd6dee 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -347,7 +347,11 @@ func HandleSearchBar(rw http.ResponseWriter, r *http.Request, buildInfo web.Buil } else if project != "" { http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(project), http.StatusFound) // projectId (equal) } else if jobname != "" { - http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(jobname), http.StatusFound) // JobName (contains) + // Add Last 30 Days to migitate timeouts + untilTime := strconv.FormatInt(time.Now().Unix(), 10) + fromTime := strconv.FormatInt((time.Now().Unix() - int64(30*24*3600)), 10) + + http.Redirect(rw, r, "/monitoring/jobs/?startTime="+fromTime+"-"+untilTime+"&jobName="+url.QueryEscape(jobname), http.StatusFound) // 30D Fitler + JobName (contains) } else { web.RenderTemplate(rw, "message.tmpl", &web.Page{Title: "Info", MsgType: "alert-info", Message: "Search without result", User: *user, Roles: availableRoles, Build: buildInfo}) } From a4397d54479ee44b56b80a93703e8bff2617dbca Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 27 May 2024 12:09:55 +0200 Subject: [PATCH 020/443] fix: add scramble to textfilter component --- web/frontend/src/filters/TextFilter.svelte | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/filters/TextFilter.svelte b/web/frontend/src/filters/TextFilter.svelte index b010175..db1f184 100644 --- a/web/frontend/src/filters/TextFilter.svelte +++ b/web/frontend/src/filters/TextFilter.svelte @@ -1,6 +1,7 @@ {#if isAdmin == true} @@ -24,7 +23,7 @@ - Plotting Options + User Options - + diff --git a/web/frontend/src/config.entrypoint.js b/web/frontend/src/config.entrypoint.js index 276f648..2978c8c 100644 --- a/web/frontend/src/config.entrypoint.js +++ b/web/frontend/src/config.entrypoint.js @@ -4,7 +4,9 @@ import Config from './Config.root.svelte' new Config({ target: document.getElementById('svelte-app'), props: { - isAdmin: isAdmin + isAdmin: isAdmin, + isApi: isApi, + username: username }, context: new Map([ ['cc-config', clusterCockpitConfig] diff --git a/web/frontend/src/config/PlotSettings.svelte b/web/frontend/src/config/PlotSettings.svelte deleted file mode 100644 index 60a3d7a..0000000 --- a/web/frontend/src/config/PlotSettings.svelte +++ /dev/null @@ -1,552 +0,0 @@ - - - - - - -
- handleSettingSubmit("#line-width-form", "lw")} - > - - -
Line Width
- - {#if displayMessage && message.target == "lw"} -
- - Update: {message.msg} - -
- {/if} -
- -
- - -
- Width of the lines in the timeseries plots. -
-
- -
-
- - - -
- handleSettingSubmit("#plots-per-row-form", "ppr")} - > - - -
Plots per Row
- {#if displayMessage && message.target == "ppr"}
- Update: {message.msg} -
{/if} -
- -
- - -
- How many plots to show next to each other on pages such as - /monitoring/job/, /monitoring/system/... -
-
- -
-
- - - -
- handleSettingSubmit("#backgrounds-form", "bg")} - > - - -
Colored Backgrounds
- {#if displayMessage && message.target == "bg"}
- Update: {message.msg} -
{/if} -
- -
-
- {#if config.plot_general_colorBackground} - - {:else} - - {/if} - -
-
- {#if config.plot_general_colorBackground} - - {:else} - - {/if} - -
-
- -
-
- - - -
- handleSettingSubmit("#paging-form", "pag")} - > - - -
Paging Type
- {#if displayMessage && message.target == "pag"}
- Update: {message.msg} -
{/if} -
- -
-
- {#if config.job_list_usePaging} - - {:else} - - {/if} - -
-
- {#if config.job_list_usePaging} - - {:else} - - {/if} - -
-
- -
-
-
- - - - -
- - -
Color Scheme for Timeseries Plots
- {#if displayMessage && message.target == "cs"}
- Update: {message.msg} -
{/if} -
- - - - {#each Object.entries(colorschemes) as [name, rgbrow]} - - - - - - {/each} - -
{name} - {#if rgbrow.join(",") == config.plot_general_colorscheme} - - handleSettingSubmit("#colorscheme-form", "cs")} - /> - {:else} - - handleSettingSubmit("#colorscheme-form", "cs")} - /> - {/if} - - {#each rgbrow as rgb} - - {/each} -
-
-
-
- - diff --git a/web/frontend/src/config/UserSettings.svelte b/web/frontend/src/config/UserSettings.svelte new file mode 100644 index 0000000..cd1d9a3 --- /dev/null +++ b/web/frontend/src/config/UserSettings.svelte @@ -0,0 +1,47 @@ + + + handleSettingSubmit(e)}/> + handleSettingSubmit(e)}/> + handleSettingSubmit(e)}/> diff --git a/web/frontend/src/config/admin/ShowUsersRow.svelte b/web/frontend/src/config/admin/ShowUsersRow.svelte index 2971365..9ad9666 100644 --- a/web/frontend/src/config/admin/ShowUsersRow.svelte +++ b/web/frontend/src/config/admin/ShowUsersRow.svelte @@ -1,18 +1,17 @@ diff --git a/web/frontend/src/config/user/PlotColorScheme.svelte b/web/frontend/src/config/user/PlotColorScheme.svelte new file mode 100644 index 0000000..ad73791 --- /dev/null +++ b/web/frontend/src/config/user/PlotColorScheme.svelte @@ -0,0 +1,329 @@ + + + + + +
+ + +
Color Scheme for Timeseries Plots
+ {#if displayMessage && message.target == "cs"}
+ Update: {message.msg} +
{/if} +
+ + + + {#each Object.entries(colorschemes) as [name, rgbrow]} + + + + + + {/each} + +
{name} + {#if rgbrow.join(",") == config.plot_general_colorscheme} + + updateSetting("#colorscheme-form", "cs")} + /> + {:else} + + updateSetting("#colorscheme-form", "cs")} + /> + {/if} + + {#each rgbrow as rgb} + + {/each} +
+
+
+
+ + \ No newline at end of file diff --git a/web/frontend/src/config/user/PlotRenderOptions.svelte b/web/frontend/src/config/user/PlotRenderOptions.svelte new file mode 100644 index 0000000..eb96eda --- /dev/null +++ b/web/frontend/src/config/user/PlotRenderOptions.svelte @@ -0,0 +1,166 @@ + + + + + + +
+ updateSetting("#line-width-form", "lw")} + > + + +
Line Width
+ + {#if displayMessage && message.target == "lw"} +
+ + Update: {message.msg} + +
+ {/if} +
+ +
+ + +
+ Width of the lines in the timeseries plots. +
+
+ +
+
+ + + +
+ updateSetting("#plots-per-row-form", "ppr")} + > + + +
Plots per Row
+ {#if displayMessage && message.target == "ppr"}
+ Update: {message.msg} +
{/if} +
+ +
+ + +
+ How many plots to show next to each other on pages such as + /monitoring/job/, /monitoring/system/... +
+
+ +
+
+ + + +
+ updateSetting("#backgrounds-form", "bg")} + > + + +
Colored Backgrounds
+ {#if displayMessage && message.target == "bg"}
+ Update: {message.msg} +
{/if} +
+ +
+
+ {#if config.plot_general_colorBackground} + + {:else} + + {/if} + +
+
+ {#if config.plot_general_colorBackground} + + {:else} + + {/if} + +
+
+ +
+
+
\ No newline at end of file diff --git a/web/frontend/src/config/user/UserOptions.svelte b/web/frontend/src/config/user/UserOptions.svelte new file mode 100644 index 0000000..fe63118 --- /dev/null +++ b/web/frontend/src/config/user/UserOptions.svelte @@ -0,0 +1,131 @@ + + + + + + +
+ updateSetting("#paging-form", "pag")} + > + + +
Paging Type
+ {#if displayMessage && message.target == "pag"}
+ Update: {message.msg} +
{/if} +
+ +
+
+ {#if config.job_list_usePaging} + + {:else} + + {/if} + +
+
+ {#if config.job_list_usePaging} + + {:else} + + {/if} + +
+
+ +
+
+ + + {#if isApi} + + + + + Generate JWT + {#if jwt} + +

+ Your token is displayed on the right. Press this button to copy it to the clipboard. +

+ {:else} + +

+ Generate a JSON Web Token for use with the ClusterCockpit REST-API endpoints. +

+ {/if} +
+
+ + + + + + + Display JWT + + + + + {/if} +
\ No newline at end of file
diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js
index 5346208..48bca6b 100644
--- a/web/frontend/src/utils.js
+++ b/web/frontend/src/utils.js
@@ -433,6 +433,18 @@ export function transformPerNodeDataForRoofline(nodes) {
     return data
 }
 
+export async function fetchJwt(username) {
+    const raw = await fetch(`/userconfig/jwt/?username=${username}`);
+
+    if (!raw.ok) {
+        const message = `An error has occurred: ${raw.status}`;
+        throw new Error(message);
+    }
+
+    const res = await raw.text();
+    return res;
+}
+
 // https://stackoverflow.com/questions/45309447/calculating-median-javascript
 // function median(numbers) {
 //     const sorted = Array.from(numbers).sort((a, b) => a - b);
diff --git a/web/templates/config.tmpl b/web/templates/config.tmpl
index f72cd93..9f3f3be 100644
--- a/web/templates/config.tmpl
+++ b/web/templates/config.tmpl
@@ -8,6 +8,8 @@
 {{define "javascript"}}
     <script>

From 3afe40083d6bde9d4ba7db2f094b3df9c559cb22 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Fri, 5 Jul 2024 11:48:06 +0200
Subject: [PATCH 041/443] rename api userconfig to frontend, return json on
 api auth error

---
 cmd/cc-backend/main.go                       | 56 ++++++++-----------
 internal/api/rest.go                         |  5 +-
 internal/auth/auth.go                        | 50 ++++++++---------
 internal/repository/job.go                   | 24 +++++++-
 .../src/config/user/PlotColorScheme.svelte   |  2 +-
 .../src/config/user/PlotRenderOptions.svelte |  6 +-
 .../src/config/user/UserOptions.svelte       |  2 +-
 web/frontend/src/utils.js                    |  4 +-
 8 files changed, 80 insertions(+), 69 deletions(-)

diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go
index fdddb99..b0faa13 100644
--- a/cmd/cc-backend/main.go
+++ b/cmd/cc-backend/main.go
@@ -374,7 +374,7 @@ func main() {
 	securedapi := r.PathPrefix("/api").Subrouter()
 	userapi := r.PathPrefix("/userapi").Subrouter()
 	configapi := r.PathPrefix("/config").Subrouter()
-	userconfigapi := r.PathPrefix("/userconfig").Subrouter()
+	frontendapi := r.PathPrefix("/frontend").Subrouter()
 
 	if !config.Keys.DisableAuthentication {
 		r.Handle("/login", authentication.Login(
@@ -447,15 +447,13 @@
 			// On success;
 			next,
 
-			// On failure:
+			// On failure: JSON Response
 			func(rw http.ResponseWriter, r *http.Request, err error) {
+				rw.Header().Add("Content-Type", "application/json")
 				rw.WriteHeader(http.StatusUnauthorized)
-				web.RenderTemplate(rw, "login.tmpl", &web.Page{
-					Title: 
"Authentication failed - ClusterCockpit", - MsgType: "alert-danger", - Message: err.Error(), - Build: buildInfo, - Infos: info, + json.NewEncoder(rw).Encode(map[string]string{ + "status": http.StatusText(http.StatusUnauthorized), + "error": err.Error(), }) }) }) - userconfigapi.Use(func(next http.Handler) http.Handler { - return authentication.AuthUserConfigApi( + frontendapi.Use(func(next http.Handler) http.Handler { + return authentication.AuthFrontendApi( // On success; next, - // On failure: + // On failure: JSON Response func(rw http.ResponseWriter, r *http.Request, err error) { + rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusUnauthorized) - web.RenderTemplate(rw, "login.tmpl", &web.Page{ - Title: "Authentication failed - ClusterCockpit", - MsgType: "alert-danger", - Message: err.Error(), - Build: buildInfo, - Infos: info, + json.NewEncoder(rw).Encode(map[string]string{ + "status": http.StatusText(http.StatusUnauthorized), + "error": err.Error(), }) }) }) @@ -532,7 +524,7 @@ func main() { api.MountApiRoutes(securedapi) api.MountUserApiRoutes(userapi) api.MountConfigApiRoutes(configapi) - api.MountUserConfigApiRoutes(userconfigapi) + api.MountFrontendApiRoutes(frontendapi) if config.Keys.EmbedStaticFiles { if i, err := os.Stat("./var/img"); err == nil { diff --git a/internal/api/rest.go b/internal/api/rest.go index 17a3183..b447a21 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -106,12 +106,13 @@ func (api *RestApi) MountConfigApiRoutes(r *mux.Router) { } } -func (api *RestApi) MountUserConfigApiRoutes(r *mux.Router) { +func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) { r.StrictSlash(true) if api.Authentication != nil { - r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet) // Role:Admin Check in + r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet) r.HandleFunc("/configuration/", api.updateConfiguration).Methods(http.MethodPost) + r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet) // Fetched in Job.svelte: Needs All-User-Access-Session-Auth } } diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 7e6f30e..50f4121 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -219,27 +219,25 @@ func (auth *Authentication) Auth( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { - log.Infof("authentication failed: %s", err.Error()) + log.Infof("auth -> authentication failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusUnauthorized) return } - if user == nil { user, err = auth.AuthViaSession(rw, r) if err != nil { - log.Infof("authentication failed: %s", err.Error()) + log.Infof("auth -> authentication failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusUnauthorized) return } } - if user != nil { ctx := context.WithValue(r.Context(), repository.ContextUserKey, user) onsuccess.ServeHTTP(rw, r.WithContext(ctx)) return } - log.Debug("authentication failed") + log.Info("auth -> authentication failed") onfailure(rw, r, errors.New("unauthorized (please login first)")) }) } @@ -251,8 +249,8 @@ func (auth *Authentication) AuthApi( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { - log.Infof("authentication failed: %s", err.Error()) - http.Error(rw, err.Error(), http.StatusUnauthorized) + log.Infof("auth api -> authentication failed: %s", err.Error()) + onfailure(rw, r, err) return } if user != nil { 
@@ -270,12 +268,12 @@ func (auth *Authentication) AuthApi( return } default: - log.Debug("authentication failed") - onfailure(rw, r, errors.New("unauthorized (missing role)")) + log.Info("auth api -> authentication failed: missing role") + onfailure(rw, r, errors.New("unauthorized")) } } - log.Debug("authentication failed") - onfailure(rw, r, errors.New("unauthorized (no auth)")) + log.Info("auth api -> authentication failed: no auth") + onfailure(rw, r, errors.New("unauthorized")) }) } @@ -286,8 +284,8 @@ func (auth *Authentication) AuthUserApi( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { - log.Infof("authentication failed: %s", err.Error()) - http.Error(rw, err.Error(), http.StatusUnauthorized) + log.Infof("auth user api -> authentication failed: %s", err.Error()) + onfailure(rw, r, err) return } if user != nil { @@ -305,12 +303,12 @@ func (auth *Authentication) AuthUserApi( return } default: - log.Debug("authentication failed") - onfailure(rw, r, errors.New("unauthorized (missing role)")) + log.Info("auth user api -> authentication failed: missing role") + onfailure(rw, r, errors.New("unauthorized")) } } - log.Debug("authentication failed") - onfailure(rw, r, errors.New("unauthorized (no auth)")) + log.Info("auth user api -> authentication failed: no auth") + onfailure(rw, r, errors.New("unauthorized")) }) } @@ -321,8 +319,8 @@ func (auth *Authentication) AuthConfigApi( return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.AuthViaSession(rw, r) if err != nil { - log.Infof("authentication failed: %s", err.Error()) - http.Error(rw, err.Error(), http.StatusUnauthorized) + log.Infof("auth config api -> authentication failed: %s", err.Error()) + onfailure(rw, r, err) return } if user != nil && user.HasRole(schema.RoleAdmin) { @@ -330,20 +328,20 @@ func (auth *Authentication) AuthConfigApi( onsuccess.ServeHTTP(rw, r.WithContext(ctx)) return } - log.Debug("authentication failed") - onfailure(rw, r, errors.New("unauthorized (no auth)")) + log.Info("auth config api -> authentication failed: no auth") + onfailure(rw, r, errors.New("unauthorized")) }) } -func (auth *Authentication) AuthUserConfigApi( +func (auth *Authentication) AuthFrontendApi( onsuccess http.Handler, onfailure func(rw http.ResponseWriter, r *http.Request, authErr error), ) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { user, err := auth.AuthViaSession(rw, r) if err != nil { - log.Infof("authentication failed: %s", err.Error()) - http.Error(rw, err.Error(), http.StatusUnauthorized) + log.Infof("auth frontend api -> authentication failed: %s", err.Error()) + onfailure(rw, r, err) return } if user != nil { @@ -351,8 +349,8 @@ func (auth *Authentication) AuthUserConfigApi( onsuccess.ServeHTTP(rw, r.WithContext(ctx)) return } - log.Debug("authentication failed") - onfailure(rw, r, errors.New("unauthorized (no auth)")) + log.Info("auth frontend api -> authentication failed: no auth") + onfailure(rw, r, errors.New("unauthorized")) }) } diff --git a/internal/repository/job.go b/internal/repository/job.go index ddd93d3..20496b2 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -305,16 +305,36 @@ func (r *JobRepository) FindByIdDirect(jobId int64) (*schema.Job, error) { return scanJob(q.RunWith(r.stmtCache).QueryRow()) } +// FindByJobId executes a SQL query to find a specific batch job. +// The job is queried using the slurm id and the clustername. 
+// It returns a pointer to a schema.Job data structure and an error variable. +// To check if no job was found, test err == sql.ErrNoRows +func (r *JobRepository) FindByJobId(ctx context.Context, jobId int64, startTime int64, cluster string) (*schema.Job, error) { + q := sq.Select(jobColumns...). + From("job"). + Where("job.job_id = ?", jobId). + Where("job.cluster = ?", cluster). + Where("job.start_time = ?", startTime) + + q, qerr := SecurityCheck(ctx, q) + if qerr != nil { + return nil, qerr + } + + return scanJob(q.RunWith(r.stmtCache).QueryRow()) +} + // IsJobOwner executes a SQL query to find a specific batch job. // The job is queried using the slurm id, a username and the cluster. // It returns a bool. // If job was found, user is owner: test err != sql.ErrNoRows -func (r *JobRepository) IsJobOwner(jobId int64, user string, cluster string) bool { +func (r *JobRepository) IsJobOwner(jobId int64, startTime int64, user string, cluster string) bool { q := sq.Select("id"). From("job"). Where("job.job_id = ?", jobId). Where("job.user = ?", user). - Where("job.cluster = ?", cluster) + Where("job.cluster = ?", cluster). + Where("job.start_time = ?", startTime) _, err := scanJob(q.RunWith(r.stmtCache).QueryRow()) return err != sql.ErrNoRows diff --git a/web/frontend/src/config/user/PlotColorScheme.svelte b/web/frontend/src/config/user/PlotColorScheme.svelte index ad73791..a36f47c 100644 --- a/web/frontend/src/config/user/PlotColorScheme.svelte +++ b/web/frontend/src/config/user/PlotColorScheme.svelte @@ -262,7 +262,7 @@
diff --git a/web/frontend/src/config/user/PlotRenderOptions.svelte b/web/frontend/src/config/user/PlotRenderOptions.svelte index eb96eda..a237ed3 100644 --- a/web/frontend/src/config/user/PlotRenderOptions.svelte +++ b/web/frontend/src/config/user/PlotRenderOptions.svelte @@ -30,7 +30,7 @@ updateSetting("#line-width-form", "lw")} @@ -76,7 +76,7 @@ updateSetting("#plots-per-row-form", "ppr")} @@ -122,7 +122,7 @@ updateSetting("#backgrounds-form", "bg")} diff --git a/web/frontend/src/config/user/UserOptions.svelte b/web/frontend/src/config/user/UserOptions.svelte index fe63118..d589414 100644 --- a/web/frontend/src/config/user/UserOptions.svelte +++ b/web/frontend/src/config/user/UserOptions.svelte @@ -51,7 +51,7 @@ updateSetting("#paging-form", "pag")} diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index 48bca6b..bb43094 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -239,7 +239,7 @@ export async function fetchMetrics(job, metrics, scopes) { try { let res = await fetch( - `/api/jobs/metrics/${job.id}${query.length > 0 ? "?" : ""}${query.join( + `/frontend/jobs/metrics/${job.id}${query.length > 0 ? "?" : ""}${query.join( "&" )}` ); @@ -434,7 +434,7 @@ export function transformPerNodeDataForRoofline(nodes) { } export async function fetchJwt(username) { - const raw = await fetch(`/userconfig/jwt/?username=${username}`); + const raw = await fetch(`/frontend/jwt/?username=${username}`); if (!raw.ok) { const message = `An error has occurred: ${raw.status}`; From 63fb9239953666eeac4d8dd1f5cadd6bf796b621 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 5 Jul 2024 13:16:21 +0200 Subject: [PATCH 042/443] fix: fix api test router init --- internal/api/api_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/api/api_test.go b/internal/api/api_test.go index e91357e..725563c 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -197,6 +197,8 @@ func TestRestApi(t *testing.T) { } r := mux.NewRouter() + r.PathPrefix("/api").Subrouter() + r.StrictSlash(true) restapi.MountApiRoutes(r) const startJobBody string = `{ From be9df7649f63f7b7805a107f681130ecbffd3e6c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 5 Jul 2024 15:25:24 +0200 Subject: [PATCH 043/443] fix: setup user in api test config --- internal/api/api_test.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 725563c..e8f477e 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -227,11 +227,22 @@ func TestRestApi(t *testing.T) { }` var dbid int64 + const contextUserKey repository.ContextKey = "user" + contextUserValue := &schema.User{ + Username: "testuser", + Projects: make([]string, 0), + Roles: []string{"user"}, + AuthType: 0, + AuthSource: 2, + } + if ok := t.Run("StartJob", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody))) + req := httptest.NewRequest(http.MethodPost, "/jobs/start_job/", bytes.NewBuffer([]byte(startJobBody))) recorder := httptest.NewRecorder() - r.ServeHTTP(recorder, req) + ctx := context.WithValue(req.Context(), contextUserKey, contextUserValue) + + r.ServeHTTP(recorder, req.WithContext(ctx)) response := recorder.Result() if response.StatusCode != http.StatusCreated { t.Fatal(response.Status, recorder.Body.String()) } @@ -242,12 +253,12 @@ func TestRestApi(t *testing.T) { t.Fatal(err) } - job, err := 
restapi.Resolver.Query().Job(context.Background(), strconv.Itoa(int(res.DBID))) + job, err := restapi.Resolver.Query().Job(ctx, strconv.Itoa(int(res.DBID))) if err != nil { t.Fatal(err) } - job.Tags, err = restapi.Resolver.Job().Tags(context.Background(), job) + job.Tags, err = restapi.Resolver.Job().Tags(ctx, job) if err != nil { t.Fatal(err) } From 0a604336c47c599e23f9697e524dd3532b3c6eca Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 5 Jul 2024 15:42:08 +0200 Subject: [PATCH 044/443] Fix other apitest subtests --- internal/api/api_test.go | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/internal/api/api_test.go b/internal/api/api_test.go index e8f477e..0354a0f 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -302,17 +302,19 @@ func TestRestApi(t *testing.T) { var stoppedJob *schema.Job if ok := t.Run("StopJob", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody))) + req := httptest.NewRequest(http.MethodPost, "/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBody))) recorder := httptest.NewRecorder() - r.ServeHTTP(recorder, req) + ctx := context.WithValue(req.Context(), contextUserKey, contextUserValue) + + r.ServeHTTP(recorder, req.WithContext(ctx)) response := recorder.Result() if response.StatusCode != http.StatusOK { t.Fatal(response.Status, recorder.Body.String()) } restapi.JobRepository.WaitForArchiving() - job, err := restapi.Resolver.Query().Job(context.Background(), strconv.Itoa(int(dbid))) + job, err := restapi.Resolver.Query().Job(ctx, strconv.Itoa(int(dbid))) if err != nil { t.Fatal(err) } @@ -354,10 +356,12 @@ func TestRestApi(t *testing.T) { // Starting a job with the same jobId and cluster should only be allowed if the startTime is far apart! 
body := strings.Replace(startJobBody, `"startTime": 123456789`, `"startTime": 123456790`, -1) - req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(body))) + req := httptest.NewRequest(http.MethodPost, "/jobs/start_job/", bytes.NewBuffer([]byte(body))) recorder := httptest.NewRecorder() - r.ServeHTTP(recorder, req) + ctx := context.WithValue(req.Context(), contextUserKey, contextUserValue) + + r.ServeHTTP(recorder, req.WithContext(ctx)) response := recorder.Result() if response.StatusCode != http.StatusUnprocessableEntity { t.Fatal(response.Status, recorder.Body.String()) @@ -384,10 +388,12 @@ func TestRestApi(t *testing.T) { }` ok := t.Run("StartJobFailed", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/start_job/", bytes.NewBuffer([]byte(startJobBodyFailed))) + req := httptest.NewRequest(http.MethodPost, "/jobs/start_job/", bytes.NewBuffer([]byte(startJobBodyFailed))) recorder := httptest.NewRecorder() - r.ServeHTTP(recorder, req) + ctx := context.WithValue(req.Context(), contextUserKey, contextUserValue) + + r.ServeHTTP(recorder, req.WithContext(ctx)) response := recorder.Result() if response.StatusCode != http.StatusCreated { t.Fatal(response.Status, recorder.Body.String()) @@ -406,10 +412,12 @@ func TestRestApi(t *testing.T) { }` ok = t.Run("StopJobFailed", func(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBodyFailed))) + req := httptest.NewRequest(http.MethodPost, "/jobs/stop_job/", bytes.NewBuffer([]byte(stopJobBodyFailed))) recorder := httptest.NewRecorder() - r.ServeHTTP(recorder, req) + ctx := context.WithValue(req.Context(), contextUserKey, contextUserValue) + + r.ServeHTTP(recorder, req.WithContext(ctx)) response := recorder.Result() if response.StatusCode != http.StatusOK { t.Fatal(response.Status, recorder.Body.String()) From c6ede6758989abd7717bc97588fd0b662ceaad9e Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 5 Jul 2024 16:16:01 +0200 Subject: [PATCH 045/443] Add energy footprint --- api/schema.graphqls | 3 +- .../testdata/archive/alex/cluster.json | 3234 ++++++++++++++--- .../testdata/archive/fritz/cluster.json | 6 +- pkg/schema/job.go | 52 +- 4 files changed, 2794 insertions(+), 501 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 154bc35..bb9ae58 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -40,7 +40,6 @@ type JobLink { type Cluster { name: String! partitions: [String!]! # Slurm partitions - metricConfig: [MetricConfig!]! subClusters: [SubCluster!]! # Hardware partitions/subclusters } @@ -56,6 +55,8 @@ type SubCluster { flopRateSimd: MetricValue! memoryBandwidth: MetricValue! topology: Topology! + metricConfig: [MetricConfig!]! + footprint: [String!]! 
} type MetricValue { diff --git a/pkg/archive/testdata/archive/alex/cluster.json b/pkg/archive/testdata/archive/alex/cluster.json index 89af72d..fe791f4 100644 --- a/pkg/archive/testdata/archive/alex/cluster.json +++ b/pkg/archive/testdata/archive/alex/cluster.json @@ -1,484 +1,2772 @@ { - "name": "alex", - "metricConfig": [ - { - "name": "cpu_load", - "unit": { - "base": "" - }, - "scope": "node", - "aggregation": "avg", - "timestep": 60, - "peak": 128, - "normal": 128, - "caution": 10, - "alert": 5 + "name": "alex", + "metricConfig": [ + { + "name": "cpu_load", + "unit": { + "base": "" + }, + "scope": "node", + "aggregation": "avg", + "footprint": true, + "timestep": 60, + "peak": 128, + "normal": 128, + "caution": 10, + "alert": 5 + }, + { + "name": "cpu_user", + "unit": { + "base": "" + }, + "scope": "hwthread", + "aggregation": "avg", + "timestep": 60, + "peak": 100, + "normal": 50, + "caution": 20, + "alert": 10 + }, + { + "name": "mem_used", + "unit": { + "base": "B", + "prefix": "G" + }, + "scope": "node", + "aggregation": "sum", + "footprint": true, + "timestep": 60, + "peak": 512, + "normal": 128, + "caution": 200, + "alert": 240 + }, + { + "name": "flops_any", + "unit": { + "base": "Flops/s", + "prefix": "G" + }, + "scope": "hwthread", + "aggregation": "sum", + "footprint": true, + "timestep": 60, + "peak": 9216, + "normal": 1000, + "caution": 200, + "alert": 50 + }, + { + "name": "mem_bw", + "unit": { + "base": "B/s", + "prefix": "G" + }, + "scope": "socket", + "aggregation": "sum", + "footprint": true, + "timestep": 60, + "peak": 350, + "normal": 100, + "caution": 50, + "alert": 10 + }, + { + "name": "clock", + "unit": { + "base": "Hz", + "prefix": "M" + }, + "scope": "hwthread", + "aggregation": "avg", + "timestep": 60, + "peak": 3000, + "normal": 2400, + "caution": 1800, + "alert": 1200 + }, + { + "name": "core_power", + "unit": { + "base": "W" + }, + "scope": "hwthread", + "aggregation": "sum", + "energy": true, + "timestep": 60, + "peak": 500, + "normal": 250, + "caution": 100, + "alert": 50 + }, + { + "name": "acc_utilization", + "unit": { + "base": "" + }, + "scope": "accelerator", + "aggregation": "avg", + "footprint": true, + "timestep": 60, + "peak": 100, + "normal": 80, + "caution": 50, + "alert": 20 + }, + { + "name": "acc_mem_used", + "unit": { + "base": "B", + "prefix": "G" + }, + "scope": "accelerator", + "aggregation": "sum", + "timestep": 60, + "peak": 40, + "normal": 20, + "caution": 10, + "alert": 5 + }, + { + "name": "acc_power", + "unit": { + "base": "W" + }, + "scope": "accelerator", + "aggregation": "sum", + "energy": true, + "timestep": 60, + "peak": 400, + "normal": 200, + "caution": 50, + "alert": 20 + }, + { + "name": "nv_mem_util", + "unit": { + "base": "" + }, + "scope": "accelerator", + "aggregation": "avg", + "timestep": 60, + "peak": 100, + "normal": 80, + "caution": 20, + "alert": 10 + }, + { + "name": "nv_temp", + "unit": { + "base": "°C" + }, + "scope": "accelerator", + "aggregation": "avg", + "timestep": 60, + "peak": 40, + "normal": 20, + "caution": 5, + "alert": 2 + }, + { + "name": "nv_sm_clock", + "unit": { + "base": "Hz", + "prefix": "M" + }, + "scope": "accelerator", + "aggregation": "avg", + "timestep": 60, + "peak": 1400, + "normal": 1200, + "caution": 100, + "alert": 50 + }, + { + "name": "cpu_power", + "unit": { + "base": "W" + }, + "scope": "socket", + "aggregation": "sum", + "energy": true, + "timestep": 60, + "peak": 500, + "normal": 250, + "caution": 100, + "alert": 50 + }, + { + "name": "ipc", + "unit": { + "base": "IPC" + }, + 
"scope": "hwthread", + "aggregation": "avg", + "timestep": 60, + "peak": 4, + "normal": 2, + "caution": 1, + "alert": 0.5 + } + ], + "subClusters": [ + { + "name": "a40", + "nodes": "a[0121-0129],a[0221-0229],a[0321-0329],a[0421-0429],a[0521-0522],a[1621-1624],a[1721-1722]", + "processorType": "AMD Milan", + "socketsPerNode": 2, + "coresPerSocket": 64, + "threadsPerCore": 1, + "flopRateScalar": { + "unit": { + "base": "F/s", + "prefix": "G" }, - { - "name": "cpu_user", - "unit": { - "base": "" - }, - "scope": "hwthread", - "aggregation": "avg", - "timestep": 60, - "peak": 100, - "normal": 50, - "caution": 20, - "alert": 10 + "value": 432 + }, + "flopRateSimd": { + "unit": { + "base": "F/s", + "prefix": "G" }, - { - "name": "mem_used", - "unit": { - "base": "B", - "prefix": "G" - }, - "scope": "node", - "aggregation": "sum", - "timestep": 60, - "peak": 512, - "normal": 128, - "caution": 200, - "alert": 240 + "value": 9216 + }, + "memoryBandwidth": { + "unit": { + "base": "B/s", + "prefix": "G" }, - { - "name": "flops_any", - "unit": { - "base": "Flops/s", - "prefix": "G" - }, - "scope": "hwthread", - "aggregation": "sum", - "timestep": 60, - "peak": 9216, - "normal": 1000, - "caution": 200, - "alert": 50 + "value": 400 + }, + "topology": { + "node": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ], + "socket": [ + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63 + ], + [ + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ] + ], + "memoryDomain": [ + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 
108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ] + ], + "core": [ + [ + 0 + ], + [ + 1 + ], + [ + 2 + ], + [ + 3 + ], + [ + 4 + ], + [ + 5 + ], + [ + 6 + ], + [ + 7 + ], + [ + 8 + ], + [ + 9 + ], + [ + 10 + ], + [ + 11 + ], + [ + 12 + ], + [ + 13 + ], + [ + 14 + ], + [ + 15 + ], + [ + 16 + ], + [ + 17 + ], + [ + 18 + ], + [ + 19 + ], + [ + 20 + ], + [ + 21 + ], + [ + 22 + ], + [ + 23 + ], + [ + 24 + ], + [ + 25 + ], + [ + 26 + ], + [ + 27 + ], + [ + 28 + ], + [ + 29 + ], + [ + 30 + ], + [ + 31 + ], + [ + 32 + ], + [ + 33 + ], + [ + 34 + ], + [ + 35 + ], + [ + 36 + ], + [ + 37 + ], + [ + 38 + ], + [ + 39 + ], + [ + 40 + ], + [ + 41 + ], + [ + 42 + ], + [ + 43 + ], + [ + 44 + ], + [ + 45 + ], + [ + 46 + ], + [ + 47 + ], + [ + 48 + ], + [ + 49 + ], + [ + 50 + ], + [ + 51 + ], + [ + 52 + ], + [ + 53 + ], + [ + 54 + ], + [ + 55 + ], + [ + 56 + ], + [ + 57 + ], + [ + 58 + ], + [ + 59 + ], + [ + 60 + ], + [ + 61 + ], + [ + 62 + ], + [ + 63 + ], + [ + 64 + ], + [ + 65 + ], + [ + 66 + ], + [ + 67 + ], + [ + 68 + ], + [ + 69 + ], + [ + 70 + ], + [ + 71 + ], + [ + 73 + ], + [ + 74 + ], + [ + 75 + ], + [ + 76 + ], + [ + 77 + ], + [ + 78 + ], + [ + 79 + ], + [ + 80 + ], + [ + 81 + ], + [ + 82 + ], + [ + 83 + ], + [ + 84 + ], + [ + 85 + ], + [ + 86 + ], + [ + 87 + ], + [ + 88 + ], + [ + 89 + ], + [ + 90 + ], + [ + 91 + ], + [ + 92 + ], + [ + 93 + ], + [ + 94 + ], + [ + 95 + ], + [ + 96 + ], + [ + 97 + ], + [ + 98 + ], + [ + 99 + ], + [ + 100 + ], + [ + 101 + ], + [ + 102 + ], + [ + 103 + ], + [ + 104 + ], + [ + 105 + ], + [ + 106 + ], + [ + 107 + ], + [ + 108 + ], + [ + 109 + ], + [ + 110 + ], + [ + 111 + ], + [ + 112 + ], + [ + 113 + ], + [ + 114 + ], + [ + 115 + ], + [ + 116 + ], + [ + 117 + ], + [ + 118 + ], + [ + 119 + ], + [ + 120 + ], + [ + 121 + ], + [ + 122 + ], + [ + 123 + ], + [ + 124 + ], + [ + 125 + ], + [ + 126 + ], + [ + 127 + ] + ], + "accelerators": [ + { + "id": "00000000:01:00.0", + "type": "Nvidia GPU", + "model": "A40" + }, + { + "id": "00000000:25:00.0", + "type": "Nvidia GPU", + "model": "A40" + }, + { + "id": "00000000:41:00.0", + "type": "Nvidia GPU", + "model": "A40" + }, + { + "id": "00000000:61:00.0", + "type": "Nvidia GPU", + "model": "A40" + }, + { + "id": "00000000:81:00.0", + "type": "Nvidia GPU", + "model": "A40" + }, + { + "id": "00000000:A1:00.0", + "type": "Nvidia GPU", + "model": "A40" + }, + { + "id": "00000000:C1:00.0", + "type": "Nvidia GPU", + "model": "A40" + }, + { + "id": "00000000:E1:00.0", + "type": "Nvidia GPU", + "model": "A40" + } + ] + } + }, + { + "name": "a100", + "nodes": "a[0601-0605],a[0701-0705],a[0801-0805],a[0901-0905]", + "processorType": "AMD Milan", + "socketsPerNode": 2, + "coresPerSocket": 64, + "threadsPerCore": 1, + "flopRateScalar": { + "unit": { + "base": "F/s", + "prefix": "G" }, - { - "name": "mem_bw", - "unit": { - "base": "B/s", - "prefix": "G" - }, - "scope": "socket", - "aggregation": "sum", - "timestep": 60, - "peak": 350, - "normal": 100, - "caution": 50, - "alert": 10 + "value": 432 + }, + "flopRateSimd": { + "unit": { + "base": "F/s", + "prefix": "G" }, - { - "name": "clock", - "unit": { - "base": "Hz", - "prefix": "M" - }, - "scope": "hwthread", - "aggregation": "avg", - "timestep": 60, - "peak": 3000, - "normal": 2400, - "caution": 1800, - "alert": 1200 + "value": 9216 + }, + "memoryBandwidth": { + "unit": { + "base": "B/s", + "prefix": "G" }, - { - "name": "core_power", - "unit": { - "base": "W" - }, - "scope": "hwthread", - 
"aggregation": "sum", - "timestep": 60, - "peak": 500, - "normal": 250, - "caution": 100, - "alert": 50 + "value": 400 + }, + "topology": { + "node": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ], + "socket": [ + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63 + ], + [ + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ] + ], + "memoryDomain": [ + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ] + ], + "core": [ + [ + 0 + ], + [ + 1 + ], + [ + 2 + ], + [ + 3 + ], + [ + 4 + ], + [ + 5 + ], + [ + 6 + ], + [ + 7 + ], + [ + 8 + ], + [ + 9 + ], + [ + 10 + ], + [ + 11 + ], + [ + 12 + ], + [ + 13 + ], + [ + 14 + ], + [ + 15 + ], + [ + 16 + ], + [ + 17 + ], + [ + 18 + ], + [ + 19 + ], + [ + 20 + ], + [ + 21 + ], + [ + 22 + ], + [ + 23 + ], + [ + 24 + ], + [ + 25 + ], + [ + 26 + ], + [ + 27 + ], + [ + 28 + ], + [ + 29 + ], + [ + 30 + ], + [ + 31 + ], + [ + 32 + ], + [ + 33 + ], + [ + 34 + ], + [ + 35 + ], + [ + 36 + ], + [ + 37 + ], + [ + 38 + ], + [ + 39 + ], + [ + 40 + ], + [ + 41 + ], + [ + 42 + ], + [ + 43 + ], + [ + 44 + ], + [ + 45 + ], + [ + 46 + ], + [ + 47 + ], + [ + 48 + ], + [ + 49 + ], + [ + 50 + ], + [ + 51 + ], + [ + 52 + ], + [ + 53 + ], + [ + 54 + ], + [ + 55 + ], + [ + 56 + ], + [ + 57 + ], + [ + 58 + ], + [ + 59 + ], + [ + 60 + ], + [ + 61 + ], + [ + 62 + ], + [ + 63 + ], + [ + 64 + ], + [ + 65 + ], + [ + 66 + ], + [ + 67 + ], + [ + 68 + ], + [ + 
69 + ], + [ + 70 + ], + [ + 71 + ], + [ + 73 + ], + [ + 74 + ], + [ + 75 + ], + [ + 76 + ], + [ + 77 + ], + [ + 78 + ], + [ + 79 + ], + [ + 80 + ], + [ + 81 + ], + [ + 82 + ], + [ + 83 + ], + [ + 84 + ], + [ + 85 + ], + [ + 86 + ], + [ + 87 + ], + [ + 88 + ], + [ + 89 + ], + [ + 90 + ], + [ + 91 + ], + [ + 92 + ], + [ + 93 + ], + [ + 94 + ], + [ + 95 + ], + [ + 96 + ], + [ + 97 + ], + [ + 98 + ], + [ + 99 + ], + [ + 100 + ], + [ + 101 + ], + [ + 102 + ], + [ + 103 + ], + [ + 104 + ], + [ + 105 + ], + [ + 106 + ], + [ + 107 + ], + [ + 108 + ], + [ + 109 + ], + [ + 110 + ], + [ + 111 + ], + [ + 112 + ], + [ + 113 + ], + [ + 114 + ], + [ + 115 + ], + [ + 116 + ], + [ + 117 + ], + [ + 118 + ], + [ + 119 + ], + [ + 120 + ], + [ + 121 + ], + [ + 122 + ], + [ + 123 + ], + [ + 124 + ], + [ + 125 + ], + [ + 126 + ], + [ + 127 + ] + ], + "accelerators": [ + { + "id": "00000000:0E:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:13:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:49:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:4F:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:90:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:96:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:CC:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:D1:00.0", + "type": "Nvidia GPU", + "model": "A100" + } + ] + } + }, + { + "name": "a100m80", + "nodes": "a[0531-0537],a[0631-0633],a0831,a[0931-0934]", + "processorType": "AMD Milan", + "socketsPerNode": 2, + "coresPerSocket": 64, + "threadsPerCore": 1, + "flopRateScalar": { + "unit": { + "base": "F/s", + "prefix": "G" }, - { - "name": "acc_utilization", - "unit": { - "base": "" - }, - "scope": "accelerator", - "aggregation": "avg", - "timestep": 60, - "peak": 100, - "normal": 80, - "caution": 50, - "alert": 20 + "value": 432 + }, + "flopRateSimd": { + "unit": { + "base": "F/s", + "prefix": "G" }, - { - "name": "acc_mem_used", - "unit": { - "base": "B", - "prefix": "G" - }, - "scope": "accelerator", - "aggregation": "sum", - "timestep": 60, - "peak": 40, - "normal": 20, - "caution": 10, - "alert": 5 + "value": 9216 + }, + "memoryBandwidth": { + "unit": { + "base": "B/s", + "prefix": "G" }, - { - "name": "acc_power", - "unit": { - "base": "W" - }, - "scope": "accelerator", - "aggregation": "sum", - "timestep": 60, - "peak": 400, - "normal": 200, - "caution": 50, - "alert": 20 - }, - { - "name": "nv_mem_util", - "unit": { - "base": "" - }, - "scope": "accelerator", - "aggregation": "avg", - "timestep": 60, - "peak": 100, - "normal": 80, - "caution": 20, - "alert": 10 - }, - { - "name": "nv_temp", - "unit": { - "base": "°C" - }, - "scope": "accelerator", - "aggregation": "avg", - "timestep": 60, - "peak": 40, - "normal": 20, - "caution": 5, - "alert": 2 - }, - { - "name": "nv_sm_clock", - "unit": { - "base": "Hz", - "prefix": "M" - }, - "scope": "accelerator", - "aggregation": "avg", - "timestep": 60, - "peak": 1400, - "normal": 1200, - "caution": 100, - "alert": 50 - }, - { - "name": "cpu_power", - "unit": { - "base": "W" - }, - "scope": "socket", - "aggregation": "sum", - "timestep": 60, - "peak": 500, - "normal": 250, - "caution": 100, - "alert": 50 - }, - { - "name": "ipc", - "unit": { - "base": "IPC" - }, - "scope": "hwthread", - "aggregation": "avg", - "timestep": 60, - "peak": 4, - "normal": 2, - "caution": 1, - "alert": 0.5 - } - ], - "subClusters": [ - { - "name": "a40", - 
"nodes": "a[0121-0129],a[0221-0229],a[0321-0329],a[0421-0429],a[0521-0522],a[1621-1624],a[1721-1722]", - "processorType": "AMD Milan", - "socketsPerNode": 2, - "coresPerSocket": 64, - "threadsPerCore": 1, - "flopRateScalar": { - "unit": { - "base": "F/s", - "prefix": "G" - }, - "value": 432 - }, - "flopRateSimd": { - "unit": { - "base": "F/s", - "prefix": "G" - }, - "value": 9216 - }, - "memoryBandwidth": { - "unit": { - "base": "B/s", - "prefix": "G" - }, - "value": 400 - }, - "topology": { - "node": [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ], - "socket": [ - [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 - ], - [ - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ] - ], - "memoryDomain": [ - [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ] - ], - "core": [ - [ 0 ], [ 1 ], [ 2 ], [ 3 ], [ 4 ], [ 5 ], [ 6 ], [ 7 ], [ 8 ], [ 9 ], [ 10 ], [ 11 ], [ 12 ], [ 13 ], [ 14 ], [ 15 ], [ 16 ], [ 17 ], [ 18 ], [ 19 ], [ 20 ], [ 21 ], [ 22 ], [ 23 ], [ 24 ], [ 25 ], [ 26 ], [ 27 ], [ 28 ], [ 29 ], [ 30 ], [ 31 ], [ 32 ], [ 33 ], [ 34 ], [ 35 ], [ 36 ], [ 37 ], [ 38 ], [ 39 ], [ 40 ], [ 41 ], [ 42 ], [ 43 ], [ 44 ], [ 45 ], [ 46 ], [ 47 ], [ 48 ], [ 49 ], [ 50 ], [ 51 ], [ 52 ], [ 53 ], [ 54 ], [ 55 ], [ 56 ], [ 57 ], [ 58 ], [ 59 ], [ 60 ], [ 61 ], [ 62 ], [ 63 ], [ 64 ], [ 65 ], [ 66 ], [ 67 ], [ 68 ], [ 69 ], [ 70 ], [ 71 ], [ 73 ], [ 74 ], [ 75 ], [ 76 ], [ 77 ], [ 78 ], [ 79 ], [ 80 ], [ 81 ], [ 82 ], [ 83 ], [ 84 ], [ 85 ], [ 86 ], [ 87 ], [ 88 ], [ 89 ], [ 90 ], [ 91 ], [ 92 ], [ 93 ], [ 94 ], [ 95 ], [ 96 ], [ 97 ], [ 98 ], [ 99 ], [ 100 ], [ 101 ], [ 102 ], [ 103 ], [ 104 ], [ 105 ], [ 106 ], [ 107 ], [ 108 ], [ 109 ], [ 110 ], [ 111 ], [ 112 ], [ 113 ], [ 114 ], [ 115 ], [ 116 ], [ 117 ], [ 118 ], [ 119 ], [ 120 ], [ 121 ], [ 122 ], [ 123 ], [ 124 ], [ 125 ], [ 126 ], [ 127 ] - ], - "accelerators": [ - { - "id": "00000000:01:00.0", - "type": "Nvidia GPU", - "model": "A40" - }, - { - "id": "00000000:25:00.0", - "type": "Nvidia GPU", - "model": "A40" - }, - { - "id": "00000000:41:00.0", - "type": "Nvidia GPU", - "model": "A40" - }, - { - "id": "00000000:61:00.0", - "type": "Nvidia GPU", - "model": 
"A40" - }, - { - "id": "00000000:81:00.0", - "type": "Nvidia GPU", - "model": "A40" - }, - { - "id": "00000000:A1:00.0", - "type": "Nvidia GPU", - "model": "A40" - }, - { - "id": "00000000:C1:00.0", - "type": "Nvidia GPU", - "model": "A40" - }, - { - "id": "00000000:E1:00.0", - "type": "Nvidia GPU", - "model": "A40" - } - ] - } - }, - { - "name": "a100", - "nodes": "a[0601-0605],a[0701-0705],a[0801-0805],a[0901-0905]", - "processorType": "AMD Milan", - "socketsPerNode": 2, - "coresPerSocket": 64, - "threadsPerCore": 1, - "flopRateScalar": { - "unit": { - "base": "F/s", - "prefix": "G" - }, - "value": 432 - }, - "flopRateSimd": { - "unit": { - "base": "F/s", - "prefix": "G" - }, - "value": 9216 - }, - "memoryBandwidth": { - "unit": { - "base": "B/s", - "prefix": "G" - }, - "value": 400 - }, - "topology": { - "node": [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ], - "socket": [ - [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 - ], - [ - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ] - ], - "memoryDomain": [ - [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ] - ], - "core": [ - [ 0 ], [ 1 ], [ 2 ], [ 3 ], [ 4 ], [ 5 ], [ 6 ], [ 7 ], [ 8 ], [ 9 ], [ 10 ], [ 11 ], [ 12 ], [ 13 ], [ 14 ], [ 15 ], [ 16 ], [ 17 ], [ 18 ], [ 19 ], [ 20 ], [ 21 ], [ 22 ], [ 23 ], [ 24 ], [ 25 ], [ 26 ], [ 27 ], [ 28 ], [ 29 ], [ 30 ], [ 31 ], [ 32 ], [ 33 ], [ 34 ], [ 35 ], [ 36 ], [ 37 ], [ 38 ], [ 39 ], [ 40 ], [ 41 ], [ 42 ], [ 43 ], [ 44 ], [ 45 ], [ 46 ], [ 47 ], [ 48 ], [ 49 ], [ 50 ], [ 51 ], [ 52 ], [ 53 ], [ 54 ], [ 55 ], [ 56 ], [ 57 ], [ 58 ], [ 59 ], [ 60 ], [ 61 ], [ 62 ], [ 63 ], [ 64 ], [ 65 ], [ 66 ], [ 67 ], [ 68 ], [ 69 ], [ 70 ], [ 71 ], [ 73 ], [ 74 ], [ 75 ], [ 76 ], [ 77 ], [ 78 ], [ 79 ], [ 80 ], [ 81 ], [ 82 ], [ 83 ], [ 84 ], [ 85 ], [ 86 ], [ 87 ], [ 88 ], [ 89 ], [ 90 ], [ 91 ], [ 92 ], [ 93 ], [ 94 ], [ 95 ], [ 96 ], [ 97 ], [ 98 ], [ 99 ], [ 100 ], [ 101 ], [ 102 ], [ 103 ], [ 104 ], [ 105 ], [ 106 ], [ 107 ], [ 108 ], [ 109 ], [ 110 ], [ 111 ], [ 112 ], [ 113 ], [ 114 ], [ 115 ], [ 116 ], [ 117 ], [ 118 ], [ 119 ], [ 120 ], [ 121 ], [ 122 ], [ 123 ], [ 124 ], [ 125 ], [ 126 ], [ 127 ] - ], - 
"accelerators": [ - { - "id": "00000000:0E:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:13:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:49:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:4F:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:90:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:96:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:CC:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:D1:00.0", - "type": "Nvidia GPU", - "model": "A100" - } - ] - } - }, - { - "name": "a100m80", - "nodes": "a[0531-0537],a[0631-0633],a0831,a[0931-0934]", - "processorType": "AMD Milan", - "socketsPerNode": 2, - "coresPerSocket": 64, - "threadsPerCore": 1, - "flopRateScalar": { - "unit": { - "base": "F/s", - "prefix": "G" - }, - "value": 432 - }, - "flopRateSimd": { - "unit": { - "base": "F/s", - "prefix": "G" - }, - "value": 9216 - }, - "memoryBandwidth": { - "unit": { - "base": "B/s", - "prefix": "G" - }, - "value": 400 - }, - "topology": { - "node": [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ], - "socket": [ - [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 - ], - [ - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ] - ], - "memoryDomain": [ - [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127 - ] - ], - "core": [ - [ 0 ], [ 1 ], [ 2 ], [ 3 ], [ 4 ], [ 5 ], [ 6 ], [ 7 ], [ 8 ], [ 9 ], [ 10 ], [ 11 ], [ 12 ], [ 13 ], [ 14 ], [ 15 ], [ 16 ], [ 17 ], [ 18 ], [ 19 ], [ 20 ], [ 21 ], [ 22 ], [ 23 ], [ 24 ], [ 25 ], [ 26 ], [ 27 ], [ 28 ], [ 29 ], [ 30 ], [ 31 ], [ 32 ], [ 33 ], [ 34 ], [ 35 ], [ 36 ], [ 37 ], [ 38 ], [ 39 ], [ 40 ], [ 41 ], [ 42 ], [ 43 ], [ 44 ], [ 45 ], [ 46 ], [ 47 ], [ 48 ], [ 49 ], [ 50 ], [ 51 ], [ 52 ], [ 53 ], [ 54 ], [ 55 ], [ 56 ], [ 57 ], [ 58 ], [ 59 ], [ 60 ], [ 61 ], [ 62 ], [ 63 ], [ 64 ], [ 65 ], [ 66 ], [ 67 ], [ 68 ], [ 69 ], [ 70 ], [ 71 ], [ 73 ], [ 74 ], [ 75 ], [ 76 ], [ 77 ], [ 78 ], [ 79 ], [ 80 ], [ 81 ], [ 82 ], [ 83 ], [ 84 ], [ 85 ], [ 86 ], [ 87 ], [ 88 ], [ 89 ], [ 90 ], [ 91 ], [ 
92 ], [ 93 ], [ 94 ], [ 95 ], [ 96 ], [ 97 ], [ 98 ], [ 99 ], [ 100 ], [ 101 ], [ 102 ], [ 103 ], [ 104 ], [ 105 ], [ 106 ], [ 107 ], [ 108 ], [ 109 ], [ 110 ], [ 111 ], [ 112 ], [ 113 ], [ 114 ], [ 115 ], [ 116 ], [ 117 ], [ 118 ], [ 119 ], [ 120 ], [ 121 ], [ 122 ], [ 123 ], [ 124 ], [ 125 ], [ 126 ], [ 127 ] - ], - "accelerators": [ - { - "id": "00000000:0E:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:13:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:49:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:4F:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:90:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:96:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:CC:00.0", - "type": "Nvidia GPU", - "model": "A100" - }, - { - "id": "00000000:D1:00.0", - "type": "Nvidia GPU", - "model": "A100" - } - ] - } - } - ] + "value": 400 + }, + "topology": { + "node": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ], + "socket": [ + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63 + ], + [ + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ] + ], + "memoryDomain": [ + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127 + ] + ], + "core": [ + [ + 0 + ], + [ + 1 + ], + [ + 2 + ], + [ + 3 + ], + [ + 4 + ], + [ + 5 + ], + [ 
+ 6 + ], + [ + 7 + ], + [ + 8 + ], + [ + 9 + ], + [ + 10 + ], + [ + 11 + ], + [ + 12 + ], + [ + 13 + ], + [ + 14 + ], + [ + 15 + ], + [ + 16 + ], + [ + 17 + ], + [ + 18 + ], + [ + 19 + ], + [ + 20 + ], + [ + 21 + ], + [ + 22 + ], + [ + 23 + ], + [ + 24 + ], + [ + 25 + ], + [ + 26 + ], + [ + 27 + ], + [ + 28 + ], + [ + 29 + ], + [ + 30 + ], + [ + 31 + ], + [ + 32 + ], + [ + 33 + ], + [ + 34 + ], + [ + 35 + ], + [ + 36 + ], + [ + 37 + ], + [ + 38 + ], + [ + 39 + ], + [ + 40 + ], + [ + 41 + ], + [ + 42 + ], + [ + 43 + ], + [ + 44 + ], + [ + 45 + ], + [ + 46 + ], + [ + 47 + ], + [ + 48 + ], + [ + 49 + ], + [ + 50 + ], + [ + 51 + ], + [ + 52 + ], + [ + 53 + ], + [ + 54 + ], + [ + 55 + ], + [ + 56 + ], + [ + 57 + ], + [ + 58 + ], + [ + 59 + ], + [ + 60 + ], + [ + 61 + ], + [ + 62 + ], + [ + 63 + ], + [ + 64 + ], + [ + 65 + ], + [ + 66 + ], + [ + 67 + ], + [ + 68 + ], + [ + 69 + ], + [ + 70 + ], + [ + 71 + ], + [ + 73 + ], + [ + 74 + ], + [ + 75 + ], + [ + 76 + ], + [ + 77 + ], + [ + 78 + ], + [ + 79 + ], + [ + 80 + ], + [ + 81 + ], + [ + 82 + ], + [ + 83 + ], + [ + 84 + ], + [ + 85 + ], + [ + 86 + ], + [ + 87 + ], + [ + 88 + ], + [ + 89 + ], + [ + 90 + ], + [ + 91 + ], + [ + 92 + ], + [ + 93 + ], + [ + 94 + ], + [ + 95 + ], + [ + 96 + ], + [ + 97 + ], + [ + 98 + ], + [ + 99 + ], + [ + 100 + ], + [ + 101 + ], + [ + 102 + ], + [ + 103 + ], + [ + 104 + ], + [ + 105 + ], + [ + 106 + ], + [ + 107 + ], + [ + 108 + ], + [ + 109 + ], + [ + 110 + ], + [ + 111 + ], + [ + 112 + ], + [ + 113 + ], + [ + 114 + ], + [ + 115 + ], + [ + 116 + ], + [ + 117 + ], + [ + 118 + ], + [ + 119 + ], + [ + 120 + ], + [ + 121 + ], + [ + 122 + ], + [ + 123 + ], + [ + 124 + ], + [ + 125 + ], + [ + 126 + ], + [ + 127 + ] + ], + "accelerators": [ + { + "id": "00000000:0E:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:13:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:49:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:4F:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:90:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:96:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:CC:00.0", + "type": "Nvidia GPU", + "model": "A100" + }, + { + "id": "00000000:D1:00.0", + "type": "Nvidia GPU", + "model": "A100" + } + ] + } + } + ] } diff --git a/pkg/archive/testdata/archive/fritz/cluster.json b/pkg/archive/testdata/archive/fritz/cluster.json index 00a8f70..8c7331c 100644 --- a/pkg/archive/testdata/archive/fritz/cluster.json +++ b/pkg/archive/testdata/archive/fritz/cluster.json @@ -252,7 +252,8 @@ "peak": 500, "normal": 250, "caution": 100, - "alert": 50 + "alert": 50, + "energy": true }, { "name": "mem_power", @@ -265,7 +266,8 @@ "peak": 100, "normal": 50, "caution": 20, - "alert": 10 + "alert": 10, + "energy": true }, { "name": "ipc", diff --git a/pkg/schema/job.go b/pkg/schema/job.go index 3cfdf55..83064c7 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -16,31 +16,33 @@ import ( // Common subset of Job and JobMeta. Use one of those, not this type directly. 
type BaseJob struct { - Cluster string `json:"cluster" db:"cluster" example:"fritz"` - SubCluster string `json:"subCluster" db:"subcluster" example:"main"` - Partition string `json:"partition,omitempty" db:"partition" example:"main"` - Project string `json:"project" db:"project" example:"abcd200"` - User string `json:"user" db:"user" example:"abcd100h"` - State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` - Tags []*Tag `json:"tags,omitempty"` - RawFootprint []byte `json:"-" db:"footprint"` - RawMetaData []byte `json:"-" db:"meta_data"` - RawResources []byte `json:"-" db:"resources"` - Resources []*Resource `json:"resources"` - Footprint map[string]float64 `json:"footPrint"` - MetaData map[string]string `json:"metaData"` - ConcurrentJobs JobLinkResultList `json:"concurrentJobs"` - Energy float64 `json:"energy"` - ArrayJobId int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` - Walltime int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` - JobID int64 `json:"jobId" db:"job_id" example:"123000"` - Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` - SMT int32 `json:"smt,omitempty" db:"smt" example:"4"` - MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` - Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` - NumAcc int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` - NumHWThreads int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` - NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` + Cluster string `json:"cluster" db:"cluster" example:"fritz"` + SubCluster string `json:"subCluster" db:"subcluster" example:"main"` + Partition string `json:"partition,omitempty" db:"partition" example:"main"` + Project string `json:"project" db:"project" example:"abcd200"` + User string `json:"user" db:"user" example:"abcd100h"` + State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` + Tags []*Tag `json:"tags,omitempty"` + RawEnergyFootprint []byte `json:"-" db:"energy_footprint"` + RawFootprint []byte `json:"-" db:"footprint"` + RawMetaData []byte `json:"-" db:"meta_data"` + RawResources []byte `json:"-" db:"resources"` + Resources []*Resource `json:"resources"` + EnergyFootprint map[string]float64 `json:"energyFootprint"` + Footprint map[string]float64 `json:"footprint"` + MetaData map[string]string `json:"metaData"` + ConcurrentJobs JobLinkResultList `json:"concurrentJobs"` + Energy float64 `json:"energy"` + ArrayJobId int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` + Walltime int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` + JobID int64 `json:"jobId" db:"job_id" example:"123000"` + Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` + SMT int32 `json:"smt,omitempty" db:"smt" example:"4"` + MonitoringStatus int32 `json:"monitoringStatus,omitempty" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` + Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` + NumAcc int32 `json:"numAcc,omitempty" db:"num_acc" example:"2" minimum:"1"` + NumHWThreads int32 `json:"numHwthreads,omitempty" db:"num_hwthreads" example:"20" minimum:"1"` + NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` } // 
Job struct type From f1e341f0b9c375ac8363823054a936e70904c956 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 9 Jul 2024 09:17:50 +0200 Subject: [PATCH 046/443] Initial commit for frontend refactor --- api/schema.graphqls | 2 +- internal/graph/generated/generated.go | 314 ++++++++++++------------ internal/graph/schema.resolvers.go | 4 +- internal/repository/job.go | 28 +++ web/frontend/src/joblist/JobList.svelte | 4 +- web/frontend/src/utils.js | 68 +++-- 6 files changed, 242 insertions(+), 178 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index bb9ae58..f1592b1 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -27,7 +27,7 @@ type Job { tags: [Tag!]! resources: [Resource!]! concurrentJobs: JobLinkResultList - footprint: [MetricValue] + footprint: Any metaData: Any userData: User } diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index e40a1a2..910dbaa 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -59,10 +59,9 @@ type ComplexityRoot struct { } Cluster struct { - MetricConfig func(childComplexity int) int - Name func(childComplexity int) int - Partitions func(childComplexity int) int - SubClusters func(childComplexity int) int + Name func(childComplexity int) int + Partitions func(childComplexity int) int + SubClusters func(childComplexity int) int } Count struct { @@ -259,7 +258,9 @@ type ComplexityRoot struct { CoresPerSocket func(childComplexity int) int FlopRateScalar func(childComplexity int) int FlopRateSimd func(childComplexity int) int + Footprint func(childComplexity int) int MemoryBandwidth func(childComplexity int) int + MetricConfig func(childComplexity int) int Name func(childComplexity int) int Nodes func(childComplexity int) int NumberOfNodes func(childComplexity int) int @@ -323,7 +324,7 @@ type JobResolver interface { Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) - Footprint(ctx context.Context, obj *schema.Job) ([]*schema.MetricValue, error) + Footprint(ctx context.Context, obj *schema.Job) (interface{}, error) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) } @@ -394,13 +395,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Accelerator.Type(childComplexity), true - case "Cluster.metricConfig": - if e.complexity.Cluster.MetricConfig == nil { - break - } - - return e.complexity.Cluster.MetricConfig(childComplexity), true - case "Cluster.name": if e.complexity.Cluster.Name == nil { break @@ -1360,6 +1354,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.SubCluster.FlopRateSimd(childComplexity), true + case "SubCluster.footprint": + if e.complexity.SubCluster.Footprint == nil { + break + } + + return e.complexity.SubCluster.Footprint(childComplexity), true + case "SubCluster.memoryBandwidth": if e.complexity.SubCluster.MemoryBandwidth == nil { break @@ -1367,6 +1368,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.SubCluster.MemoryBandwidth(childComplexity), true + case "SubCluster.metricConfig": + if e.complexity.SubCluster.MetricConfig == nil { + break + } + + return e.complexity.SubCluster.MetricConfig(childComplexity), true + case "SubCluster.name": if 
e.complexity.SubCluster.Name == nil { break @@ -1732,7 +1740,7 @@ type Job { tags: [Tag!]! resources: [Resource!]! concurrentJobs: JobLinkResultList - footprint: [MetricValue] + footprint: Any metaData: Any userData: User } @@ -1745,7 +1753,6 @@ type JobLink { type Cluster { name: String! partitions: [String!]! # Slurm partitions - metricConfig: [MetricConfig!]! subClusters: [SubCluster!]! # Hardware partitions/subclusters } @@ -1761,6 +1768,8 @@ type SubCluster { flopRateSimd: MetricValue! memoryBandwidth: MetricValue! topology: Topology! + metricConfig: [MetricConfig!]! + footprint: [String!]! } type MetricValue { @@ -2737,72 +2746,6 @@ func (ec *executionContext) fieldContext_Cluster_partitions(ctx context.Context, return fc, nil } -func (ec *executionContext) _Cluster_metricConfig(ctx context.Context, field graphql.CollectedField, obj *schema.Cluster) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Cluster_metricConfig(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.MetricConfig, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.([]*schema.MetricConfig) - fc.Result = res - return ec.marshalNMetricConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_Cluster_metricConfig(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "Cluster", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "name": - return ec.fieldContext_MetricConfig_name(ctx, field) - case "unit": - return ec.fieldContext_MetricConfig_unit(ctx, field) - case "scope": - return ec.fieldContext_MetricConfig_scope(ctx, field) - case "aggregation": - return ec.fieldContext_MetricConfig_aggregation(ctx, field) - case "timestep": - return ec.fieldContext_MetricConfig_timestep(ctx, field) - case "peak": - return ec.fieldContext_MetricConfig_peak(ctx, field) - case "normal": - return ec.fieldContext_MetricConfig_normal(ctx, field) - case "caution": - return ec.fieldContext_MetricConfig_caution(ctx, field) - case "alert": - return ec.fieldContext_MetricConfig_alert(ctx, field) - case "subClusters": - return ec.fieldContext_MetricConfig_subClusters(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type MetricConfig", field.Name) - }, - } - return fc, nil -} - func (ec *executionContext) _Cluster_subClusters(ctx context.Context, field graphql.CollectedField, obj *schema.Cluster) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Cluster_subClusters(ctx, field) if err != nil { @@ -2864,6 +2807,10 @@ func (ec *executionContext) fieldContext_Cluster_subClusters(ctx context.Context return ec.fieldContext_SubCluster_memoryBandwidth(ctx, field) case "topology": return ec.fieldContext_SubCluster_topology(ctx, field) + case "metricConfig": + return ec.fieldContext_SubCluster_metricConfig(ctx, field) + case "footprint": + 
return ec.fieldContext_SubCluster_footprint(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type SubCluster", field.Name) }, @@ -4207,9 +4154,9 @@ func (ec *executionContext) _Job_footprint(ctx context.Context, field graphql.Co if resTmp == nil { return graphql.Null } - res := resTmp.([]*schema.MetricValue) + res := resTmp.(interface{}) fc.Result = res - return ec.marshalOMetricValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res) + return ec.marshalOAny2interface(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Job_footprint(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -4219,15 +4166,7 @@ func (ec *executionContext) fieldContext_Job_footprint(ctx context.Context, fiel IsMethod: true, IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "name": - return ec.fieldContext_MetricValue_name(ctx, field) - case "unit": - return ec.fieldContext_MetricValue_unit(ctx, field) - case "value": - return ec.fieldContext_MetricValue_value(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type MetricValue", field.Name) + return nil, errors.New("field of type Any does not have child fields") }, } return fc, nil @@ -7511,8 +7450,6 @@ func (ec *executionContext) fieldContext_Query_clusters(ctx context.Context, fie return ec.fieldContext_Cluster_name(ctx, field) case "partitions": return ec.fieldContext_Cluster_partitions(ctx, field) - case "metricConfig": - return ec.fieldContext_Cluster_metricConfig(ctx, field) case "subClusters": return ec.fieldContext_Cluster_subClusters(ctx, field) } @@ -9322,6 +9259,116 @@ func (ec *executionContext) fieldContext_SubCluster_topology(ctx context.Context return fc, nil } +func (ec *executionContext) _SubCluster_metricConfig(ctx context.Context, field graphql.CollectedField, obj *schema.SubCluster) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_SubCluster_metricConfig(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MetricConfig, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]schema.MetricConfig) + fc.Result = res + return ec.marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_SubCluster_metricConfig(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "SubCluster", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_MetricConfig_name(ctx, field) + case "unit": + return ec.fieldContext_MetricConfig_unit(ctx, field) + case "scope": + return ec.fieldContext_MetricConfig_scope(ctx, field) + case "aggregation": + return ec.fieldContext_MetricConfig_aggregation(ctx, field) + case 
"timestep": + return ec.fieldContext_MetricConfig_timestep(ctx, field) + case "peak": + return ec.fieldContext_MetricConfig_peak(ctx, field) + case "normal": + return ec.fieldContext_MetricConfig_normal(ctx, field) + case "caution": + return ec.fieldContext_MetricConfig_caution(ctx, field) + case "alert": + return ec.fieldContext_MetricConfig_alert(ctx, field) + case "subClusters": + return ec.fieldContext_MetricConfig_subClusters(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricConfig", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _SubCluster_footprint(ctx context.Context, field graphql.CollectedField, obj *schema.SubCluster) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_SubCluster_footprint(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Footprint, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]string) + fc.Result = res + return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_SubCluster_footprint(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "SubCluster", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _SubClusterConfig_name(ctx context.Context, field graphql.CollectedField, obj *schema.SubClusterConfig) (ret graphql.Marshaler) { fc, err := ec.fieldContext_SubClusterConfig_name(ctx, field) if err != nil { @@ -12679,11 +12726,6 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet, } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) - case "metricConfig": - out.Values[i] = ec._Cluster_metricConfig(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&out.Invalids, 1) - } case "subClusters": out.Values[i] = ec._Cluster_subClusters(ctx, field, obj) if out.Values[i] == graphql.Null { @@ -14519,6 +14561,16 @@ func (ec *executionContext) _SubCluster(ctx context.Context, sel ast.SelectionSe if out.Values[i] == graphql.Null { atomic.AddUint32(&out.Invalids, 1) } + case "metricConfig": + out.Values[i] = ec._SubCluster_metricConfig(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "footprint": + out.Values[i] = ec._SubCluster_footprint(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -15925,7 +15977,11 @@ func (ec *executionContext) marshalNJobsStatistics2ᚖgithubᚗcomᚋClusterCock return ec._JobsStatistics(ctx, sel, v) } -func (ec *executionContext) marshalNMetricConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.MetricConfig) 
graphql.Marshaler { +func (ec *executionContext) marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx context.Context, sel ast.SelectionSet, v schema.MetricConfig) graphql.Marshaler { + return ec._MetricConfig(ctx, sel, &v) +} + +func (ec *executionContext) marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricConfig) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -15949,7 +16005,7 @@ func (ec *executionContext) marshalNMetricConfig2ᚕᚖgithubᚗcomᚋClusterCoc if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNMetricConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx, sel, v[i]) + ret[i] = ec.marshalNMetricConfig2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx, sel, v[i]) } if isLen1 { f(i) @@ -15969,16 +16025,6 @@ func (ec *executionContext) marshalNMetricConfig2ᚕᚖgithubᚗcomᚋClusterCoc return ret } -func (ec *executionContext) marshalNMetricConfig2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfig(ctx context.Context, sel ast.SelectionSet, v *schema.MetricConfig) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - return graphql.Null - } - return ec._MetricConfig(ctx, sel, v) -} - func (ec *executionContext) marshalNMetricFootprints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricFootprintsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.MetricFootprints) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -17241,54 +17287,6 @@ func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockp return ec._MetricStatistics(ctx, sel, &v) } -func (ec *executionContext) marshalOMetricValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v []*schema.MetricValue) graphql.Marshaler { - if v == nil { - return graphql.Null - } - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalOMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - go f(i) - } - - } - wg.Wait() - - return ret -} - -func (ec *executionContext) marshalOMetricValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v *schema.MetricValue) graphql.Marshaler { - if v == nil { - return graphql.Null - } - return ec._MetricValue(ctx, sel, v) -} - func (ec *executionContext) unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐOrderByInput(ctx context.Context, v interface{}) (*model.OrderByInput, error) { if v == nil { return nil, nil diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index c621e4d..2c061e7 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -45,8 +45,8 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod } // 
Footprint is the resolver for the footprint field. -func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*schema.MetricValue, error) { - panic(fmt.Errorf("not implemented: Footprint - footprint")) +func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) (interface{}, error) { + return r.Repo.FetchFootprint(obj) } // MetaData is the resolver for the metaData field. diff --git a/internal/repository/job.go b/internal/repository/job.go index 8dda6e0..21540b2 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -222,6 +222,34 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er return archive.UpdateMetadata(job, job.MetaData) } +func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, error) { + start := time.Now() + cachekey := fmt.Sprintf("footprint:%d", job.ID) + if cached := r.cache.Get(cachekey, nil); cached != nil { + job.Footprint = cached.(map[string]float64) + return job.Footprint, nil + } + + if err := sq.Select("job.footprint").From("job").Where("job.id = ?", job.ID). + RunWith(r.stmtCache).QueryRow().Scan(&job.RawFootprint); err != nil { + log.Warn("Error while scanning for job footprint") + return nil, err + } + + if len(job.RawFootprint) == 0 { + return nil, nil + } + + if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil { + log.Warn("Error while unmarshaling raw footprint json") + return nil, err + } + + r.cache.Put(cachekey, job.Footprint, len(job.Footprint), 24*time.Hour) + log.Debugf("Timer FetchFootprint %s", time.Since(start)) + return job.Footprint, nil +} + func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) { var cnt int q := sq.Select("count(*)").From("job").Where("job.start_time < ?", startTime) diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index 39a3010..71b9548 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -75,9 +75,7 @@ name } metaData - flopsAnyAvg - memBwAvg - loadAvg + footprint } count hasNextPage diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index bb43094..3ab86da 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -42,28 +42,37 @@ export function init(extraInitQuery = "") { .query( `query { clusters { - name, - metricConfig { - name, unit { base, prefix }, peak, - normal, caution, alert, - timestep, scope, - aggregation, - subClusters { name, peak, normal, caution, alert, remove } - } + name partitions subClusters { - name, processorType + name + nodes + numberOfNodes + processorType socketsPerNode coresPerSocket threadsPerCore flopRateScalar { unit { base, prefix }, value } flopRateSimd { unit { base, prefix }, value } memoryBandwidth { unit { base, prefix }, value } - numberOfNodes topology { - node, socket, core + node + socket + core accelerators { id } } + metricConfig { + name + unit { base, prefix } + scope + aggregation + timestep + peak + normal + caution + alert + } + footprint } } tags { id, name, type } @@ -84,13 +93,18 @@ export function init(extraInitQuery = "") { const tags = [], clusters = []; + const allMetrics = []; setContext("tags", tags); setContext("clusters", clusters); - setContext("metrics", (cluster, metric) => { + setContext("allmetrics", allMetrics); + setContext("getMetricConfig", (cluster, subCluster, metric) => { if (typeof cluster !== "object") cluster = clusters.find((c) => c.name == cluster); - return cluster.metricConfig.find((m) => 
m.name == metric); + if (typeof subCluster !== "object") + subCluster = cluster.subClusters.find((sc) => sc.name == subCluster); + + return subCluster.metricConfig.find((m) => m.name == metric); }); setContext("on-init", (callback) => state.fetching ? subscribers.push(callback) : callback(state) @@ -111,7 +125,31 @@ export function init(extraInitQuery = "") { for (let tag of data.tags) tags.push(tag); - for (let cluster of data.clusters) clusters.push(cluster); + let globalmetrics = []; + for (let cluster of data.clusters) { + // Add full info to context object + clusters.push(cluster); + // Build global metric list with availability for joblist metricselect + for (let subcluster of cluster.subClusters) { + for (let scm of subcluster.metricConfig) { + let match = globalmetrics.find((gm) => gm.name == scm.name); + if (match) { + let submatch = match.availability.find((av) => av.cluster == cluster.name); + if (submatch) { + submatch.subclusters.push(subcluster.name) + } else { + match.availability.push({cluster: cluster.name, subclusters: [subcluster.name]}) + } + } else { + globalmetrics.push({name: scm.name, availability: [{cluster: cluster.name, subclusters: [subcluster.name]}]}); + } + } + } + } + // Add to ctx object + for (let gm of globalmetrics) allMetrics.push(gm); + + console.log('All Metrics List', allMetrics); state.data = data; tick().then(() => subscribers.forEach((cb) => cb(state))); @@ -298,6 +336,7 @@ export function stickyHeader(datatableHeaderSelector, updatePading) { onDestroy(() => document.removeEventListener("scroll", onscroll)); } +// Outdated: Frontend Will Now Receive final MetricList from backend export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubcluster const mc = getContext("metrics"); const thisConfig = mc(c, m); @@ -407,6 +446,7 @@ export function transformDataForRoofline(flopsAny, memBw) { // Uses Metric Objec // Return something to be plotted. The argument shall be the result of the // `nodeMetrics` GraphQL query. +// Remove "hardcoded" here or deemed necessary? 
export function transformPerNodeDataForRoofline(nodes) { let data = null const x = [], y = [] From bf6b87d65cfe447fb489903407c080334f63d2e3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 9 Jul 2024 09:50:32 +0200 Subject: [PATCH 047/443] Fix circular import after merge --- internal/importer/handleImport.go | 3 +-- internal/importer/initDB.go | 3 +-- internal/repository/job.go | 3 +-- internal/repository/stats.go | 12 ++++++++++++ internal/util/statistics.go | 13 ------------- 5 files changed, 15 insertions(+), 19 deletions(-) diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go index 81a312f..c4d55ab 100644 --- a/internal/importer/handleImport.go +++ b/internal/importer/handleImport.go @@ -13,7 +13,6 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -78,7 +77,7 @@ func HandleImportFlag(flag string) error { job.Footprint = make(map[string]float64) for _, fp := range sc.Footprint { - job.Footprint[fp] = util.LoadJobStat(&job, fp) + job.Footprint[fp] = repository.LoadJobStat(&job, fp) } job.RawFootprint, err = json.Marshal(job.Footprint) if err != nil { diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index 468ebb1..4b9abab 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -11,7 +11,6 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -69,7 +68,7 @@ func InitDB() error { job.Footprint = make(map[string]float64) for _, fp := range sc.Footprint { - job.Footprint[fp] = util.LoadJobStat(jobMeta, fp) + job.Footprint[fp] = repository.LoadJobStat(jobMeta, fp) } job.RawFootprint, err = json.Marshal(job.Footprint) diff --git a/internal/repository/job.go b/internal/repository/job.go index 21540b2..33b619f 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -16,7 +16,6 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" - "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/lrucache" @@ -305,7 +304,7 @@ func (r *JobRepository) MarkArchived( footprint := make(map[string]float64) for _, fp := range sc.Footprint { - footprint[fp] = util.LoadJobStat(jobMeta, fp) + footprint[fp] = LoadJobStat(jobMeta, fp) } var rawFootprint []byte diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 2e226ee..6865e18 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -284,6 +284,18 @@ func (r *JobRepository) JobsStats( return stats, nil } +func LoadJobStat(job *schema.JobMeta, metric string) float64 { + if stats, ok := job.Statistics[metric]; ok { + if metric == "mem_used" { + return stats.Max + } else { + return stats.Avg + } + } + + return 0.0 +} + func (r *JobRepository) JobCountGrouped( ctx context.Context, filter []*model.JobFilter, diff --git a/internal/util/statistics.go b/internal/util/statistics.go index 9e23b15..d75224f 100644 
--- a/internal/util/statistics.go +++ b/internal/util/statistics.go @@ -5,7 +5,6 @@ package util import ( - "github.com/ClusterCockpit/cc-backend/pkg/schema" "golang.org/x/exp/constraints" "fmt" @@ -27,18 +26,6 @@ func Max[T constraints.Ordered](a, b T) T { return b } -func LoadJobStat(job *schema.JobMeta, metric string) float64 { - if stats, ok := job.Statistics[metric]; ok { - if metric == "mem_used" { - return stats.Max - } else { - return stats.Avg - } - } - - return 0.0 -} - func sortedCopy(input []float64) []float64 { sorted := make([]float64, len(input)) copy(sorted, input) From f1427d52726cdb4069814ee4fbcad57a650da571 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 11 Jul 2024 11:09:14 +0200 Subject: [PATCH 048/443] Add global metric list including graphQL query --- api/schema.graphqls | 13 + go.mod | 1 - gqlgen.yml | 61 ++- internal/graph/generated/generated.go | 628 ++++++++++++++++++++++++++ internal/graph/schema.resolvers.go | 19 +- pkg/archive/archive.go | 55 ++- pkg/archive/clusterConfig.go | 27 +- pkg/archive/clusterConfig_test.go | 2 + pkg/schema/cluster.go | 14 + 9 files changed, 770 insertions(+), 50 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 017fd70..fa8bb20 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -178,6 +178,18 @@ type NodeMetrics { metrics: [JobMetricWithName!]! } +type ClusterSupport { + cluster: String! + subClusters: [String!]! +} + +type GlobalMetricListItem { + name: String! + unit: Unit! + scope: MetricScope! + availability: [ClusterSupport!]! +} + type Count { name: String! count: Int! @@ -192,6 +204,7 @@ type User { type Query { clusters: [Cluster!]! # List of all clusters tags: [Tag!]! # List of all tags + globalMetrics: [GlobalMetricListItem!]! user(username: String!): User allocatedNodes(cluster: String!): [Count!]! 
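For orientation, the new globalMetrics root field added above can be exercised with a minimal client query like the following sketch. The field selection simply mirrors the GlobalMetricListItem and ClusterSupport type definitions from this hunk (and the Unit shape already used elsewhere in the frontend queries); it is illustrative only, not part of the patch:

    query {
      globalMetrics {
        name
        unit { base, prefix }
        scope
        availability {
          cluster
          subClusters
        }
      }
    }

This server-side list supersedes the per-cluster aggregation that the frontend init query in web/frontend/src/utils.js still assembles client-side, which an earlier commit in this series already flags as outdated.
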
diff --git a/go.mod b/go.mod index 07743ad..fddcfbc 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/ClusterCockpit/cc-units v0.4.0 github.com/Masterminds/squirrel v1.5.3 github.com/coreos/go-oidc/v3 v3.9.0 - github.com/davecgh/go-spew v1.1.1 github.com/go-co-op/gocron v1.25.0 github.com/go-ldap/ldap/v3 v3.4.4 github.com/go-sql-driver/mysql v1.7.0 diff --git a/gqlgen.yml b/gqlgen.yml index 2db1bdb..917bab9 100644 --- a/gqlgen.yml +++ b/gqlgen.yml @@ -61,23 +61,50 @@ models: fields: partitions: resolver: true - NullableFloat: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Float" } - MetricScope: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricScope" } - MetricValue: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricValue" } - JobStatistics: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobStatistics" } + NullableFloat: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Float" } + MetricScope: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricScope" } + MetricValue: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricValue" } + JobStatistics: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobStatistics" } + GlobalMetricListItem: + { + model: "github.com/ClusterCockpit/cc-backend/pkg/schema.GlobalMetricListItem", + } + ClusterSupport: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.ClusterSupport" } Tag: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Tag" } - Resource: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Resource" } - JobState: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobState" } - TimeRange: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.TimeRange" } - IntRange: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.IntRange" } - JobMetric: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobMetric" } + Resource: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Resource" } + JobState: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobState" } + TimeRange: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.TimeRange" } + IntRange: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.IntRange" } + JobMetric: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.JobMetric" } Series: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Series" } - MetricStatistics: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricStatistics" } - MetricConfig: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricConfig" } - SubClusterConfig: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubClusterConfig" } - Accelerator: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Accelerator" } - Topology: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Topology" } - FilterRanges: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.FilterRanges" } - SubCluster: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubCluster" } - StatsSeries: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.StatsSeries" } + MetricStatistics: + { + model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricStatistics", + } + MetricConfig: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.MetricConfig" } + SubClusterConfig: + { + model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubClusterConfig", + } + Accelerator: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Accelerator" } + 
Topology: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Topology" } + FilterRanges: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.FilterRanges" } + SubCluster: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.SubCluster" } + StatsSeries: + { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.StatsSeries" } Unit: { model: "github.com/ClusterCockpit/cc-backend/pkg/schema.Unit" } diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index cb688f2..92467ce 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -64,6 +64,11 @@ type ComplexityRoot struct { SubClusters func(childComplexity int) int } + ClusterSupport struct { + Cluster func(childComplexity int) int + SubClusters func(childComplexity int) int + } + Count struct { Count func(childComplexity int) int Name func(childComplexity int) int @@ -74,6 +79,13 @@ type ComplexityRoot struct { TimeWeights func(childComplexity int) int } + GlobalMetricListItem struct { + Availability func(childComplexity int) int + Name func(childComplexity int) int + Scope func(childComplexity int) int + Unit func(childComplexity int) int + } + HistoPoint struct { Count func(childComplexity int) int Value func(childComplexity int) int @@ -223,6 +235,7 @@ type ComplexityRoot struct { Query struct { AllocatedNodes func(childComplexity int, cluster string) int Clusters func(childComplexity int) int + GlobalMetrics func(childComplexity int) int Job func(childComplexity int, id string) int JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int @@ -342,6 +355,7 @@ type MutationResolver interface { type QueryResolver interface { Clusters(ctx context.Context) ([]*schema.Cluster, error) Tags(ctx context.Context) ([]*schema.Tag, error) + GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error) User(ctx context.Context, username string) (*model.User, error) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) Job(ctx context.Context, id string) (*schema.Job, error) @@ -417,6 +431,20 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Cluster.SubClusters(childComplexity), true + case "ClusterSupport.cluster": + if e.complexity.ClusterSupport.Cluster == nil { + break + } + + return e.complexity.ClusterSupport.Cluster(childComplexity), true + + case "ClusterSupport.subClusters": + if e.complexity.ClusterSupport.SubClusters == nil { + break + } + + return e.complexity.ClusterSupport.SubClusters(childComplexity), true + case "Count.count": if e.complexity.Count.Count == nil { break @@ -445,6 +473,34 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Footprints.TimeWeights(childComplexity), true + case "GlobalMetricListItem.availability": + if e.complexity.GlobalMetricListItem.Availability == nil { + break + } + + return e.complexity.GlobalMetricListItem.Availability(childComplexity), true + + case "GlobalMetricListItem.name": + if e.complexity.GlobalMetricListItem.Name == nil { + break + } + + return e.complexity.GlobalMetricListItem.Name(childComplexity), true + + case "GlobalMetricListItem.scope": + if e.complexity.GlobalMetricListItem.Scope == nil { + break + } + + return e.complexity.GlobalMetricListItem.Scope(childComplexity), true + + case 
"GlobalMetricListItem.unit": + if e.complexity.GlobalMetricListItem.Unit == nil { + break + } + + return e.complexity.GlobalMetricListItem.Unit(childComplexity), true + case "HistoPoint.count": if e.complexity.HistoPoint.Count == nil { break @@ -1154,6 +1210,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.Clusters(childComplexity), true + case "Query.globalMetrics": + if e.complexity.Query.GlobalMetrics == nil { + break + } + + return e.complexity.Query.GlobalMetrics(childComplexity), true + case "Query.job": if e.complexity.Query.Job == nil { break @@ -1899,6 +1962,18 @@ type NodeMetrics { metrics: [JobMetricWithName!]! } +type ClusterSupport { + cluster: String! + subClusters: [String!]! +} + +type GlobalMetricListItem { + name: String! + unit: Unit! + scope: MetricScope! + availability: [ClusterSupport!]! +} + type Count { name: String! count: Int! @@ -1913,6 +1988,7 @@ type User { type Query { clusters: [Cluster!]! # List of all clusters tags: [Tag!]! # List of all tags + globalMetrics: [GlobalMetricListItem!]! user(username: String!): User allocatedNodes(cluster: String!): [Count!]! @@ -2827,6 +2903,94 @@ func (ec *executionContext) fieldContext_Cluster_subClusters(ctx context.Context return fc, nil } +func (ec *executionContext) _ClusterSupport_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.ClusterSupport) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ClusterSupport_cluster(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Cluster, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ClusterSupport_cluster(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ClusterSupport", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _ClusterSupport_subClusters(ctx context.Context, field graphql.CollectedField, obj *schema.ClusterSupport) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ClusterSupport_subClusters(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.SubClusters, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]string) + fc.Result = res + return 
ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ClusterSupport_subClusters(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ClusterSupport", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Count_name(ctx context.Context, field graphql.CollectedField, obj *model.Count) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Count_name(ctx, field) if err != nil { @@ -3017,6 +3181,194 @@ func (ec *executionContext) fieldContext_Footprints_metrics(ctx context.Context, return fc, nil } +func (ec *executionContext) _GlobalMetricListItem_name(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GlobalMetricListItem_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GlobalMetricListItem_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GlobalMetricListItem", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _GlobalMetricListItem_unit(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GlobalMetricListItem_unit(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Unit, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(schema.Unit) + fc.Result = res + return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GlobalMetricListItem_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GlobalMetricListItem", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, 
field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "base": + return ec.fieldContext_Unit_base(ctx, field) + case "prefix": + return ec.fieldContext_Unit_prefix(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Unit", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _GlobalMetricListItem_scope(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GlobalMetricListItem_scope(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Scope, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(schema.MetricScope) + fc.Result = res + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GlobalMetricListItem_scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GlobalMetricListItem", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type MetricScope does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _GlobalMetricListItem_availability(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GlobalMetricListItem_availability(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Availability, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]schema.ClusterSupport) + fc.Result = res + return ec.marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GlobalMetricListItem_availability(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GlobalMetricListItem", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "cluster": + return ec.fieldContext_ClusterSupport_cluster(ctx, field) + case "subClusters": + return ec.fieldContext_ClusterSupport_subClusters(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type ClusterSupport", field.Name) + }, + } + return fc, nil +} + func (ec 
*executionContext) _HistoPoint_count(ctx context.Context, field graphql.CollectedField, obj *model.HistoPoint) (ret graphql.Marshaler) { fc, err := ec.fieldContext_HistoPoint_count(ctx, field) if err != nil { @@ -7522,6 +7874,60 @@ func (ec *executionContext) fieldContext_Query_tags(ctx context.Context, field g return fc, nil } +func (ec *executionContext) _Query_globalMetrics(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_globalMetrics(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().GlobalMetrics(rctx) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*schema.GlobalMetricListItem) + fc.Result = res + return ec.marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_globalMetrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_GlobalMetricListItem_name(ctx, field) + case "unit": + return ec.fieldContext_GlobalMetricListItem_unit(ctx, field) + case "scope": + return ec.fieldContext_GlobalMetricListItem_scope(ctx, field) + case "availability": + return ec.fieldContext_GlobalMetricListItem_availability(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type GlobalMetricListItem", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _Query_user(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query_user(ctx, field) if err != nil { @@ -12809,6 +13215,50 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet, return out } +var clusterSupportImplementors = []string{"ClusterSupport"} + +func (ec *executionContext) _ClusterSupport(ctx context.Context, sel ast.SelectionSet, obj *schema.ClusterSupport) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, clusterSupportImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ClusterSupport") + case "cluster": + out.Values[i] = ec._ClusterSupport_cluster(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "subClusters": + out.Values[i] = ec._ClusterSupport_subClusters(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + 
ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var countImplementors = []string{"Count"} func (ec *executionContext) _Count(ctx context.Context, sel ast.SelectionSet, obj *model.Count) graphql.Marshaler { @@ -12897,6 +13347,60 @@ func (ec *executionContext) _Footprints(ctx context.Context, sel ast.SelectionSe return out } +var globalMetricListItemImplementors = []string{"GlobalMetricListItem"} + +func (ec *executionContext) _GlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, obj *schema.GlobalMetricListItem) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, globalMetricListItemImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("GlobalMetricListItem") + case "name": + out.Values[i] = ec._GlobalMetricListItem_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "unit": + out.Values[i] = ec._GlobalMetricListItem_unit(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "scope": + out.Values[i] = ec._GlobalMetricListItem_scope(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "availability": + out.Values[i] = ec._GlobalMetricListItem_availability(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var histoPointImplementors = []string{"HistoPoint"} func (ec *executionContext) _HistoPoint(ctx context.Context, sel ast.SelectionSet, obj *model.HistoPoint) graphql.Marshaler { @@ -14156,6 +14660,28 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "globalMetrics": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_globalMetrics(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "user": field := field @@ -15386,6 +15912,54 @@ func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋClusterCockpitᚋc return ec._Cluster(ctx, sel, v) } +func (ec *executionContext) marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx context.Context, sel ast.SelectionSet, v schema.ClusterSupport) graphql.Marshaler { + return ec._ClusterSupport(ctx, sel, &v) +} + +func (ec *executionContext) 
marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.ClusterSupport) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNClusterSupport2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupport(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) marshalNCount2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Count) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -15519,6 +16093,60 @@ func (ec *executionContext) marshalNFloat2ᚕᚕfloat64ᚄ(ctx context.Context, return ret } +func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.GlobalMetricListItem) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNGlobalMetricListItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItem(ctx context.Context, sel ast.SelectionSet, v *schema.GlobalMetricListItem) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._GlobalMetricListItem(ctx, sel, v) +} + func (ec *executionContext) marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.HistoPoint) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 2c061e7..c95a307 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -150,6 +150,11 @@ func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) { return r.Repo.GetTags(nil) } +// GlobalMetrics is the resolver for the globalMetrics field. +func (r *queryResolver) GlobalMetrics(ctx context.Context) ([]*schema.GlobalMetricListItem, error) { + return archive.GlobalMetricList, nil +} + // User is the resolver for the user field. 
func (r *queryResolver) User(ctx context.Context, username string) (*model.User, error) { return repository.GetUserRepository().FetchUserInCtx(ctx, username) @@ -414,9 +419,11 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } // SubCluster returns generated.SubClusterResolver implementation. func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } -type clusterResolver struct{ *Resolver } -type jobResolver struct{ *Resolver } -type metricValueResolver struct{ *Resolver } -type mutationResolver struct{ *Resolver } -type queryResolver struct{ *Resolver } -type subClusterResolver struct{ *Resolver } +type ( + clusterResolver struct{ *Resolver } + jobResolver struct{ *Resolver } + metricValueResolver struct{ *Resolver } + mutationResolver struct{ *Resolver } + queryResolver struct{ *Resolver } + subClusterResolver struct{ *Resolver } +) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 4a05194..56c5d47 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -7,6 +7,7 @@ package archive import ( "encoding/json" "fmt" + "sync" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/lrucache" @@ -53,40 +54,48 @@ type JobContainer struct { } var ( + initOnce sync.Once cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024) ar ArchiveBackend useArchive bool ) func Init(rawConfig json.RawMessage, disableArchive bool) error { - useArchive = !disableArchive + var err error - var cfg struct { - Kind string `json:"kind"` - } + initOnce.Do(func() { + useArchive = !disableArchive - if err := json.Unmarshal(rawConfig, &cfg); err != nil { - log.Warn("Error while unmarshaling raw config json") - return err - } + var cfg struct { + Kind string `json:"kind"` + } - switch cfg.Kind { - case "file": - ar = &FsArchive{} - // case "s3": - // ar = &S3Archive{} - default: - return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", cfg.Kind) - } + if err = json.Unmarshal(rawConfig, &cfg); err != nil { + log.Warn("Error while unmarshaling raw config json") + return + } - version, err := ar.Init(rawConfig) - if err != nil { - log.Error("Error while initializing archiveBackend") - return err - } - log.Infof("Load archive version %d", version) + switch cfg.Kind { + case "file": + ar = &FsArchive{} + // case "s3": + // ar = &S3Archive{} + default: + err = fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", cfg.Kind) + } - return initClusterConfig() + var version uint64 + version, err = ar.Init(rawConfig) + if err != nil { + log.Error("Error while initializing archiveBackend") + return + } + log.Infof("Load archive version %d", version) + + err = initClusterConfig() + }) + + return err } func GetHandle() ArchiveBackend { diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index 43d131b..0516abb 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -13,13 +13,15 @@ import ( ) var ( - Clusters []*schema.Cluster - nodeLists map[string]map[string]NodeList + Clusters []*schema.Cluster + GlobalMetricList []*schema.GlobalMetricListItem + nodeLists map[string]map[string]NodeList ) func initClusterConfig() error { Clusters = []*schema.Cluster{} nodeLists = map[string]map[string]NodeList{} + metricLookup := make(map[string]schema.GlobalMetricListItem) for _, c := range ar.GetClusters() { @@ -51,6 +53,12 @@ func initClusterConfig() error { return errors.New("cluster.metricConfig.scope must be a valid scope ('node', 'scocket', ...)") } + 
ml, ok := metricLookup[mc.Name] + if !ok { + metricLookup[mc.Name] = schema.GlobalMetricListItem{Name: mc.Name, Scope: mc.Scope, Unit: mc.Unit} + ml = metricLookup[mc.Name] + } + availability := schema.ClusterSupport{Cluster: cluster.Name} scLookup := make(map[string]*schema.SubClusterConfig) for _, scc := range mc.SubClusters { @@ -63,6 +71,7 @@ func initClusterConfig() error { if cfg, ok := scLookup[sc.Name]; ok { if !cfg.Remove { + availability.SubClusters = append(availability.SubClusters, sc.Name) newMetric.Peak = cfg.Peak newMetric.Peak = cfg.Peak newMetric.Normal = cfg.Normal @@ -74,16 +83,24 @@ func initClusterConfig() error { if newMetric.Footprint { sc.Footprint = append(sc.Footprint, newMetric.Name) } + if newMetric.Energy { + sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name) + } } } else { + availability.SubClusters = append(availability.SubClusters, sc.Name) sc.MetricConfig = append(sc.MetricConfig, *newMetric) if newMetric.Footprint { sc.Footprint = append(sc.Footprint, newMetric.Name) } + if newMetric.Energy { + sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name) + } } - } + ml.Availability = append(metricLookup[mc.Name].Availability, availability) + metricLookup[mc.Name] = ml } Clusters = append(Clusters, cluster) @@ -102,6 +119,10 @@ func initClusterConfig() error { } } + for _, ml := range metricLookup { + GlobalMetricList = append(GlobalMetricList, &ml) + } + return nil } diff --git a/pkg/archive/clusterConfig_test.go b/pkg/archive/clusterConfig_test.go index 942d29c..c73ab81 100644 --- a/pkg/archive/clusterConfig_test.go +++ b/pkg/archive/clusterConfig_test.go @@ -27,4 +27,6 @@ func TestClusterConfig(t *testing.T) { if len(sc.MetricConfig) != 15 { t.Fail() } + + // spew.Dump(archive.GlobalMetricList) } diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index 3bd05d9..026c80b 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -39,6 +39,7 @@ type SubCluster struct { MemoryBandwidth MetricValue `json:"memoryBandwidth"` MetricConfig []MetricConfig `json:"metricConfig,omitempty"` Footprint []string `json:"footprint,omitempty"` + EnergyFootprint []string `json:"energyFootprint,omitempty"` SocketsPerNode int `json:"socketsPerNode"` CoresPerSocket int `json:"coresPerSocket"` ThreadsPerCore int `json:"threadsPerCore"` @@ -66,6 +67,7 @@ type MetricConfig struct { Caution float64 `json:"caution"` Alert float64 `json:"alert"` Footprint bool `json:"footprint"` + Energy bool `json:"energy"` } type Cluster struct { @@ -74,6 +76,18 @@ type Cluster struct { SubClusters []*SubCluster `json:"subClusters"` } +type ClusterSupport struct { + Cluster string `json:"cluster"` + SubClusters []string `json:"subclusters"` +} + +type GlobalMetricListItem struct { + Name string `json:"name"` + Unit Unit `json:"unit"` + Scope MetricScope `json:"scope"` + Availability []ClusterSupport `json:"availability"` +} + // Return a list of socket IDs given a list of hwthread IDs. Even if just one // hwthread is in that socket, add it to the list. 
If no hwthreads other than // those in the argument list are assigned to one of the sockets in the first From e8e3b1595d2efa40a0ce6e211e6617e6eb63fdef Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 11 Jul 2024 16:12:20 +0200 Subject: [PATCH 049/443] Switch to Go 1.22 to get rid of global loop variable bug --- go.mod | 3 ++- pkg/archive/clusterConfig_test.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index fddcfbc..6e86ea7 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,13 @@ module github.com/ClusterCockpit/cc-backend -go 1.18 +go 1.22 require ( github.com/99designs/gqlgen v0.17.45 github.com/ClusterCockpit/cc-units v0.4.0 github.com/Masterminds/squirrel v1.5.3 github.com/coreos/go-oidc/v3 v3.9.0 + github.com/davecgh/go-spew v1.1.1 github.com/go-co-op/gocron v1.25.0 github.com/go-ldap/ldap/v3 v3.4.4 github.com/go-sql-driver/mysql v1.7.0 diff --git a/pkg/archive/clusterConfig_test.go b/pkg/archive/clusterConfig_test.go index c73ab81..8624374 100644 --- a/pkg/archive/clusterConfig_test.go +++ b/pkg/archive/clusterConfig_test.go @@ -29,4 +29,5 @@ func TestClusterConfig(t *testing.T) { } // spew.Dump(archive.GlobalMetricList) + // t.Fail() } From b64ce1f67f7db0c220dda3950d58f137dff14119 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 11 Jul 2024 16:58:12 +0200 Subject: [PATCH 050/443] Add LowerIsBetter Metric boolean. Upgrade dependencies. --- api/schema.graphqls | 1 + go.mod | 92 +- go.sum | 1984 ++--------------- internal/graph/generated/generated.go | 488 ++-- internal/graph/schema.resolvers.go | 20 +- pkg/archive/clusterConfig.go | 2 + pkg/archive/clusterConfig_test.go | 6 + .../testdata/archive/fritz/cluster.json | 3 + pkg/schema/cluster.go | 41 +- 9 files changed, 506 insertions(+), 2131 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index fa8bb20..c0a7127 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -99,6 +99,7 @@ type MetricConfig { normal: Float caution: Float! alert: Float! + lowerIsBetter: Boolean subClusters: [SubClusterConfig!]! 
} diff --git a/go.mod b/go.mod index 6e86ea7..52b0215 100644 --- a/go.mod +++ b/go.mod @@ -3,56 +3,53 @@ module github.com/ClusterCockpit/cc-backend go 1.22 require ( - github.com/99designs/gqlgen v0.17.45 + github.com/99designs/gqlgen v0.17.49 github.com/ClusterCockpit/cc-units v0.4.0 - github.com/Masterminds/squirrel v1.5.3 - github.com/coreos/go-oidc/v3 v3.9.0 - github.com/davecgh/go-spew v1.1.1 - github.com/go-co-op/gocron v1.25.0 - github.com/go-ldap/ldap/v3 v3.4.4 - github.com/go-sql-driver/mysql v1.7.0 + github.com/Masterminds/squirrel v1.5.4 + github.com/coreos/go-oidc/v3 v3.11.0 + github.com/go-co-op/gocron v1.37.0 + github.com/go-ldap/ldap/v3 v3.4.8 + github.com/go-sql-driver/mysql v1.8.1 github.com/golang-jwt/jwt/v5 v5.2.1 - github.com/golang-migrate/migrate/v4 v4.15.2 - github.com/google/gops v0.3.27 - github.com/gorilla/handlers v1.5.1 - github.com/gorilla/mux v1.8.0 - github.com/gorilla/sessions v1.2.1 - github.com/influxdata/influxdb-client-go/v2 v2.12.2 - github.com/jmoiron/sqlx v1.3.5 - github.com/mattn/go-sqlite3 v1.14.16 - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/common v0.40.0 + github.com/golang-migrate/migrate/v4 v4.17.1 + github.com/google/gops v0.3.28 + github.com/gorilla/handlers v1.5.2 + github.com/gorilla/mux v1.8.1 + github.com/gorilla/sessions v1.3.0 + github.com/influxdata/influxdb-client-go/v2 v2.13.0 + github.com/jmoiron/sqlx v1.4.0 + github.com/mattn/go-sqlite3 v1.14.22 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/common v0.55.0 github.com/qustavo/sqlhooks/v2 v2.1.0 - github.com/santhosh-tekuri/jsonschema/v5 v5.2.0 - github.com/swaggo/http-swagger v1.3.3 + github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 + github.com/swaggo/http-swagger v1.3.4 github.com/swaggo/swag v1.16.3 - github.com/vektah/gqlparser/v2 v2.5.11 - golang.org/x/crypto v0.21.0 - golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea - golang.org/x/oauth2 v0.13.0 + github.com/vektah/gqlparser/v2 v2.5.16 + golang.org/x/crypto v0.25.0 + golang.org/x/exp v0.0.0-20240707233637-46b078467d37 + golang.org/x/oauth2 v0.21.0 ) require ( + filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/KyleBanks/depth v1.2.1 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/containerd v1.6.26 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/deepmap/oapi-codegen v1.12.4 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect - github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect + github.com/go-jose/go-jose/v4 v4.0.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/securecookie v1.1.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect 
diff --git a/go.mod b/go.mod
index 6e86ea7..52b0215 100644
--- a/go.mod
+++ b/go.mod
@@ -3,56 +3,53 @@ module github.com/ClusterCockpit/cc-backend
 go 1.22
 
 require (
-	github.com/99designs/gqlgen v0.17.45
+	github.com/99designs/gqlgen v0.17.49
 	github.com/ClusterCockpit/cc-units v0.4.0
-	github.com/Masterminds/squirrel v1.5.3
-	github.com/coreos/go-oidc/v3 v3.9.0
-	github.com/davecgh/go-spew v1.1.1
-	github.com/go-co-op/gocron v1.25.0
-	github.com/go-ldap/ldap/v3 v3.4.4
-	github.com/go-sql-driver/mysql v1.7.0
+	github.com/Masterminds/squirrel v1.5.4
+	github.com/coreos/go-oidc/v3 v3.11.0
+	github.com/go-co-op/gocron v1.37.0
+	github.com/go-ldap/ldap/v3 v3.4.8
+	github.com/go-sql-driver/mysql v1.8.1
 	github.com/golang-jwt/jwt/v5 v5.2.1
-	github.com/golang-migrate/migrate/v4 v4.15.2
-	github.com/google/gops v0.3.27
-	github.com/gorilla/handlers v1.5.1
-	github.com/gorilla/mux v1.8.0
-	github.com/gorilla/sessions v1.2.1
-	github.com/influxdata/influxdb-client-go/v2 v2.12.2
-	github.com/jmoiron/sqlx v1.3.5
-	github.com/mattn/go-sqlite3 v1.14.16
-	github.com/prometheus/client_golang v1.14.0
-	github.com/prometheus/common v0.40.0
+	github.com/golang-migrate/migrate/v4 v4.17.1
+	github.com/google/gops v0.3.28
+	github.com/gorilla/handlers v1.5.2
+	github.com/gorilla/mux v1.8.1
+	github.com/gorilla/sessions v1.3.0
+	github.com/influxdata/influxdb-client-go/v2 v2.13.0
+	github.com/jmoiron/sqlx v1.4.0
+	github.com/mattn/go-sqlite3 v1.14.22
+	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/common v0.55.0
 	github.com/qustavo/sqlhooks/v2 v2.1.0
-	github.com/santhosh-tekuri/jsonschema/v5 v5.2.0
-	github.com/swaggo/http-swagger v1.3.3
+	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
+	github.com/swaggo/http-swagger v1.3.4
 	github.com/swaggo/swag v1.16.3
-	github.com/vektah/gqlparser/v2 v2.5.11
-	golang.org/x/crypto v0.21.0
-	golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea
-	golang.org/x/oauth2 v0.13.0
+	github.com/vektah/gqlparser/v2 v2.5.16
+	golang.org/x/crypto v0.25.0
+	golang.org/x/exp v0.0.0-20240707233637-46b078467d37
+	golang.org/x/oauth2 v0.21.0
 )
 
 require (
+	filippo.io/edwards25519 v1.1.0 // indirect
 	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
 	github.com/KyleBanks/depth v1.2.1 // indirect
 	github.com/agnivade/levenshtein v1.1.1 // indirect
 	github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/containerd/containerd v1.6.26 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
-	github.com/deepmap/oapi-codegen v1.12.4 // indirect
-	github.com/felixge/httpsnoop v1.0.3 // indirect
-	github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.3 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
 	github.com/go-openapi/spec v0.21.0 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/gorilla/securecookie v1.1.1 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/gorilla/securecookie v1.1.2 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
@@ -63,30 +60,29 @@ require (
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
+	github.com/oapi-codegen/runtime v1.1.1 // indirect
 	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
-	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sosodev/duration v1.2.0 // indirect
-	github.com/swaggo/files v1.0.0 // indirect
-	github.com/urfave/cli/v2 v2.27.1 // indirect
+	github.com/sosodev/duration v1.3.1 // indirect
+	github.com/swaggo/files v1.0.1 // indirect
+	github.com/urfave/cli/v2 v2.27.2 // indirect
 	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
-	go.uber.org/atomic v1.10.0 // indirect
-	golang.org/x/mod v0.16.0 // indirect
-	golang.org/x/net v0.22.0 // indirect
-	golang.org/x/sys v0.18.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.19.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	go.uber.org/atomic v1.11.0 // indirect
+	golang.org/x/mod v0.19.0 // indirect
+	golang.org/x/net v0.27.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/sys v0.22.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/tools v0.23.0 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
diff --git a/go.sum b/go.sum
index 94d59c8..b250495 100644
--- a/go.sum
+++ b/go.sum
@@ -1,2009 +1,321 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod
h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org 
v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -github.com/99designs/gqlgen v0.17.45 h1:bH0AH67vIJo8JKNKPJP+pOPpQhZeuVRQLf53dKIpDik= -github.com/99designs/gqlgen v0.17.45/go.mod h1:Bas0XQ+Jiu/Xm5E33jC8sES3G+iC2esHBMXcq0fUPs0= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ= +github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod 
h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0= github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= -github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc= -github.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= -github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim v0.9.10 h1:TxXGNmcbQxBKVWvjvTocNb6jrPyeHlk5EiDhhgHgggs= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 
-github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/goquery v1.9.1 h1:mTL6XjbJTZdpfL+Gwl5U2h1l9yEkJjhmlTeV9VPW7UI= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE= +github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= -github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= +github.com/andybalholm/cascadia v1.3.2/go.mod 
h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= -github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= -github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags 
v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod 
h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= -github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= -github.com/containerd/containerd v1.6.26 h1:VVfrE6ZpyisvB1fzoY8Vkiq4sy+i5oF4uk7zu03RaHs= -github.com/containerd/containerd v1.6.26/go.mod h1:I4TRdsdoo5MlKob5khDJS2EPT1l1oMNaE2MBm6FrwxM= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod 
h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/containers/ocicrypt v1.1.2/go.mod 
h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= -github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= +github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client 
v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deepmap/oapi-codegen v1.12.4 h1:pPmn6qI9MuOtCz82WY2Xaw46EQjgvxednXXrP7g5Q2s= -github.com/deepmap/oapi-codegen v1.12.4/go.mod h1:3lgHGMu6myQ2vqbbTXH2H1o4eXFTGnFiDaOaKKl5yas= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= -github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.13+incompatible h1:5s7uxnKZG+b8hYWlPYUi6x1Sjpq2MSt96d15eLZeHyw= -github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg= +github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 
h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 
h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= -github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= -github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-co-op/gocron v1.25.0 h1:pzAdtily1JVIf6lGby6K0JKzhishgLOllQgNxoYbR+8= -github.com/go-co-op/gocron v1.25.0/go.mod h1:JHrQDY4iE1HZPkgTyoccY4xtDgLbrUwL+xODIbEQdnc= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= -github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-ldap/ldap/v3 v3.4.4 
h1:qPjipEpt+qDa6SI/h1fzuGWoRUY+qqQ9sOZq67/PYUs=
-github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
+github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0=
+github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY=
+github.com/go-jose/go-jose/v4 v4.0.3 h1:o8aphO8Hv6RPmH+GfzVuyf7YXSBibp+8YyHdOoDESGo=
+github.com/go-jose/go-jose/v4 v4.0.3/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
+github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ=
+github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
 github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
 github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
 github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
 github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
-github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
-github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
-github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
-github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
-github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
-github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
-github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
-github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
-github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
-github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
-github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
-github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
-github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
-github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
-github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
-github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
-github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
-github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
-github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
-github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc=
-github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw=
-github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4=
+github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gops v0.3.27 h1:BDdWfedShsBbeatZ820oA4DbVOC8yJ4NI8xAlDFWfgI=
-github.com/google/gops v0.3.27/go.mod h1:lYqabmfnq4Q6UumWNx96Hjup5BDAVc8zmfIy0SkNCSk=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
+github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
-github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
-github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
+github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
+github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
 github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/gorilla/sessions v1.3.0 h1:XYlkq7KcpOB2ZhHBPv5WpjMIxrQosiZanfoy1HLZFzg=
+github.com/gorilla/sessions v1.3.0/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
 github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/influxdb-client-go/v2 v2.12.2 h1:uYABKdrEKlYm+++qfKdbgaHKBPmoWR5wpbmj6MBB/2g=
-github.com/influxdata/influxdb-client-go/v2 v2.12.2/go.mod h1:YteV91FiQxRdccyJ2cHvj2f/5sq4y4Njqu1fQzsQCOU=
+github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM=
+github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4=
 github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
 github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
-github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
-github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
-github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
-github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
-github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
-github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
-github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
-github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
-github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
-github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
-github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
-github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
-github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
-github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
-github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
-github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
-github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
-github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
-github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
-github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
-github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
-github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
-github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
-github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
-github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
-github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
-github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
-github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
+github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
+github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
 github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
 github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
 github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
 github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
-github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
-github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
+github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
 github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
-github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.40.0 h1:Afz7EVRqGg2Mqqf4JuF9vdvp1pi220m55Pi9T2JnO4Q=
-github.com/prometheus/common v0.40.0/go.mod h1:L65ZJPSmfn/UBWLQIHV7dBrKFidB/wPlF1y5TlSt9OE=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAiCZi0=
 github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
-github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/santhosh-tekuri/jsonschema/v5 v5.2.0 h1:WCcC4vZDS1tYNxjWlwRJZQy28r8CMoggKnxNzxsVDMQ=
-github.com/santhosh-tekuri/jsonschema/v5 v5.2.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
-github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
 github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/sosodev/duration v1.2.0 h1:pqK/FLSjsAADWY74SyWDCjOcd5l7H8GSnnOGEB9A1Us=
-github.com/sosodev/duration v1.2.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
+github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
 github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/swaggo/files v1.0.0 h1:1gGXVIeUFCS/dta17rnP0iOpr6CXFwKD7EO5ID233e4= -github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO13kc= -github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc= -github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE= +github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg= +github.com/swaggo/http-swagger v1.3.4 h1:q7t/XLx0n15H1Q9/tk3Y9L4n210XzJF5WtnDX64a5ww= +github.com/swaggo/http-swagger v1.3.4/go.mod h1:9dAh0unqMBAlbp1uE2Uc2mQTxNMU/ha4UbucIg1MFkQ= github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= -github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= -github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8= 
-github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= +github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= +github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8= +github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod 
h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod 
h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net 
v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
[go.sum hunk condensed: extraction collapsed the one-entry-per-line layout of this dependency update. The hunk prunes a long run of stale indirect pins (golang.org/x/{sys,term,text,time,tools,xerrors}, gonum.org/v1, google.golang.org/{api,appengine,cloud,genproto,grpc,protobuf}, gopkg.in, gorm.io, gotest.tools, honnef.co, k8s.io, modernc.org, rsc.io, sigs.k8s.io) and bumps the retained pins:]
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= -modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= -modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= -modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= -modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= -modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 92467ce..57f1e90 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -176,16 +176,17 @@ type ComplexityRoot struct { } MetricConfig struct { - Aggregation func(childComplexity int) int - Alert func(childComplexity int) int - Caution func(childComplexity int) int - Name func(childComplexity int) int - Normal func(childComplexity int) int - Peak func(childComplexity int) int - Scope func(childComplexity int) int - SubClusters func(childComplexity int) int - Timestep func(childComplexity int) int - Unit func(childComplexity int) int + Aggregation func(childComplexity int) int + Alert func(childComplexity int) int + Caution func(childComplexity int) int + LowerIsBetter func(childComplexity int) int + Name func(childComplexity int) int + Normal func(childComplexity int) int + Peak func(childComplexity int) int + Scope func(childComplexity int) int + SubClusters func(childComplexity int) 
int + Timestep func(childComplexity int) int + Unit func(childComplexity int) int } MetricFootprints struct { @@ -338,8 +339,8 @@ type JobResolver interface { Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) - Footprint(ctx context.Context, obj *schema.Job) (interface{}, error) - MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) + Footprint(ctx context.Context, obj *schema.Job) (any, error) + MetaData(ctx context.Context, obj *schema.Job) (any, error) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) } type MetricValueResolver interface { @@ -956,6 +957,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricConfig.Caution(childComplexity), true + case "MetricConfig.lowerIsBetter": + if e.complexity.MetricConfig.LowerIsBetter == nil { + break + } + + return e.complexity.MetricConfig.LowerIsBetter(childComplexity), true + case "MetricConfig.name": if e.complexity.MetricConfig.Name == nil { break @@ -1883,6 +1891,7 @@ type MetricConfig { normal: Float caution: Float! alert: Float! + lowerIsBetter: Boolean subClusters: [SubClusterConfig!]! } @@ -2642,7 +2651,7 @@ func (ec *executionContext) _Accelerator_id(ctx context.Context, field graphql.C return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Accelerator_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Accelerator_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Accelerator", Field: field, @@ -2686,7 +2695,7 @@ func (ec *executionContext) _Accelerator_type(ctx context.Context, field graphql return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Accelerator_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Accelerator_type(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Accelerator", Field: field, @@ -2730,7 +2739,7 @@ func (ec *executionContext) _Accelerator_model(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Accelerator_model(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Accelerator_model(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Accelerator", Field: field, @@ -2774,7 +2783,7 @@ func (ec *executionContext) _Cluster_name(ctx context.Context, field graphql.Col return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Cluster_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Cluster_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Cluster", Field: field, @@ -2818,7 +2827,7 @@ func (ec *executionContext) _Cluster_partitions(ctx context.Context, field graph return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res) } -func (ec *executionContext) 
fieldContext_Cluster_partitions(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Cluster_partitions(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Cluster", Field: field, @@ -2862,7 +2871,7 @@ func (ec *executionContext) _Cluster_subClusters(ctx context.Context, field grap return ec.marshalNSubCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Cluster_subClusters(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Cluster_subClusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Cluster", Field: field, @@ -2934,7 +2943,7 @@ func (ec *executionContext) _ClusterSupport_cluster(ctx context.Context, field g return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_ClusterSupport_cluster(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_ClusterSupport_cluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "ClusterSupport", Field: field, @@ -2978,7 +2987,7 @@ func (ec *executionContext) _ClusterSupport_subClusters(ctx context.Context, fie return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_ClusterSupport_subClusters(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_ClusterSupport_subClusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "ClusterSupport", Field: field, @@ -3022,7 +3031,7 @@ func (ec *executionContext) _Count_name(ctx context.Context, field graphql.Colle return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Count_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Count_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Count", Field: field, @@ -3066,7 +3075,7 @@ func (ec *executionContext) _Count_count(ctx context.Context, field graphql.Coll return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Count_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Count_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Count", Field: field, @@ -3110,7 +3119,7 @@ func (ec *executionContext) _Footprints_timeWeights(ctx context.Context, field g return ec.marshalNTimeWeights2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐTimeWeights(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Footprints_timeWeights(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Footprints_timeWeights(_ context.Context, field graphql.CollectedField) (fc 
*graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Footprints", Field: field, @@ -3162,7 +3171,7 @@ func (ec *executionContext) _Footprints_metrics(ctx context.Context, field graph return ec.marshalNMetricFootprints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricFootprintsᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Footprints_metrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Footprints_metrics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Footprints", Field: field, @@ -3212,7 +3221,7 @@ func (ec *executionContext) _GlobalMetricListItem_name(ctx context.Context, fiel return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_GlobalMetricListItem_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_GlobalMetricListItem_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "GlobalMetricListItem", Field: field, @@ -3256,7 +3265,7 @@ func (ec *executionContext) _GlobalMetricListItem_unit(ctx context.Context, fiel return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_GlobalMetricListItem_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_GlobalMetricListItem_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "GlobalMetricListItem", Field: field, @@ -3306,7 +3315,7 @@ func (ec *executionContext) _GlobalMetricListItem_scope(ctx context.Context, fie return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_GlobalMetricListItem_scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_GlobalMetricListItem_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "GlobalMetricListItem", Field: field, @@ -3350,7 +3359,7 @@ func (ec *executionContext) _GlobalMetricListItem_availability(ctx context.Conte return ec.marshalNClusterSupport2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterSupportᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_GlobalMetricListItem_availability(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_GlobalMetricListItem_availability(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "GlobalMetricListItem", Field: field, @@ -3400,7 +3409,7 @@ func (ec *executionContext) _HistoPoint_count(ctx context.Context, field graphql return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_HistoPoint_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_HistoPoint_count(_ context.Context, field graphql.CollectedField) (fc 
*graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "HistoPoint", Field: field, @@ -3444,7 +3453,7 @@ func (ec *executionContext) _HistoPoint_value(ctx context.Context, field graphql return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_HistoPoint_value(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_HistoPoint_value(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "HistoPoint", Field: field, @@ -3488,7 +3497,7 @@ func (ec *executionContext) _IntRangeOutput_from(ctx context.Context, field grap return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_IntRangeOutput_from(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_IntRangeOutput_from(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "IntRangeOutput", Field: field, @@ -3532,7 +3541,7 @@ func (ec *executionContext) _IntRangeOutput_to(ctx context.Context, field graphq return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_IntRangeOutput_to(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_IntRangeOutput_to(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "IntRangeOutput", Field: field, @@ -3576,7 +3585,7 @@ func (ec *executionContext) _Job_id(ctx context.Context, field graphql.Collected return ec.marshalNID2int64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3620,7 +3629,7 @@ func (ec *executionContext) _Job_jobId(ctx context.Context, field graphql.Collec return ec.marshalNInt2int64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_jobId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_jobId(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3664,7 +3673,7 @@ func (ec *executionContext) _Job_user(ctx context.Context, field graphql.Collect return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_user(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_user(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3708,7 +3717,7 @@ func (ec *executionContext) _Job_project(ctx context.Context, field graphql.Coll return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_project(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_project(_ 
context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3752,7 +3761,7 @@ func (ec *executionContext) _Job_cluster(ctx context.Context, field graphql.Coll return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_cluster(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_cluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3796,7 +3805,7 @@ func (ec *executionContext) _Job_subCluster(ctx context.Context, field graphql.C return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_subCluster(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_subCluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3840,7 +3849,7 @@ func (ec *executionContext) _Job_startTime(ctx context.Context, field graphql.Co return ec.marshalNTime2timeᚐTime(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_startTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_startTime(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3884,7 +3893,7 @@ func (ec *executionContext) _Job_duration(ctx context.Context, field graphql.Col return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_duration(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_duration(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3928,7 +3937,7 @@ func (ec *executionContext) _Job_walltime(ctx context.Context, field graphql.Col return ec.marshalNInt2int64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_walltime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_walltime(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -3972,7 +3981,7 @@ func (ec *executionContext) _Job_numNodes(ctx context.Context, field graphql.Col return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_numNodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_numNodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4016,7 +4025,7 @@ func (ec *executionContext) _Job_numHWThreads(ctx context.Context, field graphql return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_numHWThreads(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) 
fieldContext_Job_numHWThreads(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4060,7 +4069,7 @@ func (ec *executionContext) _Job_numAcc(ctx context.Context, field graphql.Colle return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_numAcc(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_numAcc(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4104,7 +4113,7 @@ func (ec *executionContext) _Job_SMT(ctx context.Context, field graphql.Collecte return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_SMT(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_SMT(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4148,7 +4157,7 @@ func (ec *executionContext) _Job_exclusive(ctx context.Context, field graphql.Co return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_exclusive(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_exclusive(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4192,7 +4201,7 @@ func (ec *executionContext) _Job_partition(ctx context.Context, field graphql.Co return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_partition(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_partition(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4236,7 +4245,7 @@ func (ec *executionContext) _Job_arrayJobId(ctx context.Context, field graphql.C return ec.marshalNInt2int64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_arrayJobId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_arrayJobId(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4280,7 +4289,7 @@ func (ec *executionContext) _Job_monitoringStatus(ctx context.Context, field gra return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_monitoringStatus(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_monitoringStatus(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4324,7 +4333,7 @@ func (ec *executionContext) _Job_state(ctx context.Context, field graphql.Collec return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobState(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_state(ctx context.Context, field 
graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_state(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4368,7 +4377,7 @@ func (ec *executionContext) _Job_tags(ctx context.Context, field graphql.Collect return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_tags(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_tags(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4420,7 +4429,7 @@ func (ec *executionContext) _Job_resources(ctx context.Context, field graphql.Co return ec.marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐResourceᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_resources(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_resources(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4471,7 +4480,7 @@ func (ec *executionContext) _Job_concurrentJobs(ctx context.Context, field graph return ec.marshalOJobLinkResultList2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobLinkResultList(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_concurrentJobs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_concurrentJobs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4515,12 +4524,12 @@ func (ec *executionContext) _Job_footprint(ctx context.Context, field graphql.Co if resTmp == nil { return graphql.Null } - res := resTmp.(interface{}) + res := resTmp.(any) fc.Result = res return ec.marshalOAny2interface(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_footprint(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_footprint(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4556,12 +4565,12 @@ func (ec *executionContext) _Job_metaData(ctx context.Context, field graphql.Col if resTmp == nil { return graphql.Null } - res := resTmp.(interface{}) + res := resTmp.(any) fc.Result = res return ec.marshalOAny2interface(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_metaData(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_metaData(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4602,7 +4611,7 @@ func (ec *executionContext) _Job_userData(ctx context.Context, field graphql.Col return ec.marshalOUser2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Job_userData(ctx context.Context, field 
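
Two of the hunks above (for Job_footprint and Job_metaData) also swap resTmp.(interface{}) for resTmp.(any). Since Go 1.18, any is a predeclared alias for interface{}, so the change is purely cosmetic; both spellings are interchangeable, as this small sketch shows:

package main

import "fmt"

func main() {
	// any and interface{} name the same type, so assignments and
	// type assertions behave identically with either spelling.
	var a interface{} = 3.14
	var b any = a

	if f, ok := b.(float64); ok {
		fmt.Println(f) // prints 3.14
	}
}
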
graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Job_userData(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Job", Field: field, @@ -4654,7 +4663,7 @@ func (ec *executionContext) _JobLink_id(ctx context.Context, field graphql.Colle return ec.marshalNID2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobLink_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobLink_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobLink", Field: field, @@ -4698,7 +4707,7 @@ func (ec *executionContext) _JobLink_jobId(ctx context.Context, field graphql.Co return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobLink_jobId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobLink_jobId(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobLink", Field: field, @@ -4739,7 +4748,7 @@ func (ec *executionContext) _JobLinkResultList_listQuery(ctx context.Context, fi return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobLinkResultList_listQuery(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobLinkResultList_listQuery(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobLinkResultList", Field: field, @@ -4783,7 +4792,7 @@ func (ec *executionContext) _JobLinkResultList_items(ctx context.Context, field return ec.marshalNJobLink2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobLinkᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobLinkResultList_items(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobLinkResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobLinkResultList", Field: field, @@ -4830,7 +4839,7 @@ func (ec *executionContext) _JobLinkResultList_count(ctx context.Context, field return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobLinkResultList_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobLinkResultList_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobLinkResultList", Field: field, @@ -4871,7 +4880,7 @@ func (ec *executionContext) _JobMetric_unit(ctx context.Context, field graphql.C return ec.marshalOUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobMetric_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobMetric_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobMetric", Field: 
field, @@ -4921,7 +4930,7 @@ func (ec *executionContext) _JobMetric_timestep(ctx context.Context, field graph return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobMetric_timestep(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobMetric_timestep(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobMetric", Field: field, @@ -4962,7 +4971,7 @@ func (ec *executionContext) _JobMetric_series(ctx context.Context, field graphql return ec.marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeriesᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobMetric_series(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobMetric_series(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobMetric", Field: field, @@ -5013,7 +5022,7 @@ func (ec *executionContext) _JobMetric_statisticsSeries(ctx context.Context, fie return ec.marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐStatsSeries(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobMetric", Field: field, @@ -5067,7 +5076,7 @@ func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field g return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobMetricWithName_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobMetricWithName_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobMetricWithName", Field: field, @@ -5111,7 +5120,7 @@ func (ec *executionContext) _JobMetricWithName_scope(ctx context.Context, field return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobMetricWithName_scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobMetricWithName_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobMetricWithName", Field: field, @@ -5155,7 +5164,7 @@ func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobMetric(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobMetricWithName_metric(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobMetricWithName_metric(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobMetricWithName", Field: field, @@ -5209,7 +5218,7 @@ func (ec *executionContext) _JobResultList_items(ctx 
context.Context, field grap return ec.marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobResultList_items(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobResultList", Field: field, @@ -5300,7 +5309,7 @@ func (ec *executionContext) _JobResultList_offset(ctx context.Context, field gra return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobResultList_offset(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobResultList_offset(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobResultList", Field: field, @@ -5341,7 +5350,7 @@ func (ec *executionContext) _JobResultList_limit(ctx context.Context, field grap return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobResultList_limit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobResultList_limit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobResultList", Field: field, @@ -5382,7 +5391,7 @@ func (ec *executionContext) _JobResultList_count(ctx context.Context, field grap return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobResultList_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobResultList_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobResultList", Field: field, @@ -5423,7 +5432,7 @@ func (ec *executionContext) _JobResultList_hasNextPage(ctx context.Context, fiel return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobResultList_hasNextPage(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobResultList", Field: field, @@ -5467,7 +5476,7 @@ func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphq return ec.marshalNID2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5511,7 +5520,7 @@ func (ec *executionContext) _JobsStatistics_name(ctx context.Context, field grap return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) 
fieldContext_JobsStatistics_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5555,7 +5564,7 @@ func (ec *executionContext) _JobsStatistics_totalJobs(ctx context.Context, field return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalJobs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalJobs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5599,7 +5608,7 @@ func (ec *executionContext) _JobsStatistics_runningJobs(ctx context.Context, fie return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_runningJobs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_runningJobs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5643,7 +5652,7 @@ func (ec *executionContext) _JobsStatistics_shortJobs(ctx context.Context, field return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_shortJobs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_shortJobs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5687,7 +5696,7 @@ func (ec *executionContext) _JobsStatistics_totalWalltime(ctx context.Context, f return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalWalltime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalWalltime(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5731,7 +5740,7 @@ func (ec *executionContext) _JobsStatistics_totalNodes(ctx context.Context, fiel return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalNodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalNodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5775,7 +5784,7 @@ func (ec *executionContext) _JobsStatistics_totalNodeHours(ctx context.Context, return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalNodeHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalNodeHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5819,7 +5828,7 @@ func (ec *executionContext) _JobsStatistics_totalCores(ctx 
context.Context, fiel return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalCores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalCores(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5863,7 +5872,7 @@ func (ec *executionContext) _JobsStatistics_totalCoreHours(ctx context.Context, return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalCoreHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalCoreHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5907,7 +5916,7 @@ func (ec *executionContext) _JobsStatistics_totalAccs(ctx context.Context, field return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalAccs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalAccs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5951,7 +5960,7 @@ func (ec *executionContext) _JobsStatistics_totalAccHours(ctx context.Context, f return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_totalAccHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_totalAccHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -5995,7 +6004,7 @@ func (ec *executionContext) _JobsStatistics_histDuration(ctx context.Context, fi return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_histDuration(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_histDuration(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -6045,7 +6054,7 @@ func (ec *executionContext) _JobsStatistics_histNumNodes(ctx context.Context, fi return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_histNumNodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_histNumNodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -6095,7 +6104,7 @@ func (ec *executionContext) _JobsStatistics_histNumCores(ctx context.Context, fi return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx, 
field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_histNumCores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_histNumCores(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -6145,7 +6154,7 @@ func (ec *executionContext) _JobsStatistics_histNumAccs(ctx context.Context, fie return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐHistoPointᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_histNumAccs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_histNumAccs(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -6195,7 +6204,7 @@ func (ec *executionContext) _JobsStatistics_histMetrics(ctx context.Context, fie return ec.marshalNMetricHistoPoints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointsᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobsStatistics_histMetrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobsStatistics_histMetrics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobsStatistics", Field: field, @@ -6247,7 +6256,7 @@ func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricConfig_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricConfig_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricConfig", Field: field, @@ -6291,7 +6300,7 @@ func (ec *executionContext) _MetricConfig_unit(ctx context.Context, field graphq return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricConfig_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricConfig_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricConfig", Field: field, @@ -6341,7 +6350,7 @@ func (ec *executionContext) _MetricConfig_scope(ctx context.Context, field graph return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricConfig_scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricConfig_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricConfig", Field: field, @@ -6385,7 +6394,7 @@ func (ec *executionContext) _MetricConfig_aggregation(ctx context.Context, field return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec 
*executionContext) fieldContext_MetricConfig_aggregation(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricConfig_aggregation(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricConfig",
 		Field:      field,
@@ -6429,7 +6438,7 @@ func (ec *executionContext) _MetricConfig_timestep(ctx context.Context, field gr
 	return ec.marshalNInt2int(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricConfig_timestep(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricConfig_timestep(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricConfig",
 		Field:      field,
@@ -6473,7 +6482,7 @@ func (ec *executionContext) _MetricConfig_peak(ctx context.Context, field graphq
 	return ec.marshalNFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricConfig_peak(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricConfig_peak(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricConfig",
 		Field:      field,
@@ -6514,7 +6523,7 @@ func (ec *executionContext) _MetricConfig_normal(ctx context.Context, field grap
 	return ec.marshalOFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricConfig_normal(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricConfig_normal(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricConfig",
 		Field:      field,
@@ -6558,7 +6567,7 @@ func (ec *executionContext) _MetricConfig_caution(ctx context.Context, field gra
 	return ec.marshalNFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricConfig_caution(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricConfig_caution(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricConfig",
 		Field:      field,
@@ -6602,7 +6611,7 @@ func (ec *executionContext) _MetricConfig_alert(ctx context.Context, field graph
 	return ec.marshalNFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricConfig_alert(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricConfig_alert(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricConfig",
 		Field:      field,
@@ -6615,6 +6624,47 @@ func (ec *executionContext) fieldContext_MetricConfig_alert(ctx context.Context,
 	return fc, nil
 }
 
+func (ec *executionContext) _MetricConfig_lowerIsBetter(ctx context.Context, field graphql.CollectedField, obj *schema.MetricConfig) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_MetricConfig_lowerIsBetter(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.LowerIsBetter, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(bool)
+	fc.Result = res
+	return ec.marshalOBoolean2bool(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_MetricConfig_lowerIsBetter(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "MetricConfig",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Boolean does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _MetricConfig_subClusters(ctx context.Context, field graphql.CollectedField, obj *schema.MetricConfig) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_MetricConfig_subClusters(ctx, field)
 	if err != nil {
@@ -6646,7 +6696,7 @@ func (ec *executionContext) _MetricConfig_subClusters(ctx context.Context, field
 	return ec.marshalNSubClusterConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSubClusterConfigᚄ(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricConfig_subClusters(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricConfig_subClusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricConfig",
 		Field:      field,
@@ -6704,7 +6754,7 @@ func (ec *executionContext) _MetricFootprints_metric(ctx context.Context, field
 	return ec.marshalNString2string(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricFootprints_metric(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricFootprints_metric(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricFootprints",
 		Field:      field,
@@ -6748,7 +6798,7 @@ func (ec *executionContext) _MetricFootprints_data(ctx context.Context, field gr
 	return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricFootprints_data(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricFootprints_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricFootprints",
 		Field:      field,
@@ -6789,7 +6839,7 @@ func (ec *executionContext) _MetricHistoPoint_bin(ctx context.Context, field gra
 	return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_MetricHistoPoint_bin(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "MetricHistoPoint",
 		Field:      field,
@@ -6833,7 +6883,7 @@ func (ec *executionContext) _MetricHistoPoint_count(ctx context.Context,
field g return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoint_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoint_count(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoint", Field: field, @@ -6874,7 +6924,7 @@ func (ec *executionContext) _MetricHistoPoint_min(ctx context.Context, field gra return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoint_min(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoint", Field: field, @@ -6915,7 +6965,7 @@ func (ec *executionContext) _MetricHistoPoint_max(ctx context.Context, field gra return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoint_max(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoint", Field: field, @@ -6959,7 +7009,7 @@ func (ec *executionContext) _MetricHistoPoints_metric(ctx context.Context, field return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoints_metric(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoints_metric(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoints", Field: field, @@ -7003,7 +7053,7 @@ func (ec *executionContext) _MetricHistoPoints_unit(ctx context.Context, field g return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoints_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoints_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoints", Field: field, @@ -7044,7 +7094,7 @@ func (ec *executionContext) _MetricHistoPoints_data(ctx context.Context, field g return ec.marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricHistoPoints_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricHistoPoints", Field: field, @@ -7098,7 +7148,7 @@ func (ec *executionContext) _MetricStatistics_avg(ctx context.Context, field gra return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricStatistics_avg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err 
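
The main substantive change in this stretch of the diff is the pair of generated resolvers for the new optional Boolean field MetricConfig.lowerIsBetter above, marshalled via marshalOBoolean2bool from schema.MetricConfig.LowerIsBetter. Presumably the flag lets consumers invert their threshold handling for metrics where a smaller value is the good case. A hypothetical client-side sketch of that idea; the rate helper and its rating levels are illustrative and not part of cc-backend:

package main

import "fmt"

// rate compares a metric value against caution/alert thresholds.
// When lowerIsBetter is set, exceeding a threshold is bad; otherwise
// falling below it is bad.
func rate(value, caution, alert float64, lowerIsBetter bool) string {
	if lowerIsBetter {
		switch {
		case value >= alert:
			return "alert"
		case value >= caution:
			return "caution"
		}
		return "ok"
	}
	switch {
	case value <= alert:
		return "alert"
	case value <= caution:
		return "caution"
	}
	return "ok"
}

func main() {
	fmt.Println(rate(95, 80, 90, true))  // lower is better: 95 -> "alert"
	fmt.Println(rate(95, 50, 30, false)) // higher is better: 95 -> "ok"
}
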
error) { +func (ec *executionContext) fieldContext_MetricStatistics_avg(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricStatistics", Field: field, @@ -7142,7 +7192,7 @@ func (ec *executionContext) _MetricStatistics_min(ctx context.Context, field gra return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricStatistics_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricStatistics_min(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricStatistics", Field: field, @@ -7186,7 +7236,7 @@ func (ec *executionContext) _MetricStatistics_max(ctx context.Context, field gra return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricStatistics_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricStatistics_max(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricStatistics", Field: field, @@ -7227,7 +7277,7 @@ func (ec *executionContext) _MetricValue_name(ctx context.Context, field graphql return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricValue_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricValue", Field: field, @@ -7271,7 +7321,7 @@ func (ec *executionContext) _MetricValue_unit(ctx context.Context, field graphql return ec.marshalNUnit2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐUnit(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricValue_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricValue_unit(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricValue", Field: field, @@ -7321,7 +7371,7 @@ func (ec *executionContext) _MetricValue_value(ctx context.Context, field graphq return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_MetricValue_value(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_MetricValue_value(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "MetricValue", Field: field, @@ -7661,7 +7711,7 @@ func (ec *executionContext) _NodeMetrics_host(ctx context.Context, field graphql return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_NodeMetrics_host(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_NodeMetrics_host(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "NodeMetrics", Field: field, @@ -7705,7 +7755,7 @@ func (ec *executionContext) _NodeMetrics_subCluster(ctx 
context.Context, field g return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_NodeMetrics_subCluster(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_NodeMetrics_subCluster(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "NodeMetrics", Field: field, @@ -7749,7 +7799,7 @@ func (ec *executionContext) _NodeMetrics_metrics(ctx context.Context, field grap return ec.marshalNJobMetricWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricWithNameᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_NodeMetrics_metrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_NodeMetrics_metrics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "NodeMetrics", Field: field, @@ -7801,7 +7851,7 @@ func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.C return ec.marshalNCluster2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐClusterᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_clusters(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_clusters(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, @@ -7853,7 +7903,7 @@ func (ec *executionContext) _Query_tags(ctx context.Context, field graphql.Colle return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTagᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_tags(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_tags(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, @@ -7905,7 +7955,7 @@ func (ec *executionContext) _Query_globalMetrics(ctx context.Context, field grap return ec.marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_globalMetrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_globalMetrics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, @@ -8650,7 +8700,7 @@ func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.C return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query___schema(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query___schema(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, @@ -8708,7 +8758,7 @@ func (ec *executionContext) _Resource_hostname(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func 
(ec *executionContext) fieldContext_Resource_hostname(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Resource_hostname(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Resource", Field: field, @@ -8749,7 +8799,7 @@ func (ec *executionContext) _Resource_hwthreads(ctx context.Context, field graph return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Resource_hwthreads(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Resource_hwthreads(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Resource", Field: field, @@ -8790,7 +8840,7 @@ func (ec *executionContext) _Resource_accelerators(ctx context.Context, field gr return ec.marshalOString2ᚕstringᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Resource_accelerators(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Resource_accelerators(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Resource", Field: field, @@ -8831,7 +8881,7 @@ func (ec *executionContext) _Resource_configuration(ctx context.Context, field g return ec.marshalOString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Resource_configuration(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Resource_configuration(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Resource", Field: field, @@ -8875,7 +8925,7 @@ func (ec *executionContext) _Series_hostname(ctx context.Context, field graphql. 
return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Series_hostname(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Series_hostname(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Series", Field: field, @@ -8916,7 +8966,7 @@ func (ec *executionContext) _Series_id(ctx context.Context, field graphql.Collec return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Series_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Series_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Series", Field: field, @@ -8957,7 +9007,7 @@ func (ec *executionContext) _Series_statistics(ctx context.Context, field graphq return ec.marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Series_statistics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Series_statistics(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Series", Field: field, @@ -9009,7 +9059,7 @@ func (ec *executionContext) _Series_data(ctx context.Context, field graphql.Coll return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Series_data(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Series_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Series", Field: field, @@ -9053,7 +9103,7 @@ func (ec *executionContext) _StatsSeries_mean(ctx context.Context, field graphql return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_StatsSeries_mean(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_StatsSeries_mean(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "StatsSeries", Field: field, @@ -9097,7 +9147,7 @@ func (ec *executionContext) _StatsSeries_median(ctx context.Context, field graph return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_StatsSeries_median(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_StatsSeries_median(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "StatsSeries", Field: field, @@ -9141,7 +9191,7 @@ func (ec *executionContext) _StatsSeries_min(ctx context.Context, field graphql. 
return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_StatsSeries_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_StatsSeries_min(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "StatsSeries", Field: field, @@ -9185,7 +9235,7 @@ func (ec *executionContext) _StatsSeries_max(ctx context.Context, field graphql. return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_StatsSeries_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_StatsSeries_max(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "StatsSeries", Field: field, @@ -9229,7 +9279,7 @@ func (ec *executionContext) _SubCluster_name(ctx context.Context, field graphql. return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9273,7 +9323,7 @@ func (ec *executionContext) _SubCluster_nodes(ctx context.Context, field graphql return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_nodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_nodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9317,7 +9367,7 @@ func (ec *executionContext) _SubCluster_numberOfNodes(ctx context.Context, field return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_numberOfNodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_numberOfNodes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9361,7 +9411,7 @@ func (ec *executionContext) _SubCluster_processorType(ctx context.Context, field return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_processorType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_processorType(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9405,7 +9455,7 @@ func (ec *executionContext) _SubCluster_socketsPerNode(ctx context.Context, fiel return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_socketsPerNode(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) 
fieldContext_SubCluster_socketsPerNode(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9449,7 +9499,7 @@ func (ec *executionContext) _SubCluster_coresPerSocket(ctx context.Context, fiel return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_coresPerSocket(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_coresPerSocket(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9493,7 +9543,7 @@ func (ec *executionContext) _SubCluster_threadsPerCore(ctx context.Context, fiel return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_threadsPerCore(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_threadsPerCore(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9537,7 +9587,7 @@ func (ec *executionContext) _SubCluster_flopRateScalar(ctx context.Context, fiel return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_flopRateScalar(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_flopRateScalar(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9589,7 +9639,7 @@ func (ec *executionContext) _SubCluster_flopRateSimd(ctx context.Context, field return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_flopRateSimd(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_flopRateSimd(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9641,7 +9691,7 @@ func (ec *executionContext) _SubCluster_memoryBandwidth(ctx context.Context, fie return ec.marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_memoryBandwidth(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_memoryBandwidth(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "SubCluster", Field: field, @@ -9693,7 +9743,7 @@ func (ec *executionContext) _SubCluster_topology(ctx context.Context, field grap return ec.marshalNTopology2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTopology(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_SubCluster_topology(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_SubCluster_topology(_ context.Context, field 
graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubCluster",
 		Field:      field,
@@ -9751,7 +9801,7 @@ func (ec *executionContext) _SubCluster_metricConfig(ctx context.Context, field
 	return ec.marshalNMetricConfig2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricConfigᚄ(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubCluster_metricConfig(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubCluster_metricConfig(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubCluster",
 		Field:      field,
@@ -9777,6 +9827,8 @@ func (ec *executionContext) fieldContext_SubCluster_metricConfig(ctx context.Con
 				return ec.fieldContext_MetricConfig_caution(ctx, field)
 			case "alert":
 				return ec.fieldContext_MetricConfig_alert(ctx, field)
+			case "lowerIsBetter":
+				return ec.fieldContext_MetricConfig_lowerIsBetter(ctx, field)
 			case "subClusters":
 				return ec.fieldContext_MetricConfig_subClusters(ctx, field)
 			}
@@ -9817,7 +9869,7 @@ func (ec *executionContext) _SubCluster_footprint(ctx context.Context, field gra
 	return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubCluster_footprint(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubCluster_footprint(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubCluster",
 		Field:      field,
@@ -9861,7 +9913,7 @@ func (ec *executionContext) _SubClusterConfig_name(ctx context.Context, field gr
 	return ec.marshalNString2string(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubClusterConfig_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubClusterConfig_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubClusterConfig",
 		Field:      field,
@@ -9902,7 +9954,7 @@ func (ec *executionContext) _SubClusterConfig_peak(ctx context.Context, field gr
 	return ec.marshalOFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubClusterConfig_peak(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubClusterConfig_peak(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubClusterConfig",
 		Field:      field,
@@ -9943,7 +9995,7 @@ func (ec *executionContext) _SubClusterConfig_normal(ctx context.Context, field
 	return ec.marshalOFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubClusterConfig_normal(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubClusterConfig_normal(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubClusterConfig",
 		Field:      field,
@@ -9984,7 +10036,7 @@ func (ec *executionContext) _SubClusterConfig_caution(ctx context.Context, field
 	return ec.marshalOFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubClusterConfig_caution(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubClusterConfig_caution(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubClusterConfig",
 		Field:      field,
@@ -10025,7 +10077,7 @@ func (ec *executionContext) _SubClusterConfig_alert(ctx context.Context, field g
 	return ec.marshalOFloat2float64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubClusterConfig_alert(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubClusterConfig_alert(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubClusterConfig",
 		Field:      field,
@@ -10066,7 +10118,7 @@ func (ec *executionContext) _SubClusterConfig_remove(ctx context.Context, field
 	return ec.marshalOBoolean2bool(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_SubClusterConfig_remove(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_SubClusterConfig_remove(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "SubClusterConfig",
 		Field:      field,
@@ -10110,7 +10162,7 @@ func (ec *executionContext) _Tag_id(ctx context.Context, field graphql.Collected
 	return ec.marshalNID2int64(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_Tag_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Tag_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "Tag",
 		Field:      field,
@@ -10154,7 +10206,7 @@ func (ec *executionContext) _Tag_type(ctx context.Context, field graphql.Collect
 	return ec.marshalNString2string(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_Tag_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Tag_type(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "Tag",
 		Field:      field,
@@ -10198,7 +10250,7 @@ func (ec *executionContext) _Tag_name(ctx context.Context, field graphql.Collect
 	return ec.marshalNString2string(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_Tag_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_Tag_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "Tag",
 		Field:      field,
@@ -10242,7 +10294,7 @@ func (ec *executionContext) _TimeRangeOutput_from(ctx context.Context, field gra
 	return ec.marshalNTime2timeᚐTime(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) fieldContext_TimeRangeOutput_from(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+func (ec *executionContext) fieldContext_TimeRangeOutput_from(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
 	fc = &graphql.FieldContext{
 		Object:     "TimeRangeOutput",
 		Field:      field,
@@ -10286,7 +10338,7 @@ func (ec *executionContext) _TimeRangeOutput_to(ctx context.Context, field graph
 	return
ec.marshalNTime2timeᚐTime(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TimeRangeOutput_to(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_TimeRangeOutput_to(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "TimeRangeOutput", Field: field, @@ -10330,7 +10382,7 @@ func (ec *executionContext) _TimeWeights_nodeHours(ctx context.Context, field gr return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TimeWeights_nodeHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_TimeWeights_nodeHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "TimeWeights", Field: field, @@ -10374,7 +10426,7 @@ func (ec *executionContext) _TimeWeights_accHours(ctx context.Context, field gra return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TimeWeights_accHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_TimeWeights_accHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "TimeWeights", Field: field, @@ -10418,7 +10470,7 @@ func (ec *executionContext) _TimeWeights_coreHours(ctx context.Context, field gr return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TimeWeights_coreHours(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_TimeWeights_coreHours(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "TimeWeights", Field: field, @@ -10459,7 +10511,7 @@ func (ec *executionContext) _Topology_node(ctx context.Context, field graphql.Co return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Topology_node(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Topology_node(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Topology", Field: field, @@ -10500,7 +10552,7 @@ func (ec *executionContext) _Topology_socket(ctx context.Context, field graphql. 
return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Topology_socket(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Topology_socket(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Topology", Field: field, @@ -10541,7 +10593,7 @@ func (ec *executionContext) _Topology_memoryDomain(ctx context.Context, field gr return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Topology_memoryDomain(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Topology_memoryDomain(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Topology", Field: field, @@ -10582,7 +10634,7 @@ func (ec *executionContext) _Topology_die(ctx context.Context, field graphql.Col return ec.marshalOInt2ᚕᚕᚖintᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Topology_die(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Topology_die(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Topology", Field: field, @@ -10623,7 +10675,7 @@ func (ec *executionContext) _Topology_core(ctx context.Context, field graphql.Co return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Topology_core(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Topology_core(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Topology", Field: field, @@ -10664,7 +10716,7 @@ func (ec *executionContext) _Topology_accelerators(ctx context.Context, field gr return ec.marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐAcceleratorᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Topology_accelerators(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Topology_accelerators(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Topology", Field: field, @@ -10716,7 +10768,7 @@ func (ec *executionContext) _Unit_base(ctx context.Context, field graphql.Collec return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Unit_base(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Unit_base(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Unit", Field: field, @@ -10757,7 +10809,7 @@ func (ec *executionContext) _Unit_prefix(ctx context.Context, field graphql.Coll return ec.marshalOString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Unit_prefix(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Unit_prefix(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = 
&graphql.FieldContext{ Object: "Unit", Field: field, @@ -10801,7 +10853,7 @@ func (ec *executionContext) _User_username(ctx context.Context, field graphql.Co return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_User_username(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_username(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "User", Field: field, @@ -10845,7 +10897,7 @@ func (ec *executionContext) _User_name(ctx context.Context, field graphql.Collec return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_User_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "User", Field: field, @@ -10889,7 +10941,7 @@ func (ec *executionContext) _User_email(ctx context.Context, field graphql.Colle return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_User_email(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_email(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "User", Field: field, @@ -10933,7 +10985,7 @@ func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Directive_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Directive_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Directive", Field: field, @@ -10974,7 +11026,7 @@ func (ec *executionContext) ___Directive_description(ctx context.Context, field return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Directive_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Directive_description(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Directive", Field: field, @@ -11018,7 +11070,7 @@ func (ec *executionContext) ___Directive_locations(ctx context.Context, field gr return ec.marshalN__DirectiveLocation2ᚕstringᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Directive_locations(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Directive_locations(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Directive", Field: field, @@ -11062,7 +11114,7 @@ func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Directive_args(ctx context.Context, field 
graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Directive_args(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Directive", Field: field, @@ -11116,7 +11168,7 @@ func (ec *executionContext) ___Directive_isRepeatable(ctx context.Context, field return ec.marshalNBoolean2bool(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Directive_isRepeatable(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Directive", Field: field, @@ -11160,7 +11212,7 @@ func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___EnumValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___EnumValue_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__EnumValue", Field: field, @@ -11201,7 +11253,7 @@ func (ec *executionContext) ___EnumValue_description(ctx context.Context, field return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___EnumValue_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___EnumValue_description(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__EnumValue", Field: field, @@ -11245,7 +11297,7 @@ func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field return ec.marshalNBoolean2bool(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___EnumValue_isDeprecated(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__EnumValue", Field: field, @@ -11286,7 +11338,7 @@ func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___EnumValue_deprecationReason(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__EnumValue", Field: field, @@ -11330,7 +11382,7 @@ func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.Col return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Field_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Field_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Field", Field: field, @@ -11371,7 +11423,7 @@ func (ec 
*executionContext) ___Field_description(ctx context.Context, field grap return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Field_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Field_description(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Field", Field: field, @@ -11415,7 +11467,7 @@ func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.Col return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Field_args(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Field_args(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Field", Field: field, @@ -11469,7 +11521,7 @@ func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.Col return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Field_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Field_type(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Field", Field: field, @@ -11535,7 +11587,7 @@ func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field gra return ec.marshalNBoolean2bool(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Field_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Field_isDeprecated(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Field", Field: field, @@ -11576,7 +11628,7 @@ func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, fiel return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Field_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Field_deprecationReason(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Field", Field: field, @@ -11620,7 +11672,7 @@ func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___InputValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___InputValue_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__InputValue", Field: field, @@ -11661,7 +11713,7 @@ func (ec *executionContext) ___InputValue_description(ctx context.Context, field return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___InputValue_description(ctx context.Context, field graphql.CollectedField) (fc 
*graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___InputValue_description(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__InputValue", Field: field, @@ -11705,7 +11757,7 @@ func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphq return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___InputValue_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___InputValue_type(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__InputValue", Field: field, @@ -11768,7 +11820,7 @@ func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, fiel return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___InputValue_defaultValue(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__InputValue", Field: field, @@ -11809,7 +11861,7 @@ func (ec *executionContext) ___Schema_description(ctx context.Context, field gra return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Schema_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Schema_description(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Schema", Field: field, @@ -11853,7 +11905,7 @@ func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.C return ec.marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Schema_types(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Schema_types(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Schema", Field: field, @@ -11919,7 +11971,7 @@ func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graph return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Schema_queryType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Schema_queryType(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Schema", Field: field, @@ -11982,7 +12034,7 @@ func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field gr return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Schema_mutationType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Schema_mutationType(_ context.Context, field 
graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Schema", Field: field, @@ -12045,7 +12097,7 @@ func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, fiel return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Schema_subscriptionType(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Schema", Field: field, @@ -12111,7 +12163,7 @@ func (ec *executionContext) ___Schema_directives(ctx context.Context, field grap return ec.marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Schema_directives(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Schema_directives(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Schema", Field: field, @@ -12167,7 +12219,7 @@ func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.Coll return ec.marshalN__TypeKind2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_kind(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -12208,7 +12260,7 @@ func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.Coll return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -12249,7 +12301,7 @@ func (ec *executionContext) ___Type_description(ctx context.Context, field graph return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_description(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -12356,7 +12408,7 @@ func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphq return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_interfaces(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_interfaces(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -12419,7 +12471,7 @@ func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, 
field gra return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_possibleTypes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_possibleTypes(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -12544,7 +12596,7 @@ func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graph return ec.marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_inputFields(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_inputFields(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -12595,7 +12647,7 @@ func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.Co return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_ofType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_ofType(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -12658,7 +12710,7 @@ func (ec *executionContext) ___Type_specifiedByURL(ctx context.Context, field gr return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext___Type_specifiedByURL(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext___Type_specifiedByURL(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "__Type", Field: field, @@ -13634,7 +13686,7 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj case "concurrentJobs": field := field - innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13667,7 +13719,7 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj case "footprint": field := field - innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13700,7 +13752,7 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj case "metaData": field := field - innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -13733,7 +13785,7 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj case "userData": field := field - innerFunc := func(ctx context.Context, fs 
*graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -14186,6 +14238,8 @@ func (ec *executionContext) _MetricConfig(ctx context.Context, sel ast.Selection if out.Values[i] == graphql.Null { out.Invalids++ } + case "lowerIsBetter": + out.Values[i] = ec._MetricConfig_lowerIsBetter(ctx, field, obj) case "subClusters": out.Values[i] = ec._MetricConfig_subClusters(ctx, field, obj) if out.Values[i] == graphql.Null { @@ -14412,7 +14466,7 @@ func (ec *executionContext) _MetricValue(ctx context.Context, sel ast.SelectionS case "name": field := field - innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -14686,7 +14740,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr case "user": field := field - innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -14727,7 +14781,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr case "job": field := field - innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -14768,7 +14822,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr case "jobsFootprints": field := field - innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -17571,7 +17625,7 @@ func (ec *executionContext) marshalOAggregate2ᚖgithubᚗcomᚋClusterCockpit return v } -func (ec *executionContext) unmarshalOAny2interface(ctx context.Context, v interface{}) (interface{}, error) { +func (ec *executionContext) unmarshalOAny2interface(ctx context.Context, v interface{}) (any, error) { if v == nil { return nil, nil } @@ -17579,7 +17633,7 @@ func (ec *executionContext) unmarshalOAny2interface(ctx context.Context, v inter return res, graphql.ErrorOnPath(ctx, err) } -func (ec *executionContext) marshalOAny2interface(ctx context.Context, sel ast.SelectionSet, v interface{}) graphql.Marshaler { +func (ec *executionContext) marshalOAny2interface(ctx context.Context, sel ast.SelectionSet, v any) graphql.Marshaler { if v == nil { return graphql.Null } diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index c95a307..9f7b9f5 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -2,7 +2,7 @@ package graph // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. 
-// Code generated by github.com/99designs/gqlgen version v0.17.45 +// Code generated by github.com/99designs/gqlgen version v0.17.49 import ( "context" @@ -45,12 +45,12 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod } // Footprint is the resolver for the footprint field. -func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) (interface{}, error) { +func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) (any, error) { return r.Repo.FetchFootprint(obj) } // MetaData is the resolver for the metaData field. -func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) { +func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (any, error) { return r.Repo.FetchMetadata(obj) } @@ -419,11 +419,9 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } // SubCluster returns generated.SubClusterResolver implementation. func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } -type ( - clusterResolver struct{ *Resolver } - jobResolver struct{ *Resolver } - metricValueResolver struct{ *Resolver } - mutationResolver struct{ *Resolver } - queryResolver struct{ *Resolver } - subClusterResolver struct{ *Resolver } -) +type clusterResolver struct{ *Resolver } +type jobResolver struct{ *Resolver } +type metricValueResolver struct{ *Resolver } +type mutationResolver struct{ *Resolver } +type queryResolver struct{ *Resolver } +type subClusterResolver struct{ *Resolver } diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index 0516abb..0673a50 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -78,6 +78,8 @@ func initClusterConfig() error { newMetric.Caution = cfg.Caution newMetric.Alert = cfg.Alert newMetric.Footprint = cfg.Footprint + newMetric.Energy = cfg.Energy + newMetric.LowerIsBetter = cfg.LowerIsBetter sc.MetricConfig = append(sc.MetricConfig, *newMetric) if newMetric.Footprint { diff --git a/pkg/archive/clusterConfig_test.go b/pkg/archive/clusterConfig_test.go index 8624374..a73f22f 100644 --- a/pkg/archive/clusterConfig_test.go +++ b/pkg/archive/clusterConfig_test.go @@ -28,6 +28,12 @@ func TestClusterConfig(t *testing.T) { t.Fail() } + for _, metric := range sc.MetricConfig { + if metric.LowerIsBetter && metric.Name != "mem_used" { + t.Fail() + } + } + // spew.Dump(archive.GlobalMetricList) // t.Fail() } diff --git a/pkg/archive/testdata/archive/fritz/cluster.json b/pkg/archive/testdata/archive/fritz/cluster.json index 8c7331c..b0263c4 100644 --- a/pkg/archive/testdata/archive/fritz/cluster.json +++ b/pkg/archive/testdata/archive/fritz/cluster.json @@ -60,6 +60,7 @@ "normal": 128, "caution": 200, "alert": 240, + "lowerIsBetter": true, "subClusters": [ { "name": "spr1tb", @@ -67,6 +68,7 @@ "normal": 512, "caution": 900, "footprint": true, + "lowerIsBetter": true, "alert": 1000 }, { @@ -75,6 +77,7 @@ "normal": 1024, "caution": 1800, "footprint": true, + "lowerIsBetter": true, "alert": 2000 } ] diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index 026c80b..5b2305e 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -46,28 +46,31 @@ type SubCluster struct { } type SubClusterConfig struct { - Name string `json:"name"` - Peak float64 `json:"peak"` - Normal float64 `json:"normal"` - Caution float64 `json:"caution"` - Alert float64 `json:"alert"` - Footprint bool `json:"footprint"` - Remove bool `json:"remove"` + Name string `json:"name"` + Peak float64 
`json:"peak"` - Normal float64 `json:"normal"` - Caution float64 `json:"caution"` - Alert float64 `json:"alert"` - Footprint bool `json:"footprint"` - Remove bool `json:"remove"` + Name string `json:"name"` + Peak float64 `json:"peak"` + Normal float64 `json:"normal"` + Caution float64 `json:"caution"` + Alert float64 `json:"alert"` + Footprint bool `json:"footprint"` + Remove bool `json:"remove"` + LowerIsBetter bool `json:"lowerIsBetter"` + Energy bool `json:"energy"` } type MetricConfig struct { - Unit Unit `json:"unit"` - Name string `json:"name"` - Scope MetricScope `json:"scope"` - Aggregation string `json:"aggregation"` - SubClusters []*SubClusterConfig `json:"subClusters,omitempty"` - Timestep int `json:"timestep"` - Peak float64 `json:"peak"` - Normal float64 `json:"normal"` - Caution float64 `json:"caution"` - Alert float64 `json:"alert"` - Footprint bool `json:"footprint"` - Energy bool `json:"energy"` + Unit Unit `json:"unit"` + Name string `json:"name"` + Scope MetricScope `json:"scope"` + Aggregation string `json:"aggregation"` + SubClusters []*SubClusterConfig `json:"subClusters,omitempty"` + Timestep int `json:"timestep"` + Peak float64 `json:"peak"` + Normal float64 `json:"normal"` + Caution float64 `json:"caution"` + Alert float64 `json:"alert"` + LowerIsBetter bool `json:"lowerIsBetter"` + Footprint bool `json:"footprint"` + Energy bool `json:"energy"` } type Cluster struct { From 0adfb631ef93211f9f600220b02e4d120ef3f8d7 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 11 Jul 2024 17:11:01 +0200 Subject: [PATCH 051/443] Update go version to 1.22 for GitHub test workflow --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6c2fc9b..e4aa02b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,7 +7,7 @@ jobs: - name: Install Go uses: actions/setup-go@v4 with: - go-version: 1.20.x + go-version: 1.22.x - name: Checkout code uses: actions/checkout@v3 - name: Build, Vet & Test From a4912893a8e3098d3fccb62341a5a461c271731c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 11 Jul 2024 17:23:59 +0200 Subject: [PATCH 052/443] Frontend refactor backend changes --- api/schema.graphqls | 8 +- internal/graph/generated/generated.go | 280 +++++++++++++++++++++++++- internal/graph/model/models_gen.go | 6 + internal/graph/schema.resolvers.go | 40 +++- 4 files changed, 318 insertions(+), 16 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index fa8bb20..6d3ba5c 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -27,7 +27,7 @@ type Job { tags: [Tag!]! resources: [Resource!]! concurrentJobs: JobLinkResultList - footprint: Any + footprint: [FootprintValue] metaData: Any userData: User } @@ -59,6 +59,12 @@ type SubCluster { footprint: [String!]! } +type FootprintValue { + name: String! + stat: String! + value: Float! +} + type MetricValue { name: String unit: Unit!
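As an aside before the regenerated resolver plumbing: the schema change above replaces the opaque footprint: Any field with a list of typed entries. A minimal Go sketch of the shape a client now receives (metric values here are made up; a real client would use generated types rather than this hand-rolled struct):

package main

import (
	"encoding/json"
	"fmt"
)

// FootprintEntry mirrors the new FootprintValue GraphQL type above.
type FootprintEntry struct {
	Name  string  `json:"name"`
	Stat  string  `json:"stat"`
	Value float64 `json:"value"`
}

func main() {
	// Instead of one opaque blob, a job's footprint now arrives as typed entries.
	fp := []FootprintEntry{
		{Name: "flops_any", Stat: "avg", Value: 142.3}, // illustrative values
		{Name: "mem_used", Stat: "max", Value: 93.1},
	}
	out, _ := json.MarshalIndent(fp, "", "  ")
	fmt.Println(string(out))
}

The resolver that produces these entries from the stored footprint JSON is shown further below in internal/graph/schema.resolvers.go.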
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 92467ce..da1fa4a 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -74,6 +74,12 @@ type ComplexityRoot struct { Name func(childComplexity int) int } + FootprintValue struct { + Name func(childComplexity int) int + Stat func(childComplexity int) int + Value func(childComplexity int) int + } + Footprints struct { Metrics func(childComplexity int) int TimeWeights func(childComplexity int) int @@ -338,7 +344,7 @@ type JobResolver interface { Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) - Footprint(ctx context.Context, obj *schema.Job) (interface{}, error) + Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) } @@ -459,6 +465,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Count.Name(childComplexity), true + case "FootprintValue.name": + if e.complexity.FootprintValue.Name == nil { + break + } + + return e.complexity.FootprintValue.Name(childComplexity), true + + case "FootprintValue.stat": + if e.complexity.FootprintValue.Stat == nil { + break + } + + return e.complexity.FootprintValue.Stat(childComplexity), true + + case "FootprintValue.value": + if e.complexity.FootprintValue.Value == nil { + break + } + + return e.complexity.FootprintValue.Value(childComplexity), true + case "Footprints.metrics": if e.complexity.Footprints.Metrics == nil { break @@ -1811,7 +1838,7 @@ type Job { tags: [Tag!]! resources: [Resource!]! concurrentJobs: JobLinkResultList - footprint: Any + footprint: [FootprintValue] metaData: Any userData: User } @@ -1843,6 +1870,12 @@ type SubCluster { footprint: [String!]! } +type FootprintValue { + name: String! + stat: String! + value: Float! +} + type MetricValue { name: String unit: Unit! 
@@ -3079,6 +3112,138 @@ func (ec *executionContext) fieldContext_Count_count(ctx context.Context, field return fc, nil } +func (ec *executionContext) _FootprintValue_name(ctx context.Context, field graphql.CollectedField, obj *model.FootprintValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_FootprintValue_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_FootprintValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "FootprintValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _FootprintValue_stat(ctx context.Context, field graphql.CollectedField, obj *model.FootprintValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_FootprintValue_stat(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stat, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_FootprintValue_stat(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "FootprintValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _FootprintValue_value(ctx context.Context, field graphql.CollectedField, obj *model.FootprintValue) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_FootprintValue_value(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Value, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == 
nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalNFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_FootprintValue_value(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "FootprintValue", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Footprints_timeWeights(ctx context.Context, field graphql.CollectedField, obj *model.Footprints) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Footprints_timeWeights(ctx, field) if err != nil { @@ -4515,9 +4680,9 @@ func (ec *executionContext) _Job_footprint(ctx context.Context, field graphql.Co if resTmp == nil { return graphql.Null } - res := resTmp.(interface{}) + res := resTmp.([]*model.FootprintValue) fc.Result = res - return ec.marshalOAny2interface(ctx, field.Selections, res) + return ec.marshalOFootprintValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprintValue(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Job_footprint(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -4527,7 +4692,15 @@ func (ec *executionContext) fieldContext_Job_footprint(ctx context.Context, fiel IsMethod: true, IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type Any does not have child fields") + switch field.Name { + case "name": + return ec.fieldContext_FootprintValue_name(ctx, field) + case "stat": + return ec.fieldContext_FootprintValue_stat(ctx, field) + case "value": + return ec.fieldContext_FootprintValue_value(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type FootprintValue", field.Name) }, } return fc, nil @@ -13303,6 +13476,55 @@ func (ec *executionContext) _Count(ctx context.Context, sel ast.SelectionSet, ob return out } +var footprintValueImplementors = []string{"FootprintValue"} + +func (ec *executionContext) _FootprintValue(ctx context.Context, sel ast.SelectionSet, obj *model.FootprintValue) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, footprintValueImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("FootprintValue") + case "name": + out.Values[i] = ec._FootprintValue_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stat": + out.Values[i] = ec._FootprintValue_stat(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "value": + out.Values[i] = ec._FootprintValue_value(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + 
Context: ctx, + }) + } + + return out +} + var footprintsImplementors = []string{"Footprints"} func (ec *executionContext) _Footprints(ctx context.Context, sel ast.SelectionSet, obj *model.Footprints) graphql.Marshaler { @@ -17631,6 +17853,54 @@ func (ec *executionContext) unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpi return &res, graphql.ErrorOnPath(ctx, err) } +func (ec *executionContext) marshalOFootprintValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprintValue(ctx context.Context, sel ast.SelectionSet, v []*model.FootprintValue) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalOFootprintValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprintValue(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + return ret +} + +func (ec *executionContext) marshalOFootprintValue2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprintValue(ctx context.Context, sel ast.SelectionSet, v *model.FootprintValue) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._FootprintValue(ctx, sel, v) +} + func (ec *executionContext) marshalOFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprints(ctx context.Context, sel ast.SelectionSet, v *model.Footprints) graphql.Marshaler { if v == nil { return graphql.Null diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index d575fc3..b19cab2 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -21,6 +21,12 @@ type FloatRange struct { To float64 `json:"to"` } +type FootprintValue struct { + Name string `json:"name"` + Stat string `json:"stat"` + Value float64 `json:"value"` +} + type Footprints struct { TimeWeights *TimeWeights `json:"timeWeights"` Metrics []*MetricFootprints `json:"metrics"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index c95a307..b21108a 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "strconv" + "strings" "time" "github.com/ClusterCockpit/cc-backend/internal/config" @@ -45,8 +46,29 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod } // Footprint is the resolver for the footprint field. -func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) (interface{}, error) { - return r.Repo.FetchFootprint(obj) +func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) { + rawFootprint, err := r.Repo.FetchFootprint(obj) + + if err != nil { + log.Warn("Error while fetching job footprint data") + return nil, err + } + + res := []*model.FootprintValue{} + for name, value := range rawFootprint { + + parts := strings.Split(name, "_") + statPart := parts[len(parts)-1] + nameParts := parts[:len(parts)-1] + + res = append(res, &model.FootprintValue{ + Name: strings.Join(nameParts, "_"), + Stat: statPart, + Value: value, + }) + } + + return res, err } // MetaData is the resolver for the metaData field. 
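To make the key-splitting in the Footprint resolver above concrete: the trailing underscore-separated token of a footprint key is taken as the statistic, and the remainder as the metric name. A standalone sketch of the same convention (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// splitFootprintKey mirrors the convention used by the Footprint resolver above:
// the last underscore-separated token is the statistic, the rest is the metric name.
func splitFootprintKey(key string) (name, stat string) {
	parts := strings.Split(key, "_")
	return strings.Join(parts[:len(parts)-1], "_"), parts[len(parts)-1]
}

func main() {
	// Keys as stored in the footprint JSON column; cpu_load_avg is the
	// corrected key introduced by the v8 migration in the next patch.
	for _, key := range []string{"flops_any_avg", "mem_used_max", "cpu_load_avg"} {
		name, stat := splitFootprintKey(key)
		fmt.Printf("%-13s -> name=%q stat=%q\n", key, name, stat)
	}
}

So "flops_any_avg" yields name "flops_any" and stat "avg", matching the FootprintValue fields defined in api/schema.graphqls.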
@@ -419,11 +441,9 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } // SubCluster returns generated.SubClusterResolver implementation. func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } -type ( - clusterResolver struct{ *Resolver } - jobResolver struct{ *Resolver } - metricValueResolver struct{ *Resolver } - mutationResolver struct{ *Resolver } - queryResolver struct{ *Resolver } - subClusterResolver struct{ *Resolver } -) +type clusterResolver struct{ *Resolver } +type jobResolver struct{ *Resolver } +type metricValueResolver struct{ *Resolver } +type mutationResolver struct{ *Resolver } +type queryResolver struct{ *Resolver } +type subClusterResolver struct{ *Resolver } From e14d6a81fee95c80e43629a58c29df8d9fff9d1f Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 11 Jul 2024 17:24:33 +0200 Subject: [PATCH 053/443] fix: fix db migration to v8, changes key name to cpu_load --- internal/repository/migrations/sqlite3/08_add-footprint.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 5db1d1b..643b87e 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -5,7 +5,7 @@ UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max); -UPDATE job SET footprint = json_insert(footprint, '$.load_avg', job.load_avg); +UPDATE job SET footprint = json_insert(footprint, '$.cpu_load_avg', job.load_avg); ALTER TABLE job DROP flops_any_avg; ALTER TABLE job DROP mem_bw_avg; ALTER TABLE job DROP mem_used_max; From a8721dcc69432375296216ac39ed3c5ab15ee24a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 11 Jul 2024 17:37:53 +0200 Subject: [PATCH 054/443] Regenerate gql after internal merge --- internal/graph/generated/generated.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 04445e4..e8c72ec 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -346,7 +346,7 @@ type JobResolver interface { ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) - MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) + MetaData(ctx context.Context, obj *schema.Job) (any, error) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) } type MetricValueResolver interface { @@ -3152,7 +3152,7 @@ func (ec *executionContext) _FootprintValue_name(ctx context.Context, field grap return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_FootprintValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_FootprintValue_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "FootprintValue", Field: field, @@ -3196,7 +3196,7 @@ func (ec *executionContext) 
_FootprintValue_stat(ctx context.Context, field grap return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_FootprintValue_stat(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_FootprintValue_stat(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "FootprintValue", Field: field, @@ -3240,7 +3240,7 @@ func (ec *executionContext) _FootprintValue_value(ctx context.Context, field gra return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_FootprintValue_value(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_FootprintValue_value(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "FootprintValue", Field: field, From a07d167390e12776535062ff32e4ea0ae5e45027 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 12 Jul 2024 09:17:31 +0200 Subject: [PATCH 055/443] Fix build error with updated prometheus client --- internal/metricdata/prometheus.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index 0d3848f..a8d9f39 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -166,10 +166,10 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error { var rt http.RoundTripper = nil if prom_pw := os.Getenv("PROMETHEUS_PASSWORD"); prom_pw != "" && config.Username != "" { prom_pw := promcfg.Secret(prom_pw) - rt = promcfg.NewBasicAuthRoundTripper(config.Username, prom_pw, "", promapi.DefaultRoundTripper) + rt = promcfg.NewBasicAuthRoundTripper(promcfg.NewInlineSecret(config.Username), promcfg.NewInlineSecret(string(prom_pw)), promapi.DefaultRoundTripper) } else { if config.Username != "" { - return errors.New("METRICDATA/PROMETHEUS > Prometheus username provided, but PROMETHEUS_PASSWORD not set.") + return errors.New("METRICDATA/PROMETHEUS > Prometheus username provided, but PROMETHEUS_PASSWORD not set") } } // init client @@ -204,8 +204,8 @@ func (pdb *PrometheusDataRepository) FormatQuery( metric string, scope schema.MetricScope, nodes []string, - cluster string) (string, error) { - + cluster string, +) (string, error) { args := PromQLArgs{} if len(nodes) > 0 { args.Nodes = fmt.Sprintf("(%s)%s", nodeRegex(nodes), pdb.suffix) @@ -233,12 +233,13 @@ func (pdb *PrometheusDataRepository) RowToSeries( from time.Time, step int64, steps int64, - row *promm.SampleStream) schema.Series { + row *promm.SampleStream, +) schema.Series { ts := from.Unix() hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix) // init array of expected length with NaN values := make([]schema.Float, steps+1) - for i, _ := range values { + for i := range values { values[i] = schema.NaN } // copy recorded values from prom sample pair @@ -263,8 +264,8 @@ func (pdb *PrometheusDataRepository) LoadData( job *schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.JobData, error) { - + ctx context.Context, +) (schema.JobData, error) { // TODO respect requested scope if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { scopes = append(scopes, schema.MetricScopeNode) @@ -306,7 +307,6 @@ func (pdb 
*PrometheusDataRepository) LoadData( Step: time.Duration(metricConfig.Timestep * 1e9), } result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) - if err != nil { log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query) return nil, errors.New("Prometheus query error") @@ -335,7 +335,7 @@ func (pdb *PrometheusDataRepository) LoadData( pdb.RowToSeries(from, step, steps, row)) } // only add metric if at least one host returned data - if !ok && len(jobMetric.Series) > 0{ + if !ok && len(jobMetric.Series) > 0 { jobData[metric][scope] = jobMetric } // sort by hostname to get uniform coloring @@ -351,8 +351,8 @@ func (pdb *PrometheusDataRepository) LoadData( func (pdb *PrometheusDataRepository) LoadStats( job *schema.Job, metrics []string, - ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { - + ctx context.Context, +) (map[string]map[string]schema.MetricStatistics, error) { // map of metrics of nodes of stats stats := map[string]map[string]schema.MetricStatistics{} @@ -376,7 +376,8 @@ func (pdb *PrometheusDataRepository) LoadNodeData( metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, - ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { + ctx context.Context, +) (map[string]map[string][]*schema.JobMetric, error) { t0 := time.Now() // Map of hosts of metrics of value slices data := make(map[string]map[string][]*schema.JobMetric) @@ -411,7 +412,6 @@ func (pdb *PrometheusDataRepository) LoadNodeData( Step: time.Duration(metricConfig.Timestep * 1e9), } result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) - if err != nil { log.Errorf("Prometheus query error in LoadNodeData: %v\n", err) return nil, errors.New("Prometheus query error") From 68a97dc9806d4a13c5172a98e1034776d3db85bb Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 12 Jul 2024 13:20:54 +0200 Subject: [PATCH 056/443] Add footprint to global metric list --- pkg/archive/clusterConfig.go | 5 ++++- pkg/schema/cluster.go | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index 0673a50..a68a530 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -55,7 +55,9 @@ func initClusterConfig() error { ml, ok := metricLookup[mc.Name] if !ok { - metricLookup[mc.Name] = schema.GlobalMetricListItem{Name: mc.Name, Scope: mc.Scope, Unit: mc.Unit} + metricLookup[mc.Name] = schema.GlobalMetricListItem{ + Name: mc.Name, Scope: mc.Scope, Unit: mc.Unit, Footprint: mc.Footprint, + } ml = metricLookup[mc.Name] } availability := schema.ClusterSupport{Cluster: cluster.Name} @@ -84,6 +86,7 @@ func initClusterConfig() error { if newMetric.Footprint { sc.Footprint = append(sc.Footprint, newMetric.Name) + ml.Footprint = true } if newMetric.Energy { sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name) diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index 5b2305e..ef1be89 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -88,6 +88,7 @@ type GlobalMetricListItem struct { Name string `json:"name"` Unit Unit `json:"unit"` Scope MetricScope `json:"scope"` + Footprint bool `json:"footprint"` Availability []ClusterSupport `json:"availability"` } From c61ffce0e9900de18871f19357565d187921cd92 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 12 Jul 2024 13:21:19 +0200 Subject: [PATCH 057/443] Make job query on metric stats generic --- api/schema.graphqls | 18 ++- internal/graph/generated/generated.go | 175 
++++++++++++++++++++------ internal/graph/model/models_gen.go | 10 +- internal/graph/schema.resolvers.go | 1 - internal/repository/jobQuery.go | 19 +-- 5 files changed, 159 insertions(+), 64 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 7579dcb..8edae6c 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -194,6 +194,7 @@ type GlobalMetricListItem { name: String! unit: Unit! scope: MetricScope! + footprint: Boolean availability: [ClusterSupport!]! } @@ -208,6 +209,11 @@ type User { email: String! } +input MetricStatItem { + metricName: String! + range: FloatRange! +} + type Query { clusters: [Cluster!]! # List of all clusters tags: [Tag!]! # List of all tags @@ -259,11 +265,7 @@ input JobFilter { startTime: TimeRange state: [JobState!] - flopsAnyAvg: FloatRange - memBwAvg: FloatRange - loadAvg: FloatRange - memUsedMax: FloatRange - + metricStats: [MetricStatItem!] exclusive: Int node: StringInput } @@ -288,9 +290,13 @@ input StringInput { } input IntRange { from: Int!, to: Int! } -input FloatRange { from: Float!, to: Float! } input TimeRange { from: Time, to: Time } +input FloatRange { + from: Float! + to: Float! +} + type JobResultList { items: [Job!]! offset: Int diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index e8c72ec..91839a9 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -87,6 +87,7 @@ type ComplexityRoot struct { GlobalMetricListItem struct { Availability func(childComplexity int) int + Footprint func(childComplexity int) int Name func(childComplexity int) int Scope func(childComplexity int) int Unit func(childComplexity int) int @@ -508,6 +509,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.GlobalMetricListItem.Availability(childComplexity), true + case "GlobalMetricListItem.footprint": + if e.complexity.GlobalMetricListItem.Footprint == nil { + break + } + + return e.complexity.GlobalMetricListItem.Footprint(childComplexity), true + case "GlobalMetricListItem.name": if e.complexity.GlobalMetricListItem.Name == nil { break @@ -1716,6 +1724,7 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { ec.unmarshalInputFloatRange, ec.unmarshalInputIntRange, ec.unmarshalInputJobFilter, + ec.unmarshalInputMetricStatItem, ec.unmarshalInputOrderByInput, ec.unmarshalInputPageRequest, ec.unmarshalInputStringInput, @@ -2013,6 +2022,7 @@ type GlobalMetricListItem { name: String! unit: Unit! scope: MetricScope! + footprint: Boolean availability: [ClusterSupport!]! } @@ -2027,6 +2037,11 @@ type User { email: String! } +input MetricStatItem { + metricName: String! + range: FloatRange! +} + type Query { clusters: [Cluster!]! # List of all clusters tags: [Tag!]! # List of all tags @@ -2078,11 +2093,7 @@ input JobFilter { startTime: TimeRange state: [JobState!] - flopsAnyAvg: FloatRange - memBwAvg: FloatRange - loadAvg: FloatRange - memUsedMax: FloatRange - + metricStats: [MetricStatItem!] exclusive: Int node: StringInput } @@ -2107,9 +2118,13 @@ input StringInput { } input IntRange { from: Int!, to: Int! } -input FloatRange { from: Float!, to: Float! } input TimeRange { from: Time, to: Time } +input FloatRange { + from: Float! + to: Float! +} + type JobResultList { items: [Job!]! 
offset: Int @@ -3493,6 +3508,47 @@ func (ec *executionContext) fieldContext_GlobalMetricListItem_scope(_ context.Co return fc, nil } +func (ec *executionContext) _GlobalMetricListItem_footprint(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GlobalMetricListItem_footprint(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Footprint, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(bool) + fc.Result = res + return ec.marshalOBoolean2bool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GlobalMetricListItem_footprint(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GlobalMetricListItem", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _GlobalMetricListItem_availability(ctx context.Context, field graphql.CollectedField, obj *schema.GlobalMetricListItem) (ret graphql.Marshaler) { fc, err := ec.fieldContext_GlobalMetricListItem_availability(ctx, field) if err != nil { @@ -8142,6 +8198,8 @@ func (ec *executionContext) fieldContext_Query_globalMetrics(_ context.Context, return ec.fieldContext_GlobalMetricListItem_unit(ctx, field) case "scope": return ec.fieldContext_GlobalMetricListItem_scope(ctx, field) + case "footprint": + return ec.fieldContext_GlobalMetricListItem_footprint(ctx, field) case "availability": return ec.fieldContext_GlobalMetricListItem_availability(ctx, field) } @@ -12975,7 +13033,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int asMap[k] = v } - fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "flopsAnyAvg", "memBwAvg", "loadAvg", "memUsedMax", "exclusive", "node"} + fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "exclusive", "node"} for _, k := range fieldsInOrder { v, ok := asMap[k] if !ok { @@ -13087,34 +13145,13 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int return it, err } it.State = data - case "flopsAnyAvg": - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("flopsAnyAvg")) - data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) + case "metricStats": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metricStats")) + data, err := ec.unmarshalOMetricStatItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricStatItemᚄ(ctx, v) if err != nil { return it, err } - it.FlopsAnyAvg = data - case "memBwAvg": - ctx := 
graphql.WithPathContext(ctx, graphql.NewPathWithField("memBwAvg")) - data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) - if err != nil { - return it, err - } - it.MemBwAvg = data - case "loadAvg": - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("loadAvg")) - data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) - if err != nil { - return it, err - } - it.LoadAvg = data - case "memUsedMax": - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memUsedMax")) - data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) - if err != nil { - return it, err - } - it.MemUsedMax = data + it.MetricStats = data case "exclusive": ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("exclusive")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) @@ -13135,6 +13172,40 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int return it, nil } +func (ec *executionContext) unmarshalInputMetricStatItem(ctx context.Context, obj interface{}) (model.MetricStatItem, error) { + var it model.MetricStatItem + asMap := map[string]interface{}{} + for k, v := range obj.(map[string]interface{}) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"metricName", "range"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "metricName": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metricName")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.MetricName = data + case "range": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("range")) + data, err := ec.unmarshalNFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) + if err != nil { + return it, err + } + it.Range = data + } + } + + return it, nil +} + func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj interface{}) (model.OrderByInput, error) { var it model.OrderByInput asMap := map[string]interface{}{} @@ -13647,6 +13718,8 @@ func (ec *executionContext) _GlobalMetricListItem(ctx context.Context, sel ast.S if out.Values[i] == graphql.Null { out.Invalids++ } + case "footprint": + out.Values[i] = ec._GlobalMetricListItem_footprint(ctx, field, obj) case "availability": out.Values[i] = ec._GlobalMetricListItem_availability(ctx, field, obj) if out.Values[i] == graphql.Null { @@ -16369,6 +16442,11 @@ func (ec *executionContext) marshalNFloat2ᚕᚕfloat64ᚄ(ctx context.Context, return ret } +func (ec *executionContext) unmarshalNFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx context.Context, v interface{}) (*model.FloatRange, error) { + res, err := ec.unmarshalInputFloatRange(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + func (ec *executionContext) marshalNGlobalMetricListItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐGlobalMetricListItemᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.GlobalMetricListItem) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -17117,6 +17195,11 @@ func (ec *executionContext) marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋ return v } +func (ec *executionContext) unmarshalNMetricStatItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricStatItem(ctx context.Context, v interface{}) (*model.MetricStatItem, error) { + res, err := 
ec.unmarshalInputMetricStatItem(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + func (ec *executionContext) marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricValue(ctx context.Context, sel ast.SelectionSet, v schema.MetricValue) graphql.Marshaler { return ec._MetricValue(ctx, sel, &v) } @@ -17899,14 +17982,6 @@ func (ec *executionContext) marshalOFloat2float64(ctx context.Context, sel ast.S return graphql.WrapContextMarshaler(ctx, res) } -func (ec *executionContext) unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx context.Context, v interface{}) (*model.FloatRange, error) { - if v == nil { - return nil, nil - } - res, err := ec.unmarshalInputFloatRange(ctx, v) - return &res, graphql.ErrorOnPath(ctx, err) -} - func (ec *executionContext) marshalOFootprintValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprintValue(ctx context.Context, sel ast.SelectionSet, v []*model.FootprintValue) graphql.Marshaler { if v == nil { return graphql.Null @@ -18295,6 +18370,26 @@ func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpit return ret } +func (ec *executionContext) unmarshalOMetricStatItem2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricStatItemᚄ(ctx context.Context, v interface{}) ([]*model.MetricStatItem, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + vSlice = graphql.CoerceList(v) + } + var err error + res := make([]*model.MetricStatItem, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNMetricStatItem2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricStatItem(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + func (ec *executionContext) marshalOMetricStatistics2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v schema.MetricStatistics) graphql.Marshaler { return ec._MetricStatistics(ctx, sel, &v) } diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index b19cab2..e3b4a11 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -58,10 +58,7 @@ type JobFilter struct { NumHWThreads *schema.IntRange `json:"numHWThreads,omitempty"` StartTime *schema.TimeRange `json:"startTime,omitempty"` State []schema.JobState `json:"state,omitempty"` - FlopsAnyAvg *FloatRange `json:"flopsAnyAvg,omitempty"` - MemBwAvg *FloatRange `json:"memBwAvg,omitempty"` - LoadAvg *FloatRange `json:"loadAvg,omitempty"` - MemUsedMax *FloatRange `json:"memUsedMax,omitempty"` + MetricStats []*MetricStatItem `json:"metricStats,omitempty"` Exclusive *int `json:"exclusive,omitempty"` Node *StringInput `json:"node,omitempty"` } @@ -129,6 +126,11 @@ type MetricHistoPoints struct { Data []*MetricHistoPoint `json:"data,omitempty"` } +type MetricStatItem struct { + MetricName string `json:"metricName"` + Range *FloatRange `json:"range"` +} + type Mutation struct { } diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 320b58b..f36e25a 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -48,7 +48,6 @@ func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*mod // Footprint is the resolver for the footprint field. 
func (r *jobResolver) Footprint(ctx context.Context, obj *schema.Job) ([]*model.FootprintValue, error) {
 	rawFootprint, err := r.Repo.FetchFootprint(obj)
-
 	if err != nil {
 		log.Warn("Error while fetching job footprint data")
 		return nil, err
diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go
index a985519..c52577d 100644
--- a/internal/repository/jobQuery.go
+++ b/internal/repository/jobQuery.go
@@ -176,17 +176,10 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 	if filter.Node != nil {
 		query = buildStringCondition("job.resources", filter.Node, query)
 	}
-	if filter.FlopsAnyAvg != nil {
-		query = buildFloatCondition("job.flops_any_avg", filter.FlopsAnyAvg, query)
-	}
-	if filter.MemBwAvg != nil {
-		query = buildFloatCondition("job.mem_bw_avg", filter.MemBwAvg, query)
-	}
-	if filter.LoadAvg != nil {
-		query = buildFloatCondition("job.load_avg", filter.LoadAvg, query)
-	}
-	if filter.MemUsedMax != nil {
-		query = buildFloatCondition("job.mem_used_max", filter.MemUsedMax, query)
+	if filter.MetricStats != nil {
+		for _, m := range filter.MetricStats {
+			query = buildFloatJsonCondition(m.MetricName, m.Range, query)
+		}
 	}
 	return query
 }
@@ -207,8 +200,8 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui
 	}
 }
 
-func buildFloatCondition(field string, cond *model.FloatRange, query sq.SelectBuilder) sq.SelectBuilder {
-	return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
+func buildFloatJsonCondition(field string, cond *model.FloatRange, query sq.SelectBuilder) sq.SelectBuilder {
+	return query.Where("JSON_EXTRACT(footprint, '$."+field+"') BETWEEN ? AND ?", cond.From, cond.To)
 }
 
 func buildStringCondition(field string, cond *model.StringInput, query sq.SelectBuilder) sq.SelectBuilder {
From 04586756085317093a7474c12892f6f24bda8c4e Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Fri, 12 Jul 2024 13:42:12 +0200
Subject: [PATCH 058/443] Convert histogram query to json keys

---
 internal/repository/stats.go | 94 ++++++++++++++++--------------
 1 file changed, 42 insertions(+), 52 deletions(-)

diff --git a/internal/repository/stats.go b/internal/repository/stats.go
index 6865e18..0775db5 100644
--- a/internal/repository/stats.go
+++ b/internal/repository/stats.go
@@ -41,8 +41,8 @@ var sortBy2column = map[model.SortByAggregate]string{
 func (r *JobRepository) buildCountQuery(
 	filter []*model.JobFilter,
 	kind string,
-	col string) sq.SelectBuilder {
-
+	col string,
+) sq.SelectBuilder {
 	var query sq.SelectBuilder
 
 	if col != "" {
@@ -69,8 +69,8 @@ func (r *JobRepository) buildStatsQuery(
 	filter []*model.JobFilter,
-	col string) sq.SelectBuilder {
-
+	col string,
+) sq.SelectBuilder {
 	var query sq.SelectBuilder
 	castType := r.getCastType()
@@ -87,7 +87,6 @@
 		fmt.Sprintf(`CAST(SUM(job.num_acc) as %s) as totalAccs`, castType),
 		fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType),
 	).From("job").GroupBy(col)
-
 	} else {
 		// Scan columns: totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
 		query = sq.Select("COUNT(job.id)",
@@ -138,8 +137,8 @@ func (r *JobRepository) JobsStatsGrouped(
 	filter []*model.JobFilter,
 	page *model.PageRequest,
 	sortBy *model.SortByAggregate,
groupBy *model.Aggregate, +) ([]*model.JobsStatistics, error) { start := time.Now() col := groupBy2column[*groupBy] query := r.buildStatsQuery(filter, col) @@ -218,7 +217,8 @@ func (r *JobRepository) JobsStatsGrouped( TotalCores: totalCores, TotalCoreHours: totalCoreHours, TotalAccs: totalAccs, - TotalAccHours: totalAccHours}) + TotalAccHours: totalAccHours, + }) } else { stats = append(stats, &model.JobsStatistics{ @@ -230,7 +230,8 @@ func (r *JobRepository) JobsStatsGrouped( TotalCores: totalCores, TotalCoreHours: totalCoreHours, TotalAccs: totalAccs, - TotalAccHours: totalAccHours}) + TotalAccHours: totalAccHours, + }) } } } @@ -241,8 +242,8 @@ func (r *JobRepository) JobsStatsGrouped( func (r *JobRepository) JobsStats( ctx context.Context, - filter []*model.JobFilter) ([]*model.JobsStatistics, error) { - + filter []*model.JobFilter, +) ([]*model.JobsStatistics, error) { start := time.Now() query := r.buildStatsQuery(filter, "") query, err := SecurityCheck(ctx, query) @@ -277,7 +278,8 @@ func (r *JobRepository) JobsStats( TotalWalltime: int(walltime.Int64), TotalNodeHours: totalNodeHours, TotalCoreHours: totalCoreHours, - TotalAccHours: totalAccHours}) + TotalAccHours: totalAccHours, + }) } log.Debugf("Timer JobStats %s", time.Since(start)) @@ -299,8 +301,8 @@ func LoadJobStat(job *schema.JobMeta, metric string) float64 { func (r *JobRepository) JobCountGrouped( ctx context.Context, filter []*model.JobFilter, - groupBy *model.Aggregate) ([]*model.JobsStatistics, error) { - + groupBy *model.Aggregate, +) ([]*model.JobsStatistics, error) { start := time.Now() col := groupBy2column[*groupBy] query := r.buildCountQuery(filter, "", col) @@ -327,7 +329,8 @@ func (r *JobRepository) JobCountGrouped( stats = append(stats, &model.JobsStatistics{ ID: id.String, - TotalJobs: int(cnt.Int64)}) + TotalJobs: int(cnt.Int64), + }) } } @@ -340,8 +343,8 @@ func (r *JobRepository) AddJobCountGrouped( filter []*model.JobFilter, groupBy *model.Aggregate, stats []*model.JobsStatistics, - kind string) ([]*model.JobsStatistics, error) { - + kind string, +) ([]*model.JobsStatistics, error) { start := time.Now() col := groupBy2column[*groupBy] query := r.buildCountQuery(filter, kind, col) @@ -388,8 +391,8 @@ func (r *JobRepository) AddJobCount( ctx context.Context, filter []*model.JobFilter, stats []*model.JobsStatistics, - kind string) ([]*model.JobsStatistics, error) { - + kind string, +) ([]*model.JobsStatistics, error) { start := time.Now() query := r.buildCountQuery(filter, kind, "") query, err := SecurityCheck(ctx, query) @@ -432,7 +435,8 @@ func (r *JobRepository) AddJobCount( func (r *JobRepository) AddHistograms( ctx context.Context, filter []*model.JobFilter, - stat *model.JobsStatistics) (*model.JobsStatistics, error) { + stat *model.JobsStatistics, +) (*model.JobsStatistics, error) { start := time.Now() castType := r.getCastType() @@ -471,7 +475,8 @@ func (r *JobRepository) AddMetricHistograms( ctx context.Context, filter []*model.JobFilter, metrics []string, - stat *model.JobsStatistics) (*model.JobsStatistics, error) { + stat *model.JobsStatistics, +) (*model.JobsStatistics, error) { start := time.Now() // Running Jobs Only: First query jobdata from sqlite, then query data and make bins @@ -503,8 +508,8 @@ func (r *JobRepository) AddMetricHistograms( func (r *JobRepository) jobsStatisticsHistogram( ctx context.Context, value string, - filters []*model.JobFilter) ([]*model.HistoPoint, error) { - + filters []*model.JobFilter, +) ([]*model.HistoPoint, error) { start := time.Now() query, qerr := 
SecurityCheck(ctx, sq.Select(value, "COUNT(job.id) AS count").From("job")) @@ -540,26 +545,8 @@ func (r *JobRepository) jobsStatisticsHistogram( func (r *JobRepository) jobsMetricStatisticsHistogram( ctx context.Context, metric string, - filters []*model.JobFilter) (*model.MetricHistoPoints, error) { - - var dbMetric string - switch metric { - case "cpu_load": - dbMetric = "load_avg" - case "flops_any": - dbMetric = "flops_any_avg" - case "mem_bw": - dbMetric = "mem_bw_avg" - case "mem_used": - dbMetric = "mem_used_max" - case "net_bw": - dbMetric = "net_bw_avg" - case "file_bw": - dbMetric = "file_bw_avg" - default: - return nil, fmt.Errorf("%s not implemented", metric) - } - + filters []*model.JobFilter, +) (*model.MetricHistoPoints, error) { // Get specific Peak or largest Peak var metricConfig *schema.MetricConfig var peak float64 = 0.0 @@ -593,14 +580,15 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( // Make bins, see https://jereze.com/code/sql-histogram/ start := time.Now() + jm := fmt.Sprintf(`json_extract(footprint, "$.%s")`, metric) crossJoinQuery := sq.Select( - fmt.Sprintf(`max(%s) as max`, dbMetric), - fmt.Sprintf(`min(%s) as min`, dbMetric), + fmt.Sprintf(`max(%s) as max`, jm), + fmt.Sprintf(`min(%s) as min`, jm), ).From("job").Where( - fmt.Sprintf(`%s is not null`, dbMetric), + fmt.Sprintf(`%s is not null`, jm), ).Where( - fmt.Sprintf(`%s <= %f`, dbMetric, peak), + fmt.Sprintf(`%s <= %f`, jm, peak), ) crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery) @@ -619,16 +607,18 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( } bins := 10 - binQuery := fmt.Sprintf(`CAST( (case when job.%s = value.max then value.max*0.999999999 else job.%s end - value.min) / (value.max - value.min) * %d as INTEGER )`, dbMetric, dbMetric, bins) + binQuery := fmt.Sprintf(`CAST( (case when %s = value.max + then value.max*0.999999999 else %s end - value.min) / (value.max - + value.min) * %d as INTEGER )`, metric, metric, bins) mainQuery := sq.Select( fmt.Sprintf(`%s + 1 as bin`, binQuery), - fmt.Sprintf(`count(job.%s) as count`, dbMetric), + fmt.Sprintf(`count(%s) as count`, metric), fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery), fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery), ).From("job").CrossJoin( fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs..., - ).Where(fmt.Sprintf(`job.%s is not null and job.%s <= %f`, dbMetric, dbMetric, peak)) + ).Where(fmt.Sprintf(`%s is not null and %s <= %f`, metric, metric, peak)) mainQuery, qerr := SecurityCheck(ctx, mainQuery) @@ -669,8 +659,8 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( func (r *JobRepository) runningJobsMetricStatisticsHistogram( ctx context.Context, metrics []string, - filters []*model.JobFilter) []*model.MetricHistoPoints { - + filters []*model.JobFilter, +) []*model.MetricHistoPoints { // Get Jobs jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil) if err != nil { From e348ec74fde105e384b4e1817fb5f0afaaa97e04 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 12 Jul 2024 14:08:48 +0200 Subject: [PATCH 059/443] Fix bugs in stats.go --- internal/repository/stats.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 0775db5..33cafa0 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -286,6 +286,7 @@ func (r *JobRepository) JobsStats( return stats, 
nil } +// FIXME: Make generic func LoadJobStat(job *schema.JobMeta, metric string) float64 { if stats, ok := job.Statistics[metric]; ok { if metric == "mem_used" { @@ -609,16 +610,16 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( bins := 10 binQuery := fmt.Sprintf(`CAST( (case when %s = value.max then value.max*0.999999999 else %s end - value.min) / (value.max - - value.min) * %d as INTEGER )`, metric, metric, bins) + value.min) * %d as INTEGER )`, jm, jm, bins) mainQuery := sq.Select( fmt.Sprintf(`%s + 1 as bin`, binQuery), - fmt.Sprintf(`count(%s) as count`, metric), + fmt.Sprintf(`count(%s) as count`, jm), fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery), fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery), ).From("job").CrossJoin( fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs..., - ).Where(fmt.Sprintf(`%s is not null and %s <= %f`, metric, metric, peak)) + ).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak)) mainQuery, qerr := SecurityCheck(ctx, mainQuery) @@ -643,7 +644,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( for rows.Next() { point := model.MetricHistoPoint{} if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil { - log.Warnf("Error while scanning rows for %s", metric) + log.Warnf("Error while scanning rows for %s", jm) return nil, err // Totally bricks cc-backend if returned and if all metrics requested? } From 01a4d33514aad00d42b406f349d1f307330b4e2c Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 14 Jul 2024 11:18:38 +0200 Subject: [PATCH 060/443] Refactor: Archive workers and Tasks Work in progress --- cmd/cc-backend/main.go | 65 +----------- go.mod | 2 + go.sum | 4 + internal/metricdata/metricdata.go | 3 +- internal/repository/archiveWorker.go | 112 +++++++++++++++++++++ internal/repository/footprintWorker.go | 8 ++ internal/repository/job.go | 97 ------------------ internal/taskManager/retentionService.go | 70 +++++++++++++ internal/taskManager/stopJobsExceedTime.go | 27 +++++ internal/taskManager/taskManager.go | 29 ++++++ 10 files changed, 256 insertions(+), 161 deletions(-) create mode 100644 internal/repository/archiveWorker.go create mode 100644 internal/repository/footprintWorker.go create mode 100644 internal/taskManager/retentionService.go create mode 100644 internal/taskManager/stopJobsExceedTime.go create mode 100644 internal/taskManager/taskManager.go diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index b0faa13..abe453a 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -16,7 +16,6 @@ import ( "net/http" "os" "os/signal" - "runtime" "runtime/debug" "strings" "sync" @@ -34,13 +33,13 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/routerConfig" + "github.com/ClusterCockpit/cc-backend/internal/taskManager" "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv" "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/web" - "github.com/go-co-op/gocron" "github.com/google/gops/agent" "github.com/gorilla/handlers" "github.com/gorilla/mux" @@ -628,79 +627,21 @@ func main() { api.JobRepository.WaitForArchiving() }() - s := gocron.NewScheduler(time.Local) - if 
config.Keys.StopJobsExceedingWalltime > 0 { - log.Info("Register undead jobs service") - - s.Every(1).Day().At("3:00").Do(func() { - err = jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime) - if err != nil { - log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error()) - } - runtime.GC() - }) + taskManager.RegisterStopJobsExceedTime() } var cfg struct { Retention schema.Retention `json:"retention"` Compression int `json:"compression"` } - cfg.Retention.IncludeDB = true if err = json.Unmarshal(config.Keys.Archive, &cfg); err != nil { log.Warn("Error while unmarshaling raw config json") } - switch cfg.Retention.Policy { - case "delete": - log.Info("Register retention delete service") - - s.Every(1).Day().At("4:00").Do(func() { - startTime := time.Now().Unix() - int64(cfg.Retention.Age*24*3600) - jobs, err := jobRepo.FindJobsBetween(0, startTime) - if err != nil { - log.Warnf("Error while looking for retention jobs: %s", err.Error()) - } - archive.GetHandle().CleanUp(jobs) - - if cfg.Retention.IncludeDB { - cnt, err := jobRepo.DeleteJobsBefore(startTime) - if err != nil { - log.Errorf("Error while deleting retention jobs from db: %s", err.Error()) - } else { - log.Infof("Retention: Removed %d jobs from db", cnt) - } - if err = jobRepo.Optimize(); err != nil { - log.Errorf("Error occured in db optimization: %s", err.Error()) - } - } - }) - case "move": - log.Info("Register retention move service") - - s.Every(1).Day().At("4:00").Do(func() { - startTime := time.Now().Unix() - int64(cfg.Retention.Age*24*3600) - jobs, err := jobRepo.FindJobsBetween(0, startTime) - if err != nil { - log.Warnf("Error while looking for retention jobs: %s", err.Error()) - } - archive.GetHandle().Move(jobs, cfg.Retention.Location) - - if cfg.Retention.IncludeDB { - cnt, err := jobRepo.DeleteJobsBefore(startTime) - if err != nil { - log.Errorf("Error while deleting retention jobs from db: %v", err) - } else { - log.Infof("Retention: Removed %d jobs from db", cnt) - } - if err = jobRepo.Optimize(); err != nil { - log.Errorf("Error occured in db optimization: %v", err) - } - } - }) - } + taskManager.RegisterRetentionService(cfg.Retention) if cfg.Compression > 0 { log.Info("Register compression service") diff --git a/go.mod b/go.mod index 52b0215..f21316e 100644 --- a/go.mod +++ b/go.mod @@ -42,6 +42,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect + github.com/go-co-op/gocron/v2 v2.9.0 // indirect github.com/go-jose/go-jose/v4 v4.0.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect @@ -54,6 +55,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect + github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/go.sum b/go.sum index b250495..4ca5643 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,8 @@ github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl5 github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= github.com/go-co-op/gocron v1.37.0/go.mod 
h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= +github.com/go-co-op/gocron/v2 v2.9.0 h1:+0nTyI3mjc2FGIClBdDWpaLPCNrJ+62o9xbS0ZklEKQ= +github.com/go-co-op/gocron/v2 v2.9.0/go.mod h1:xY7bJxGazKam1cz04EebrlP4S9q4iWdiAylMGP3jY9w= github.com/go-jose/go-jose/v4 v4.0.3 h1:o8aphO8Hv6RPmH+GfzVuyf7YXSBibp+8YyHdOoDESGo= github.com/go-jose/go-jose/v4 v4.0.3/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ= @@ -133,6 +135,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 74d4347..c826113 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -162,7 +162,7 @@ func LoadData(job *schema.Job, ttl = 2 * time.Minute } - prepareJobData(job, jd, scopes) + prepareJobData(jd, scopes) return jd, ttl, size }) @@ -266,7 +266,6 @@ func cacheKey( // statisticsSeries should be available so that a min/median/max Graph can be // used instead of a lot of single lines. func prepareJobData( - job *schema.Job, jobData schema.JobData, scopes []schema.MetricScope, ) { diff --git a/internal/repository/archiveWorker.go b/internal/repository/archiveWorker.go new file mode 100644 index 0000000..42febb5 --- /dev/null +++ b/internal/repository/archiveWorker.go @@ -0,0 +1,112 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. +package repository + +import ( + "context" + "encoding/json" + "time" + + "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/pkg/archive" + "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" + sq "github.com/Masterminds/squirrel" +) + +// Archiving worker thread +func (r *JobRepository) archivingWorker() { + for { + select { + case job, ok := <-r.archiveChannel: + if !ok { + break + } + start := time.Now() + // not using meta data, called to load JobMeta into Cache? 
+			// will fail if job meta not in repository
+			if _, err := r.FetchMetadata(job); err != nil {
+				log.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error())
+				r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
+				continue
+			}
+
+			// metricdata.ArchiveJob will fetch all the data from a MetricDataRepository and push into configured archive backend
+			// TODO: Maybe use context with cancel/timeout here
+			jobMeta, err := metricdata.ArchiveJob(job, context.Background())
+			if err != nil {
+				log.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error())
+				r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
+				continue
+			}
+
+			// Update the jobs database entry one last time:
+			if err := r.MarkArchived(jobMeta, schema.MonitoringStatusArchivingSuccessful); err != nil {
+				log.Errorf("archiving job (dbid: %d) failed at marking archived step: %s", job.ID, err.Error())
+				continue
+			}
+			log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
+			log.Printf("archiving job (dbid: %d) successful", job.ID)
+			r.archivePending.Done()
+		}
+	}
+}
+
+// MarkArchived sets the monitoring status and stores the job footprint for the job with the database id jobId.
+func (r *JobRepository) MarkArchived(
+	jobMeta *schema.JobMeta,
+	monitoringStatus int32,
+) error {
+	stmt := sq.Update("job").
+		Set("monitoring_status", monitoringStatus).
+		Where("job.id = ?", jobMeta.JobID)
+
+	sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
+	if err != nil {
+		log.Errorf("cannot get subcluster: %s", err.Error())
+		return err
+	}
+	footprint := make(map[string]float64)
+
+	for _, fp := range sc.Footprint {
+		footprint[fp] = LoadJobStat(jobMeta, fp)
+	}
+
+	var rawFootprint []byte
+
+	if rawFootprint, err = json.Marshal(footprint); err != nil {
+		log.Warnf("Error while marshaling footprint for job, DB ID '%v'", jobMeta.ID)
+		return err
+	}
+
+	stmt = stmt.Set("footprint", rawFootprint)
+
+	if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil {
+		log.Warn("Error while marking job as archived")
+		return err
+	}
+	return nil
+}
+
+func (r *JobRepository) UpdateMonitoringStatus(job int64, monitoringStatus int32) (err error) {
+	stmt := sq.Update("job").
+		Set("monitoring_status", monitoringStatus).
+		Where("job.id = ?", job)
+
+	_, err = stmt.RunWith(r.stmtCache).Exec()
+	return
+}
+
+// Trigger async archiving
+func (r *JobRepository) TriggerArchiving(job *schema.Job) {
+	r.archivePending.Add(1)
+	r.archiveChannel <- job
+}
+
+// Wait for background thread to finish pending archiving operations
+func (r *JobRepository) WaitForArchiving() {
+	// wait for the worker to process remaining pending jobs
+	r.archivePending.Wait()
+}
diff --git a/internal/repository/footprintWorker.go b/internal/repository/footprintWorker.go
new file mode 100644
index 0000000..2aa0c2b
--- /dev/null
+++ b/internal/repository/footprintWorker.go
@@ -0,0 +1,8 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
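+//
+// Placeholder for the asynchronous footprint update worker; the function
+// body below is still empty (work in progress).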
+package repository + +func (r *JobRepository) footprintWorker() { +} diff --git a/internal/repository/job.go b/internal/repository/job.go index 33b619f..ca8350f 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -5,7 +5,6 @@ package repository import ( - "context" "database/sql" "encoding/json" "errors" @@ -15,7 +14,6 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/graph/model" - "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/lrucache" @@ -278,101 +276,6 @@ func (r *JobRepository) DeleteJobById(id int64) error { return err } -func (r *JobRepository) UpdateMonitoringStatus(job int64, monitoringStatus int32) (err error) { - stmt := sq.Update("job"). - Set("monitoring_status", monitoringStatus). - Where("job.id = ?", job) - - _, err = stmt.RunWith(r.stmtCache).Exec() - return -} - -// Stop updates the job with the database id jobId using the provided arguments. -func (r *JobRepository) MarkArchived( - jobMeta *schema.JobMeta, - monitoringStatus int32, -) error { - stmt := sq.Update("job"). - Set("monitoring_status", monitoringStatus). - Where("job.id = ?", jobMeta.JobID) - - sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster) - if err != nil { - log.Errorf("cannot get subcluster: %s", err.Error()) - return err - } - footprint := make(map[string]float64) - - for _, fp := range sc.Footprint { - footprint[fp] = LoadJobStat(jobMeta, fp) - } - - var rawFootprint []byte - - if rawFootprint, err = json.Marshal(footprint); err != nil { - log.Warnf("Error while marshaling footprint for job, DB ID '%v'", jobMeta.ID) - return err - } - - stmt = stmt.Set("footprint", rawFootprint) - - if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil { - log.Warn("Error while marking job as archived") - return err - } - return nil -} - -// Archiving worker thread -func (r *JobRepository) archivingWorker() { - for { - select { - case job, ok := <-r.archiveChannel: - if !ok { - break - } - start := time.Now() - // not using meta data, called to load JobMeta into Cache? 
- // will fail if job meta not in repository - if _, err := r.FetchMetadata(job); err != nil { - log.Errorf("archiving job (dbid: %d) failed at check metadata step: %s", job.ID, err.Error()) - r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed) - continue - } - - // metricdata.ArchiveJob will fetch all the data from a MetricDataRepository and push into configured archive backend - // TODO: Maybe use context with cancel/timeout here - jobMeta, err := metricdata.ArchiveJob(job, context.Background()) - if err != nil { - log.Errorf("archiving job (dbid: %d) failed at archiving job step: %s", job.ID, err.Error()) - r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed) - continue - } - - // Update the jobs database entry one last time: - if err := r.MarkArchived(jobMeta, schema.MonitoringStatusArchivingSuccessful); err != nil { - log.Errorf("archiving job (dbid: %d) failed at marking archived step: %s", job.ID, err.Error()) - continue - } - log.Debugf("archiving job %d took %s", job.JobID, time.Since(start)) - log.Printf("archiving job (dbid: %d) successful", job.ID) - r.archivePending.Done() - } - } -} - -// Trigger async archiving -func (r *JobRepository) TriggerArchiving(job *schema.Job) { - r.archivePending.Add(1) - r.archiveChannel <- job -} - -// Wait for background thread to finish pending archiving operations -func (r *JobRepository) WaitForArchiving() { - // close channel and wait for worker to process remaining jobs - r.archivePending.Wait() -} - func (r *JobRepository) FindUserOrProjectOrJobname(user *schema.User, searchterm string) (jobid string, username string, project string, jobname string) { if _, err := strconv.Atoi(searchterm); err == nil { // Return empty on successful conversion: parent method will redirect for integer jobId return searchterm, "", "", "" diff --git a/internal/taskManager/retentionService.go b/internal/taskManager/retentionService.go new file mode 100644 index 0000000..ef29b6a --- /dev/null +++ b/internal/taskManager/retentionService.go @@ -0,0 +1,70 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+package taskManager
+
+import (
+	"time"
+
+	"github.com/ClusterCockpit/cc-backend/pkg/archive"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/go-co-op/gocron/v2"
+)
+
+func RegisterRetentionService(cfg schema.Retention) {
+	switch cfg.Policy {
+	case "delete":
+
+		log.Info("Register retention delete service")
+
+		s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
+			gocron.NewTask(
+				func() {
+					startTime := time.Now().Unix() - int64(cfg.Age*24*3600)
+					jobs, err := jobRepo.FindJobsBetween(0, startTime)
+					if err != nil {
+						log.Warnf("Error while looking for retention jobs: %s", err.Error())
+					}
+					archive.GetHandle().CleanUp(jobs)
+
+					if cfg.IncludeDB {
+						cnt, err := jobRepo.DeleteJobsBefore(startTime)
+						if err != nil {
+							log.Errorf("Error while deleting retention jobs from db: %s", err.Error())
+						} else {
+							log.Infof("Retention: Removed %d jobs from db", cnt)
+						}
+						if err = jobRepo.Optimize(); err != nil {
+							log.Errorf("Error occurred in db optimization: %s", err.Error())
+						}
+					}
+				}))
+	case "move":
+		log.Info("Register retention move service")
+
+		s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))),
+			gocron.NewTask(
+				func() {
+					startTime := time.Now().Unix() - int64(cfg.Age*24*3600)
+					jobs, err := jobRepo.FindJobsBetween(0, startTime)
+					if err != nil {
+						log.Warnf("Error while looking for retention jobs: %s", err.Error())
+					}
+					archive.GetHandle().Move(jobs, cfg.Location)
+
+					if cfg.IncludeDB {
+						cnt, err := jobRepo.DeleteJobsBefore(startTime)
+						if err != nil {
+							log.Errorf("Error while deleting retention jobs from db: %v", err)
+						} else {
+							log.Infof("Retention: Removed %d jobs from db", cnt)
+						}
+						if err = jobRepo.Optimize(); err != nil {
+							log.Errorf("Error occurred in db optimization: %v", err)
+						}
+					}
+				}))
+	}
+}
diff --git a/internal/taskManager/stopJobsExceedTime.go b/internal/taskManager/stopJobsExceedTime.go
new file mode 100644
index 0000000..d97813a
--- /dev/null
+++ b/internal/taskManager/stopJobsExceedTime.go
@@ -0,0 +1,27 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
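+//
+// Registers a daily task (03:00) that stops jobs exceeding their configured
+// maximum walltime and triggers a garbage collection run afterwards.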
+package taskManager + +import ( + "github.com/ClusterCockpit/cc-backend/internal/repository" + "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/go-co-op/gocron/v2" +) + +var ( + s gocron.Scheduler + jobRepo *repository.JobRepository +) + +func init() { + var err error + jobRepo = repository.GetJobRepository() + s, err = gocron.NewScheduler() + if err != nil { + log.Fatalf("Error while creating gocron scheduler: %s", err.Error()) + } +} + +func Shutdown() { + s.Shutdown() +} From 801607fc16340029610c41a22d1d63664255f818 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 16 Jul 2024 12:08:10 +0200 Subject: [PATCH 061/443] Refactor main Convert components to Singletons Restructure main package Reduce dependencies --- Makefile | 2 +- cmd/cc-backend/cli.go | 33 ++ cmd/cc-backend/init.go | 85 ++++ cmd/cc-backend/main.go | 491 ++------------------- cmd/cc-backend/server.go | 335 ++++++++++++++ internal/api/api_test.go | 19 +- internal/api/rest.go | 14 +- internal/auth/auth.go | 122 ++--- internal/auth/ldap.go | 28 -- internal/graph/resolver.go | 24 + internal/taskManager/compressionService.go | 41 ++ internal/taskManager/ldapSyncService.go | 36 ++ internal/taskManager/retentionService.go | 97 ++-- internal/taskManager/taskManager.go | 60 ++- 14 files changed, 774 insertions(+), 613 deletions(-) create mode 100644 cmd/cc-backend/cli.go create mode 100644 cmd/cc-backend/init.go create mode 100644 cmd/cc-backend/server.go create mode 100644 internal/taskManager/compressionService.go create mode 100644 internal/taskManager/ldapSyncService.go diff --git a/Makefile b/Makefile index f54c6ea..508c9fb 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ TARGET = ./cc-backend VAR = ./var CFG = config.json .env FRONTEND = ./web/frontend -VERSION = 1.3.1 +VERSION = 1.4.0 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development') CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S") LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}' diff --git a/cmd/cc-backend/cli.go b/cmd/cc-backend/cli.go new file mode 100644 index 0000000..f828a24 --- /dev/null +++ b/cmd/cc-backend/cli.go @@ -0,0 +1,33 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+package main
+
+import "flag"
+
+var (
+	flagReinitDB, flagInit, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagRevertDB, flagForceDB, flagDev, flagVersion, flagLogDateTime bool
+	flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string
+)
+
+func cliInit() {
+	flag.BoolVar(&flagInit, "init", false, "Set up var directory, initialize sqlite database file, config.json and .env")
+	flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
+	flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap")
+	flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling")
+	flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)")
+	flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI")
+	flag.BoolVar(&flagVersion, "version", false, "Show version information and exit")
+	flag.BoolVar(&flagMigrateDB, "migrate-db", false, "Migrate database to supported version and exit")
+	flag.BoolVar(&flagRevertDB, "revert-db", false, "Migrate database to previous version and exit")
+	flag.BoolVar(&flagForceDB, "force-db", false, "Force database version, clear dirty flag and exit")
+	flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
+	flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
+	flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `:[admin,support,manager,api,user]:`")
+	flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
+	flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
+	flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `:,...`")
+	flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
+	flag.Parse()
+}
diff --git a/cmd/cc-backend/init.go b/cmd/cc-backend/init.go
new file mode 100644
index 0000000..5a00a11
--- /dev/null
+++ b/cmd/cc-backend/init.go
@@ -0,0 +1,85 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+# You can generate your own keypair using the gen-keypair tool +JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0=" +JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q==" + +# Some random bytes used as secret for cookie-based sessions (DO NOT USE THIS ONE IN PRODUCTION) +SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629" +` + +const configString = ` +{ + "addr": "127.0.0.1:8080", + "archive": { + "kind": "file", + "path": "./var/job-archive" + }, + "jwts": { + "max-age": "2000h" + }, + "clusters": [ + { + "name": "name", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": "2023-01-01T00:00:00Z", + "to": null + } + } + } + ] +} +` + +func initEnv() { + if util.CheckFileExists("var") { + fmt.Print("Directory ./var already exists. Exiting!\n") + os.Exit(0) + } + + if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil { + log.Fatalf("Writing config.json failed: %s", err.Error()) + } + + if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil { + log.Fatalf("Writing .env failed: %s", err.Error()) + } + + if err := os.Mkdir("var", 0o777); err != nil { + log.Fatalf("Mkdir var failed: %s", err.Error()) + } + + err := repository.MigrateDB("sqlite3", "./var/job.db") + if err != nil { + log.Fatalf("Initialize job.db failed: %s", err.Error()) + } +} diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index abe453a..e1546c7 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -5,157 +5,47 @@ package main import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "flag" "fmt" - "io" - "net" - "net/http" "os" "os/signal" "runtime/debug" "strings" "sync" "syscall" - "time" - "github.com/99designs/gqlgen/graphql/handler" - "github.com/99designs/gqlgen/graphql/playground" - "github.com/ClusterCockpit/cc-backend/internal/api" "github.com/ClusterCockpit/cc-backend/internal/auth" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/internal/graph" - "github.com/ClusterCockpit/cc-backend/internal/graph/generated" "github.com/ClusterCockpit/cc-backend/internal/importer" "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/internal/repository" - "github.com/ClusterCockpit/cc-backend/internal/routerConfig" "github.com/ClusterCockpit/cc-backend/internal/taskManager" - "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv" "github.com/ClusterCockpit/cc-backend/pkg/schema" - "github.com/ClusterCockpit/cc-backend/web" "github.com/google/gops/agent" - "github.com/gorilla/handlers" - "github.com/gorilla/mux" - httpSwagger "github.com/swaggo/http-swagger" _ "github.com/go-sql-driver/mysql" _ "github.com/mattn/go-sqlite3" ) const logoString = ` - ____ _ _ ____ _ _ _ -/ ___| |_ _ ___| |_ ___ _ __ / ___|___ ___| | ___ __ (_) |_ + _____ _ _ ____ _ _ _ +/ ___| |_ _ ___| |_ ___ _ __ / ___|___ ___| | ___ __ (_) |_ | | | | | | / __| __/ _ \ '__| | / _ \ / __| |/ / '_ \| | __| | |___| | |_| \__ \ || __/ | | |__| (_) | (__| <| |_) | | |_ -\____|_|\__,_|___/\__\___|_| \____\___/ \___|_|\_\ .__/|_|\__| +\_____|_|\__,_|___/\__\___|_| \____\___/ \___|_|\_\ .__/|_|\__| |_| ` -const 
envString = ` -# Base64 encoded Ed25519 keys (DO NOT USE THESE TWO IN PRODUCTION!) -# You can generate your own keypair using the gen-keypair tool -JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0=" -JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q==" - -# Some random bytes used as secret for cookie-based sessions (DO NOT USE THIS ONE IN PRODUCTION) -SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629" -` - -const configString = ` -{ - "addr": "127.0.0.1:8080", - "archive": { - "kind": "file", - "path": "./var/job-archive" - }, - "jwts": { - "max-age": "2000h" - }, - "clusters": [ - { - "name": "name", - "metricDataRepository": { - "kind": "cc-metric-store", - "url": "http://localhost:8082", - "token": "" - }, - "filterRanges": { - "numNodes": { - "from": 1, - "to": 64 - }, - "duration": { - "from": 0, - "to": 86400 - }, - "startTime": { - "from": "2023-01-01T00:00:00Z", - "to": null - } - } - } - ] -} -` - var ( date string commit string version string ) -func initEnv() { - if util.CheckFileExists("var") { - fmt.Print("Directory ./var already exists. Exiting!\n") - os.Exit(0) - } - - if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil { - log.Fatalf("Writing config.json failed: %s", err.Error()) - } - - if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil { - log.Fatalf("Writing .env failed: %s", err.Error()) - } - - if err := os.Mkdir("var", 0o777); err != nil { - log.Fatalf("Mkdir var failed: %s", err.Error()) - } - - err := repository.MigrateDB("sqlite3", "./var/job.db") - if err != nil { - log.Fatalf("Initialize job.db failed: %s", err.Error()) - } -} - func main() { - var flagReinitDB, flagInit, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagRevertDB, flagForceDB, flagDev, flagVersion, flagLogDateTime bool - var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string - flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize swlite database file, config.json and .env") - flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)") - flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap") - flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling") - flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)") - flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI") - flag.BoolVar(&flagVersion, "version", false, "Show version information and exit") - flag.BoolVar(&flagMigrateDB, "migrate-db", false, "Migrate database to supported version and exit") - flag.BoolVar(&flagRevertDB, "revert-db", false, "Migrate database to previous version and exit") - flag.BoolVar(&flagForceDB, "force-db", false, "Force database version, clear dirty flag and exit") - flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages") - flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`") - flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. 
Argument format: `:[admin,support,manager,api,user]:`") - flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`") - flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`") - flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `:,...`") - flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`") - flag.Parse() + cliInit() if flagVersion { fmt.Print(logoString) @@ -170,14 +60,6 @@ func main() { // Apply config flags for pkg/log log.Init(flagLogLevel, flagLogDateTime) - if flagInit { - initEnv() - fmt.Print("Succesfully setup environment!\n") - fmt.Print("Please review config.json and .env and adjust it to your needs.\n") - fmt.Print("Add your job-archive at ./var/job-archive.\n") - os.Exit(0) - } - // See https://github.com/google/gops (Runtime overhead is almost zero) if flagGops { if err := agent.Listen(agent.Options{}); err != nil { @@ -201,6 +83,8 @@ func main() { config.Keys.DB = os.Getenv(envvar) } + repository.Connect(config.Keys.DBDriver, config.Keys.DB) + if flagMigrateDB { err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB) if err != nil { @@ -225,19 +109,17 @@ func main() { os.Exit(0) } - repository.Connect(config.Keys.DBDriver, config.Keys.DB) - db := repository.GetConnection() + if flagInit { + initEnv() + fmt.Print("Succesfully setup environment!\n") + fmt.Print("Please review config.json and .env and adjust it to your needs.\n") + fmt.Print("Add your job-archive at ./var/job-archive.\n") + os.Exit(0) + } - var authentication *auth.Authentication if !config.Keys.DisableAuthentication { - var err error - if authentication, err = auth.Init(); err != nil { - log.Fatalf("auth initialization failed: %v", err) - } - if d, err := time.ParseDuration(config.Keys.SessionMaxAge); err != nil { - authentication.SessionMaxAge = d - } + auth.Init() if flagNewUser != "" { parts := strings.SplitN(flagNewUser, ":", 3) @@ -259,12 +141,14 @@ func main() { } } + authHandle := auth.GetAuthInstance() + if flagSyncLDAP { - if authentication.LdapAuth == nil { + if authHandle.LdapAuth == nil { log.Fatal("cannot sync: LDAP authentication is not configured") } - if err := authentication.LdapAuth.Sync(); err != nil { + if err := authHandle.LdapAuth.Sync(); err != nil { log.Fatalf("LDAP sync failed: %v", err) } log.Info("LDAP sync successfull") @@ -281,7 +165,7 @@ func main() { log.Warnf("user '%s' does not have the API role", user.Username) } - jwt, err := authentication.JwtAuth.ProvideJWT(user) + jwt, err := authHandle.JwtAuth.ProvideJWT(user) if err != nil { log.Fatalf("failed to provide JWT to user '%s': %v", user.Username, err) } @@ -317,299 +201,22 @@ func main() { return } - // Setup the http.Handler/Router used by the server - jobRepo := repository.GetJobRepository() - resolver := &graph.Resolver{DB: db.DB, Repo: jobRepo} - graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) - if os.Getenv("DEBUG") != "1" { - // Having this handler means that a error message is returned via GraphQL instead of the connection simply beeing closed. - // The problem with this is that then, no more stacktrace is printed to stderr. 
- graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error { - switch e := err.(type) { - case string: - return fmt.Errorf("MAIN > Panic: %s", e) - case error: - return fmt.Errorf("MAIN > Panic caused by: %w", e) - } - - return errors.New("MAIN > Internal server error (panic)") - }) - } - - api := &api.RestApi{ - JobRepository: jobRepo, - Resolver: resolver, - MachineStateDir: config.Keys.MachineStateDir, - Authentication: authentication, - } - - r := mux.NewRouter() - buildInfo := web.Build{Version: version, Hash: commit, Buildtime: date} - - info := map[string]interface{}{} - info["hasOpenIDConnect"] = false - - if config.Keys.OpenIDConfig != nil { - openIDConnect := auth.NewOIDC(authentication) - openIDConnect.RegisterEndpoints(r) - info["hasOpenIDConnect"] = true - } - - r.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add("Content-Type", "text/html; charset=utf-8") - log.Debugf("##%v##", info) - web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo, Infos: info}) - }).Methods(http.MethodGet) - r.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add("Content-Type", "text/html; charset=utf-8") - web.RenderTemplate(rw, "imprint.tmpl", &web.Page{Title: "Imprint", Build: buildInfo}) - }) - r.HandleFunc("/privacy", func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add("Content-Type", "text/html; charset=utf-8") - web.RenderTemplate(rw, "privacy.tmpl", &web.Page{Title: "Privacy", Build: buildInfo}) - }) - - secured := r.PathPrefix("/").Subrouter() - securedapi := r.PathPrefix("/api").Subrouter() - userapi := r.PathPrefix("/userapi").Subrouter() - configapi := r.PathPrefix("/config").Subrouter() - frontendapi := r.PathPrefix("/frontend").Subrouter() - - if !config.Keys.DisableAuthentication { - r.Handle("/login", authentication.Login( - // On success: - http.RedirectHandler("/", http.StatusTemporaryRedirect), - - // On failure: - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusUnauthorized) - web.RenderTemplate(rw, "login.tmpl", &web.Page{ - Title: "Login failed - ClusterCockpit", - MsgType: "alert-warning", - Message: err.Error(), - Build: buildInfo, - Infos: info, - }) - })).Methods(http.MethodPost) - - r.Handle("/jwt-login", authentication.Login( - // On success: - http.RedirectHandler("/", http.StatusTemporaryRedirect), - - // On failure: - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusUnauthorized) - web.RenderTemplate(rw, "login.tmpl", &web.Page{ - Title: "Login failed - ClusterCockpit", - MsgType: "alert-warning", - Message: err.Error(), - Build: buildInfo, - Infos: info, - }) - })) - - r.Handle("/logout", authentication.Logout( - http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - web.RenderTemplate(rw, "login.tmpl", &web.Page{ - Title: "Bye - ClusterCockpit", - MsgType: "alert-info", - Message: "Logout successful", - Build: buildInfo, - Infos: info, - }) - }))).Methods(http.MethodPost) - - secured.Use(func(next http.Handler) http.Handler { - return authentication.Auth( - // On success; - next, - - // On failure: - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.WriteHeader(http.StatusUnauthorized) - web.RenderTemplate(rw, 
"login.tmpl", &web.Page{ - Title: "Authentication failed - ClusterCockpit", - MsgType: "alert-danger", - Message: err.Error(), - Build: buildInfo, - Infos: info, - }) - }) - }) - - securedapi.Use(func(next http.Handler) http.Handler { - return authentication.AuthApi( - // On success; - next, - - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) - }) - - userapi.Use(func(next http.Handler) http.Handler { - return authentication.AuthUserApi( - // On success; - next, - - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) - }) - - configapi.Use(func(next http.Handler) http.Handler { - return authentication.AuthConfigApi( - // On success; - next, - - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) - }) - - frontendapi.Use(func(next http.Handler) http.Handler { - return authentication.AuthFrontendApi( - // On success; - next, - - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) - }) - } - - if flagDev { - r.Handle("/playground", playground.Handler("GraphQL playground", "/query")) - r.PathPrefix("/swagger/").Handler(httpSwagger.Handler( - httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet) - } - secured.Handle("/query", graphQLEndpoint) - - // Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project. - secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) { - routerConfig.HandleSearchBar(rw, r, buildInfo) - }) - - // Mount all /monitoring/... and /api/... routes. 
- routerConfig.SetupRoutes(secured, buildInfo) - api.MountApiRoutes(securedapi) - api.MountUserApiRoutes(userapi) - api.MountConfigApiRoutes(configapi) - api.MountFrontendApiRoutes(frontendapi) - - if config.Keys.EmbedStaticFiles { - if i, err := os.Stat("./var/img"); err == nil { - if i.IsDir() { - log.Info("Use local directory for static images") - r.PathPrefix("/img/").Handler(http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img")))) - } - } - r.PathPrefix("/").Handler(web.ServeFiles()) - } else { - r.PathPrefix("/").Handler(http.FileServer(http.Dir(config.Keys.StaticFiles))) - } - - r.Use(handlers.CompressHandler) - r.Use(handlers.RecoveryHandler(handlers.PrintRecoveryStack(true))) - r.Use(handlers.CORS( - handlers.AllowCredentials(), - handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization", "Origin"}), - handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}), - handlers.AllowedOrigins([]string{"*"}))) - handler := handlers.CustomLoggingHandler(io.Discard, r, func(_ io.Writer, params handlers.LogFormatterParams) { - if strings.HasPrefix(params.Request.RequestURI, "/api/") { - log.Debugf("%s %s (%d, %.02fkb, %dms)", - params.Request.Method, params.URL.RequestURI(), - params.StatusCode, float32(params.Size)/1024, - time.Since(params.TimeStamp).Milliseconds()) - } else { - log.Debugf("%s %s (%d, %.02fkb, %dms)", - params.Request.Method, params.URL.RequestURI(), - params.StatusCode, float32(params.Size)/1024, - time.Since(params.TimeStamp).Milliseconds()) - } - }) - - var wg sync.WaitGroup - server := http.Server{ - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - Handler: handler, - Addr: config.Keys.Addr, - } - - // Start http or https server - listener, err := net.Listen("tcp", config.Keys.Addr) - if err != nil { - log.Fatalf("starting http listener failed: %v", err) - } - - if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" { - go func() { - http.ListenAndServe(":80", http.RedirectHandler(config.Keys.RedirectHttpTo, http.StatusMovedPermanently)) - }() - } - - if config.Keys.HttpsCertFile != "" && config.Keys.HttpsKeyFile != "" { - cert, err := tls.LoadX509KeyPair(config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile) - if err != nil { - log.Fatalf("loading X509 keypair failed: %v", err) - } - listener = tls.NewListener(listener, &tls.Config{ - Certificates: []tls.Certificate{cert}, - CipherSuites: []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - }, - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - }) - fmt.Printf("HTTPS server listening at %s...", config.Keys.Addr) - } else { - fmt.Printf("HTTP server listening at %s...", config.Keys.Addr) - } + taskManager.Start() + serverInit() // Because this program will want to bind to a privileged port (like 80), the listener must // be established first, then the user can be changed, and after that, // the actual http server can be started. 
-	if err = runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
+	if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
 		log.Fatalf("error while preparing server start: %s", err.Error())
 	}
 
+	var wg sync.WaitGroup
+
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
-			log.Fatalf("starting server failed: %v", err)
-		}
+		serverStart()
 	}()
 
 	wg.Add(1)
@@ -620,55 +227,9 @@ func main() {
 		<-sigs
 		runtimeEnv.SystemdNotifiy(false, "Shutting down ...")
 
-		// First shut down the server gracefully (waiting for all ongoing requests)
-		server.Shutdown(context.Background())
-
-		// Then, wait for any async archivings still pending...
-		api.JobRepository.WaitForArchiving()
+		// Both steps moved to server.go; serverShutdown() must run first so
+		// that open requests and pending archivings are drained.
+		serverShutdown()
+		taskManager.Shutdown()
 	}()
 
-	if config.Keys.StopJobsExceedingWalltime > 0 {
-		taskManager.RegisterStopJobsExceedTime()
-	}
-
-	var cfg struct {
-		Retention   schema.Retention `json:"retention"`
-		Compression int              `json:"compression"`
-	}
-	cfg.Retention.IncludeDB = true
-
-	if err = json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
-		log.Warn("Error while unmarshaling raw config json")
-	}
-
-	taskManager.RegisterRetentionService(cfg.Retention)
-
-	if cfg.Compression > 0 {
-		log.Info("Register compression service")
-
-		s.Every(1).Day().At("5:00").Do(func() {
-			var jobs []*schema.Job
-
-			ar := archive.GetHandle()
-			startTime := time.Now().Unix() - int64(cfg.Compression*24*3600)
-			lastTime := ar.CompressLast(startTime)
-			if startTime == lastTime {
-				log.Info("Compression Service - Complete archive run")
-				jobs, err = jobRepo.FindJobsBetween(0, startTime)
-
-			} else {
-				jobs, err = jobRepo.FindJobsBetween(lastTime, startTime)
-			}
-
-			if err != nil {
-				log.Warnf("Error while looking for compression jobs: %v", err)
-			}
-			ar.Compress(jobs)
-		})
-	}
-
-	s.StartAsync()
-
 	if os.Getenv("GOGC") == "" {
 		debug.SetGCPercent(25)
 	}
diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go
new file mode 100644
index 0000000..5531415
--- /dev/null
+++ b/cmd/cc-backend/server.go
@@ -0,0 +1,335 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
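+
+// server.go bundles the web server part that used to live in main():
+// serverInit() builds the router, the authentication middleware and the
+// REST/GraphQL mounts, serverStart() opens the (optionally TLS wrapped)
+// listener and serves requests, and serverShutdown() drains open requests
+// and pending job archiving before exit.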
+package main
+
+import (
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/99designs/gqlgen/graphql/handler"
+	"github.com/99designs/gqlgen/graphql/playground"
+	"github.com/ClusterCockpit/cc-backend/internal/api"
+	"github.com/ClusterCockpit/cc-backend/internal/auth"
+	"github.com/ClusterCockpit/cc-backend/internal/config"
+	"github.com/ClusterCockpit/cc-backend/internal/graph"
+	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
+	"github.com/ClusterCockpit/cc-backend/internal/routerConfig"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/web"
+	"github.com/gorilla/handlers"
+	"github.com/gorilla/mux"
+	httpSwagger "github.com/swaggo/http-swagger"
+)
+
+var (
+	router    *mux.Router
+	server    *http.Server
+	apiHandle *api.RestApi
+)
+
+func serverInit() {
+	// Set up the http.Handler/Router used by the server
+	graph.Init()
+	resolver := graph.GetResolverInstance()
+	graphQLEndpoint := handler.NewDefaultServer(
+		generated.NewExecutableSchema(generated.Config{Resolvers: resolver}))
+
+	if os.Getenv("DEBUG") != "1" {
+		// Having this handler means that an error message is returned via GraphQL instead of the connection simply being closed.
+		// The problem with this is that then, no more stacktrace is printed to stderr.
+		graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error {
+			switch e := err.(type) {
+			case string:
+				return fmt.Errorf("MAIN > Panic: %s", e)
+			case error:
+				return fmt.Errorf("MAIN > Panic caused by: %w", e)
+			}
+
+			return errors.New("MAIN > Internal server error (panic)")
+		})
+	}
+
+	authHandle := auth.GetAuthInstance()
+
+	apiHandle = api.New()
+
+	router = mux.NewRouter()
+	buildInfo := web.Build{Version: version, Hash: commit, Buildtime: date}
+
+	info := map[string]interface{}{}
+	info["hasOpenIDConnect"] = false
+
+	if config.Keys.OpenIDConfig != nil {
+		openIDConnect := auth.NewOIDC(authHandle)
+		openIDConnect.RegisterEndpoints(router)
+		info["hasOpenIDConnect"] = true
+	}
+
+	router.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
+		rw.Header().Add("Content-Type", "text/html; charset=utf-8")
+		log.Debugf("##%v##", info)
+		web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo, Infos: info})
+	}).Methods(http.MethodGet)
+	router.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) {
+		rw.Header().Add("Content-Type", "text/html; charset=utf-8")
+		web.RenderTemplate(rw, "imprint.tmpl", &web.Page{Title: "Imprint", Build: buildInfo})
+	})
+	router.HandleFunc("/privacy", func(rw http.ResponseWriter, r *http.Request) {
+		rw.Header().Add("Content-Type", "text/html; charset=utf-8")
+		web.RenderTemplate(rw, "privacy.tmpl", &web.Page{Title: "Privacy", Build: buildInfo})
+	})
+
+	secured := router.PathPrefix("/").Subrouter()
+	securedapi := router.PathPrefix("/api").Subrouter()
+	userapi := router.PathPrefix("/userapi").Subrouter()
+	configapi := router.PathPrefix("/config").Subrouter()
+	frontendapi := router.PathPrefix("/frontend").Subrouter()
+
+	if !config.Keys.DisableAuthentication {
+		router.Handle("/login", authHandle.Login(
+			// On success:
+			http.RedirectHandler("/", http.StatusTemporaryRedirect),
+
+			// On failure:
+			func(rw http.ResponseWriter, r *http.Request, err error) {
+				rw.Header().Add("Content-Type", "text/html; charset=utf-8")
+				rw.WriteHeader(http.StatusUnauthorized)
+				web.RenderTemplate(rw, "login.tmpl", &web.Page{
+					Title: "Login failed - ClusterCockpit",
+					MsgType: "alert-warning",
+					Message: err.Error(),
+					Build: buildInfo,
+					Infos: info,
+				})
+			})).Methods(http.MethodPost)
+
+		router.Handle("/jwt-login", authHandle.Login(
+			// On success:
+			http.RedirectHandler("/", http.StatusTemporaryRedirect),
+
+			// On failure:
+			func(rw http.ResponseWriter, r *http.Request, err error) {
+				rw.Header().Add("Content-Type", "text/html; charset=utf-8")
+				rw.WriteHeader(http.StatusUnauthorized)
+				web.RenderTemplate(rw, "login.tmpl", &web.Page{
+					Title: "Login failed - ClusterCockpit",
+					MsgType: "alert-warning",
+					Message: err.Error(),
+					Build: buildInfo,
+					Infos: info,
+				})
+			})))
+
+		router.Handle("/logout", authHandle.Logout(
+			http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+				rw.Header().Add("Content-Type", "text/html; charset=utf-8")
+				rw.WriteHeader(http.StatusOK)
+				web.RenderTemplate(rw, "login.tmpl", &web.Page{
+					Title: "Bye - ClusterCockpit",
+					MsgType: "alert-info",
+					Message: "Logout successful",
+					Build: buildInfo,
+					Infos: info,
+				})
+			}))).Methods(http.MethodPost)
+
+		secured.Use(func(next http.Handler) http.Handler {
+			return authHandle.Auth(
+				// On success:
+				next,
+
+				// On failure:
+				func(rw http.ResponseWriter, r *http.Request, err error) {
+					rw.WriteHeader(http.StatusUnauthorized)
+					web.RenderTemplate(rw, "login.tmpl", &web.Page{
+						Title: "Authentication failed - ClusterCockpit",
+						MsgType: "alert-danger",
+						Message: err.Error(),
+						Build: buildInfo,
+						Infos: info,
+					})
+				})
+		})
+
+		securedapi.Use(func(next http.Handler) http.Handler {
+			return authHandle.AuthApi(
+				// On success:
+				next,
+
+				// On failure: JSON Response
+				func(rw http.ResponseWriter, r *http.Request, err error) {
+					rw.Header().Add("Content-Type", "application/json")
+					rw.WriteHeader(http.StatusUnauthorized)
+					json.NewEncoder(rw).Encode(map[string]string{
+						"status": http.StatusText(http.StatusUnauthorized),
+						"error": err.Error(),
+					})
+				})
+		})
+
+		userapi.Use(func(next http.Handler) http.Handler {
+			return authHandle.AuthUserApi(
+				// On success:
+				next,
+
+				// On failure: JSON Response
+				func(rw http.ResponseWriter, r *http.Request, err error) {
+					rw.Header().Add("Content-Type", "application/json")
+					rw.WriteHeader(http.StatusUnauthorized)
+					json.NewEncoder(rw).Encode(map[string]string{
+						"status": http.StatusText(http.StatusUnauthorized),
+						"error": err.Error(),
+					})
+				})
+		})
+
+		configapi.Use(func(next http.Handler) http.Handler {
+			return authHandle.AuthConfigApi(
+				// On success:
+				next,
+
+				// On failure: JSON Response
+				func(rw http.ResponseWriter, r *http.Request, err error) {
+					rw.Header().Add("Content-Type", "application/json")
+					rw.WriteHeader(http.StatusUnauthorized)
+					json.NewEncoder(rw).Encode(map[string]string{
+						"status": http.StatusText(http.StatusUnauthorized),
+						"error": err.Error(),
+					})
+				})
+		})
+
+		frontendapi.Use(func(next http.Handler) http.Handler {
+			return authHandle.AuthFrontendApi(
+				// On success:
+				next,
+
+				// On failure: JSON Response
+				func(rw http.ResponseWriter, r *http.Request, err error) {
+					rw.Header().Add("Content-Type", "application/json")
+					rw.WriteHeader(http.StatusUnauthorized)
+					json.NewEncoder(rw).Encode(map[string]string{
+						"status": http.StatusText(http.StatusUnauthorized),
+						"error": err.Error(),
+					})
+				})
+		})
+	}
+
+	if flagDev {
+		router.Handle("/playground", playground.Handler("GraphQL playground", "/query"))
+		router.PathPrefix("/swagger/").Handler(httpSwagger.Handler(
+			httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet)
+	}
+
secured.Handle("/query", graphQLEndpoint) + + // Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project. + secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) { + routerConfig.HandleSearchBar(rw, r, buildInfo) + }) + + // Mount all /monitoring/... and /api/... routes. + routerConfig.SetupRoutes(secured, buildInfo) + apiHandle.MountApiRoutes(securedapi) + apiHandle.MountUserApiRoutes(userapi) + apiHandle.MountConfigApiRoutes(configapi) + apiHandle.MountFrontendApiRoutes(frontendapi) + + if config.Keys.EmbedStaticFiles { + if i, err := os.Stat("./var/img"); err == nil { + if i.IsDir() { + log.Info("Use local directory for static images") + router.PathPrefix("/img/").Handler(http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img")))) + } + } + router.PathPrefix("/").Handler(web.ServeFiles()) + } else { + router.PathPrefix("/").Handler(http.FileServer(http.Dir(config.Keys.StaticFiles))) + } + + router.Use(handlers.CompressHandler) + router.Use(handlers.RecoveryHandler(handlers.PrintRecoveryStack(true))) + router.Use(handlers.CORS( + handlers.AllowCredentials(), + handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization", "Origin"}), + handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}), + handlers.AllowedOrigins([]string{"*"}))) +} + +func serverStart() { + handler := handlers.CustomLoggingHandler(io.Discard, router, func(_ io.Writer, params handlers.LogFormatterParams) { + if strings.HasPrefix(params.Request.RequestURI, "/api/") { + log.Debugf("%s %s (%d, %.02fkb, %dms)", + params.Request.Method, params.URL.RequestURI(), + params.StatusCode, float32(params.Size)/1024, + time.Since(params.TimeStamp).Milliseconds()) + } else { + log.Debugf("%s %s (%d, %.02fkb, %dms)", + params.Request.Method, params.URL.RequestURI(), + params.StatusCode, float32(params.Size)/1024, + time.Since(params.TimeStamp).Milliseconds()) + } + }) + + server = &http.Server{ + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + Handler: handler, + Addr: config.Keys.Addr, + } + + // Start http or https server + listener, err := net.Listen("tcp", config.Keys.Addr) + if err != nil { + log.Fatalf("starting http listener failed: %v", err) + } + + if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" { + go func() { + http.ListenAndServe(":80", http.RedirectHandler(config.Keys.RedirectHttpTo, http.StatusMovedPermanently)) + }() + } + + if config.Keys.HttpsCertFile != "" && config.Keys.HttpsKeyFile != "" { + cert, err := tls.LoadX509KeyPair( + config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile) + if err != nil { + log.Fatalf("loading X509 keypair failed: %v", err) + } + listener = tls.NewListener(listener, &tls.Config{ + Certificates: []tls.Certificate{cert}, + CipherSuites: []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + }, + MinVersion: tls.VersionTLS12, + PreferServerCipherSuites: true, + }) + fmt.Printf("HTTPS server listening at %s...", config.Keys.Addr) + } else { + fmt.Printf("HTTP server listening at %s...", config.Keys.Addr) + } + + if err = server.Serve(listener); err != nil && err != http.ErrServerClosed { + log.Fatalf("starting server failed: %v", err) + } +} + +func serverShutdown() { + // First shut down the server gracefully (waiting for all ongoing requests) + server.Shutdown(context.Background()) + + // Then, wait for any async archivings still pending... 
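+	// (shutting down before this returns could leave a half-written
+	// entry behind in the job archive)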
+ apiHandle.JobRepository.WaitForArchiving() +} diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 0354a0f..80a7e64 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/ClusterCockpit/cc-backend/internal/api" + "github.com/ClusterCockpit/cc-backend/internal/auth" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph" "github.com/ClusterCockpit/cc-backend/internal/metricdata" @@ -144,7 +145,6 @@ func setup(t *testing.T) *api.RestApi { archiveCfg := fmt.Sprintf("{\"kind\": \"file\",\"path\": \"%s\"}", jobarchive) repository.Connect("sqlite3", dbfilepath) - db := repository.GetConnection() if err := archive.Init(json.RawMessage(archiveCfg), config.Keys.DisableArchive); err != nil { t.Fatal(err) @@ -154,13 +154,10 @@ func setup(t *testing.T) *api.RestApi { t.Fatal(err) } - jobRepo := repository.GetJobRepository() - resolver := &graph.Resolver{DB: db.DB, Repo: jobRepo} + auth.Init() + graph.Init() - return &api.RestApi{ - JobRepository: resolver.Repo, - Resolver: resolver, - } + return api.New() } func cleanup() { @@ -253,12 +250,13 @@ func TestRestApi(t *testing.T) { t.Fatal(err) } - job, err := restapi.Resolver.Query().Job(ctx, strconv.Itoa(int(res.DBID))) + resolver := graph.GetResolverInstance() + job, err := resolver.Query().Job(ctx, strconv.Itoa(int(res.DBID))) if err != nil { t.Fatal(err) } - job.Tags, err = restapi.Resolver.Job().Tags(ctx, job) + job.Tags, err = resolver.Job().Tags(ctx, job) if err != nil { t.Fatal(err) } @@ -314,7 +312,8 @@ func TestRestApi(t *testing.T) { } restapi.JobRepository.WaitForArchiving() - job, err := restapi.Resolver.Query().Job(ctx, strconv.Itoa(int(dbid))) + resolver := graph.GetResolverInstance() + job, err := resolver.Query().Job(ctx, strconv.Itoa(int(dbid))) if err != nil { t.Fatal(err) } diff --git a/internal/api/rest.go b/internal/api/rest.go index b447a21..01eb429 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -53,12 +53,19 @@ import ( type RestApi struct { JobRepository *repository.JobRepository - Resolver *graph.Resolver Authentication *auth.Authentication MachineStateDir string RepositoryMutex sync.Mutex } +func New() *RestApi { + return &RestApi{ + JobRepository: repository.GetJobRepository(), + MachineStateDir: config.Keys.MachineStateDir, + Authentication: auth.GetAuthInstance(), + } +} + func (api *RestApi) MountApiRoutes(r *mux.Router) { r.StrictSlash(true) @@ -893,7 +900,6 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { } job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) - if err != nil { handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) return @@ -977,7 +983,6 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) } job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) - if err != nil { handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) return @@ -1105,7 +1110,8 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) { } `json:"error"` } - data, err := api.Resolver.Query().JobMetrics(r.Context(), id, metrics, scopes) + resolver := graph.GetResolverInstance() + data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes) if err != nil { json.NewEncoder(rw).Encode(Respone{ Error: &struct { diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 50f4121..e45fa9d 
100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -12,6 +12,7 @@ import ( "errors" "net/http" "os" + "sync" "time" "github.com/ClusterCockpit/cc-backend/internal/config" @@ -26,6 +27,11 @@ type Authenticator interface { Login(user *schema.User, rw http.ResponseWriter, r *http.Request) (*schema.User, error) } +var ( + initOnce sync.Once + authInstance *Authentication +) + type Authentication struct { sessionStore *sessions.CookieStore LdapAuth *LdapAuthenticator @@ -62,71 +68,79 @@ func (auth *Authentication) AuthViaSession( }, nil } -func Init() (*Authentication, error) { - auth := &Authentication{} +func Init() { + initOnce.Do(func() { + authInstance = &Authentication{} - sessKey := os.Getenv("SESSION_KEY") - if sessKey == "" { - log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)") - bytes := make([]byte, 32) - if _, err := rand.Read(bytes); err != nil { - log.Error("Error while initializing authentication -> failed to generate random bytes for session key") - return nil, err - } - auth.sessionStore = sessions.NewCookieStore(bytes) - } else { - bytes, err := base64.StdEncoding.DecodeString(sessKey) - if err != nil { - log.Error("Error while initializing authentication -> decoding session key failed") - return nil, err - } - auth.sessionStore = sessions.NewCookieStore(bytes) - } - - if config.Keys.LdapConfig != nil { - ldapAuth := &LdapAuthenticator{} - if err := ldapAuth.Init(); err != nil { - log.Warn("Error while initializing authentication -> ldapAuth init failed") + sessKey := os.Getenv("SESSION_KEY") + if sessKey == "" { + log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)") + bytes := make([]byte, 32) + if _, err := rand.Read(bytes); err != nil { + log.Fatal("Error while initializing authentication -> failed to generate random bytes for session key") + } + authInstance.sessionStore = sessions.NewCookieStore(bytes) } else { - auth.LdapAuth = ldapAuth - auth.authenticators = append(auth.authenticators, auth.LdapAuth) - } - } else { - log.Info("Missing LDAP configuration: No LDAP support!") - } - - if config.Keys.JwtConfig != nil { - auth.JwtAuth = &JWTAuthenticator{} - if err := auth.JwtAuth.Init(); err != nil { - log.Error("Error while initializing authentication -> jwtAuth init failed") - return nil, err + bytes, err := base64.StdEncoding.DecodeString(sessKey) + if err != nil { + log.Fatal("Error while initializing authentication -> decoding session key failed") + } + authInstance.sessionStore = sessions.NewCookieStore(bytes) } - jwtSessionAuth := &JWTSessionAuthenticator{} - if err := jwtSessionAuth.Init(); err != nil { - log.Info("jwtSessionAuth init failed: No JWT login support!") + if d, err := time.ParseDuration(config.Keys.SessionMaxAge); err != nil { + authInstance.SessionMaxAge = d + } + + if config.Keys.LdapConfig != nil { + ldapAuth := &LdapAuthenticator{} + if err := ldapAuth.Init(); err != nil { + log.Warn("Error while initializing authentication -> ldapAuth init failed") + } else { + authInstance.LdapAuth = ldapAuth + authInstance.authenticators = append(authInstance.authenticators, authInstance.LdapAuth) + } } else { - auth.authenticators = append(auth.authenticators, jwtSessionAuth) + log.Info("Missing LDAP configuration: No LDAP support!") } - jwtCookieSessionAuth := &JWTCookieSessionAuthenticator{} - if err := jwtCookieSessionAuth.Init(); err != nil { - log.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!") + if config.Keys.JwtConfig != nil { + 
authInstance.JwtAuth = &JWTAuthenticator{} + if err := authInstance.JwtAuth.Init(); err != nil { + log.Fatal("Error while initializing authentication -> jwtAuth init failed") + } + + jwtSessionAuth := &JWTSessionAuthenticator{} + if err := jwtSessionAuth.Init(); err != nil { + log.Info("jwtSessionAuth init failed: No JWT login support!") + } else { + authInstance.authenticators = append(authInstance.authenticators, jwtSessionAuth) + } + + jwtCookieSessionAuth := &JWTCookieSessionAuthenticator{} + if err := jwtCookieSessionAuth.Init(); err != nil { + log.Info("jwtCookieSessionAuth init failed: No JWT cookie login support!") + } else { + authInstance.authenticators = append(authInstance.authenticators, jwtCookieSessionAuth) + } } else { - auth.authenticators = append(auth.authenticators, jwtCookieSessionAuth) + log.Info("Missing JWT configuration: No JWT token support!") } - } else { - log.Info("Missing JWT configuration: No JWT token support!") + + authInstance.LocalAuth = &LocalAuthenticator{} + if err := authInstance.LocalAuth.Init(); err != nil { + log.Fatal("Error while initializing authentication -> localAuth init failed") + } + authInstance.authenticators = append(authInstance.authenticators, authInstance.LocalAuth) + }) +} + +func GetAuthInstance() *Authentication { + if authInstance == nil { + log.Fatal("Authentication module not initialized!") } - auth.LocalAuth = &LocalAuthenticator{} - if err := auth.LocalAuth.Init(); err != nil { - log.Error("Error while initializing authentication -> localAuth init failed") - return nil, err - } - auth.authenticators = append(auth.authenticators, auth.LocalAuth) - - return auth, nil + return authInstance } func persistUser(user *schema.User) { diff --git a/internal/auth/ldap.go b/internal/auth/ldap.go index 05672c5..cc7c4f6 100644 --- a/internal/auth/ldap.go +++ b/internal/auth/ldap.go @@ -10,7 +10,6 @@ import ( "net/http" "os" "strings" - "time" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" @@ -34,33 +33,6 @@ func (la *LdapAuthenticator) Init() error { lc := config.Keys.LdapConfig - if lc.SyncInterval != "" { - interval, err := time.ParseDuration(lc.SyncInterval) - if err != nil { - log.Warnf("Could not parse duration for sync interval: %v", - lc.SyncInterval) - return err - } - - if interval == 0 { - log.Info("Sync interval is zero") - return nil - } - - go func() { - ticker := time.NewTicker(interval) - for t := range ticker.C { - log.Printf("sync started at %s", t.Format(time.RFC3339)) - if err := la.Sync(); err != nil { - log.Errorf("sync failed: %s", err.Error()) - } - log.Print("sync done") - } - }() - } else { - log.Info("LDAP configuration key sync_interval invalid") - } - if lc.UserAttr != "" { la.UserAttr = lc.UserAttr } else { diff --git a/internal/graph/resolver.go b/internal/graph/resolver.go index dd7bc3b..0f4dc06 100644 --- a/internal/graph/resolver.go +++ b/internal/graph/resolver.go @@ -1,15 +1,39 @@ package graph import ( + "sync" + "github.com/ClusterCockpit/cc-backend/internal/repository" + "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/jmoiron/sqlx" ) // This file will not be regenerated automatically. // // It serves as dependency injection for your app, add any dependencies you require here. 
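+// The resolver is shared by the GraphQL endpoint and the REST API, so it is
+// built exactly once (guarded by sync.Once); GetResolverInstance() aborts if
+// it is used before Init() was called.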
+var (
+	initOnce         sync.Once
+	resolverInstance *Resolver
+)
 
 type Resolver struct {
 	DB   *sqlx.DB
 	Repo *repository.JobRepository
 }
+
+func Init() {
+	initOnce.Do(func() {
+		db := repository.GetConnection()
+		resolverInstance = &Resolver{
+			DB: db.DB, Repo: repository.GetJobRepository(),
+		}
+	})
+}
+
+func GetResolverInstance() *Resolver {
+	if resolverInstance == nil {
+		log.Fatal("GraphQL resolver not initialized!")
+	}
+
+	return resolverInstance
+}
diff --git a/internal/taskManager/compressionService.go b/internal/taskManager/compressionService.go
new file mode 100644
index 0000000..005a5bb
--- /dev/null
+++ b/internal/taskManager/compressionService.go
@@ -0,0 +1,41 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package taskManager
+
+import (
+	"time"
+
+	"github.com/ClusterCockpit/cc-backend/pkg/archive"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/go-co-op/gocron/v2"
+)
+
+func RegisterCompressionService(compressOlderThan int) {
+	log.Info("Register compression service")
+
+	s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(05, 0, 0))),
+		gocron.NewTask(
+			func() {
+				var jobs []*schema.Job
+				var err error
+
+				ar := archive.GetHandle()
+				startTime := time.Now().Unix() - int64(compressOlderThan*24*3600)
+				lastTime := ar.CompressLast(startTime)
+				if startTime == lastTime {
+					log.Info("Compression Service - Complete archive run")
+					jobs, err = jobRepo.FindJobsBetween(0, startTime)
+
+				} else {
+					jobs, err = jobRepo.FindJobsBetween(lastTime, startTime)
+				}
+
+				if err != nil {
+					log.Warnf("Error while looking for compression jobs: %v", err)
+				}
+				ar.Compress(jobs)
+			}))
+}
diff --git a/internal/taskManager/ldapSyncService.go b/internal/taskManager/ldapSyncService.go
new file mode 100644
index 0000000..a998aa8
--- /dev/null
+++ b/internal/taskManager/ldapSyncService.go
@@ -0,0 +1,36 @@
+// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
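+
+// The periodic LDAP user sync used to run on a hand-rolled ticker inside
+// internal/auth/ldap.go; it is now registered as a gocron task here so that
+// taskManager owns all recurring background work.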
+package taskManager + +import ( + "time" + + "github.com/ClusterCockpit/cc-backend/internal/auth" + "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/go-co-op/gocron/v2" +) + +func RegisterLdapSyncService(ds string) { + interval, err := parseDuration(ds) + if err != nil { + log.Warnf("Could not parse duration for sync interval: %v", + ds) + return + } + + auth := auth.GetAuthInstance() + + log.Info("Register LDAP sync service") + s.NewJob(gocron.DurationJob(interval), + gocron.NewTask( + func() { + t := time.Now() + log.Printf("ldap sync started at %s", t.Format(time.RFC3339)) + if err := auth.LdapAuth.Sync(); err != nil { + log.Errorf("ldap sync failed: %s", err.Error()) + } + log.Print("ldap sync done") + })) +} diff --git a/internal/taskManager/retentionService.go b/internal/taskManager/retentionService.go index ef29b6a..502f890 100644 --- a/internal/taskManager/retentionService.go +++ b/internal/taskManager/retentionService.go @@ -9,62 +9,59 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/go-co-op/gocron/v2" ) -func RegisterRetentionService(cfg schema.Retention) { - switch cfg.Policy { - case "delete": +func RegisterRetentionDeleteService(age int, includeDB bool) { + log.Info("Register retention delete service") - log.Info("Register retention delete service") + s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))), + gocron.NewTask( + func() { + startTime := time.Now().Unix() - int64(age*24*3600) + jobs, err := jobRepo.FindJobsBetween(0, startTime) + if err != nil { + log.Warnf("Error while looking for retention jobs: %s", err.Error()) + } + archive.GetHandle().CleanUp(jobs) - s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))), - gocron.NewTask( - func() { - startTime := time.Now().Unix() - int64(cfg.Age*24*3600) - jobs, err := jobRepo.FindJobsBetween(0, startTime) + if includeDB { + cnt, err := jobRepo.DeleteJobsBefore(startTime) if err != nil { - log.Warnf("Error while looking for retention jobs: %s", err.Error()) + log.Errorf("Error while deleting retention jobs from db: %s", err.Error()) + } else { + log.Infof("Retention: Removed %d jobs from db", cnt) } - archive.GetHandle().CleanUp(jobs) - - if cfg.IncludeDB { - cnt, err := jobRepo.DeleteJobsBefore(startTime) - if err != nil { - log.Errorf("Error while deleting retention jobs from db: %s", err.Error()) - } else { - log.Infof("Retention: Removed %d jobs from db", cnt) - } - if err = jobRepo.Optimize(); err != nil { - log.Errorf("Error occured in db optimization: %s", err.Error()) - } + if err = jobRepo.Optimize(); err != nil { + log.Errorf("Error occured in db optimization: %s", err.Error()) } - })) - case "move": - log.Info("Register retention move service") - - s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))), - gocron.NewTask( - func() { - startTime := time.Now().Unix() - int64(cfg.Age*24*3600) - jobs, err := jobRepo.FindJobsBetween(0, startTime) - if err != nil { - log.Warnf("Error while looking for retention jobs: %s", err.Error()) - } - archive.GetHandle().Move(jobs, cfg.Location) - - if cfg.IncludeDB { - cnt, err := jobRepo.DeleteJobsBefore(startTime) - if err != nil { - log.Errorf("Error while deleting retention jobs from db: %v", err) - } else { - log.Infof("Retention: Removed %d jobs from db", cnt) - } - if err = jobRepo.Optimize(); err != nil { - log.Errorf("Error occured in db optimization: %v", err) - } - } - })) 
- } + } + })) +} + +func RegisterRetentionMoveService(age int, includeDB bool, location string) { + log.Info("Register retention move service") + + s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(04, 0, 0))), + gocron.NewTask( + func() { + startTime := time.Now().Unix() - int64(age*24*3600) + jobs, err := jobRepo.FindJobsBetween(0, startTime) + if err != nil { + log.Warnf("Error while looking for retention jobs: %s", err.Error()) + } + archive.GetHandle().Move(jobs, location) + + if includeDB { + cnt, err := jobRepo.DeleteJobsBefore(startTime) + if err != nil { + log.Errorf("Error while deleting retention jobs from db: %v", err) + } else { + log.Infof("Retention: Removed %d jobs from db", cnt) + } + if err = jobRepo.Optimize(); err != nil { + log.Errorf("Error occured in db optimization: %v", err) + } + } + })) } diff --git a/internal/taskManager/taskManager.go b/internal/taskManager/taskManager.go index 5ddc179..bcd2c06 100644 --- a/internal/taskManager/taskManager.go +++ b/internal/taskManager/taskManager.go @@ -5,8 +5,13 @@ package taskManager import ( + "encoding/json" + "time" + + "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/go-co-op/gocron/v2" ) @@ -15,13 +20,66 @@ var ( jobRepo *repository.JobRepository ) -func init() { +func parseDuration(s string) (time.Duration, error) { + interval, err := time.ParseDuration(s) + if err != nil { + log.Warnf("Could not parse duration for sync interval: %v", + s) + return 0, err + } + + if interval == 0 { + log.Info("TaskManager: Sync interval is zero") + } + + return interval, nil +} + +func Start() { var err error jobRepo = repository.GetJobRepository() s, err = gocron.NewScheduler() if err != nil { log.Fatalf("Error while creating gocron scheduler: %s", err.Error()) } + + if config.Keys.StopJobsExceedingWalltime > 0 { + RegisterStopJobsExceedTime() + } + + var cfg struct { + Retention schema.Retention `json:"retention"` + Compression int `json:"compression"` + } + cfg.Retention.IncludeDB = true + + if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil { + log.Warn("Error while unmarshaling raw config json") + } + + switch cfg.Retention.Policy { + case "delete": + RegisterRetentionDeleteService( + cfg.Retention.Age, + cfg.Retention.IncludeDB) + case "move": + RegisterRetentionMoveService( + cfg.Retention.Age, + cfg.Retention.IncludeDB, + cfg.Retention.Location) + } + + if cfg.Compression > 0 { + RegisterCompressionService(cfg.Compression) + } + + lc := config.Keys.LdapConfig + + if lc.SyncInterval != "" { + RegisterLdapSyncService(lc.SyncInterval) + } + + s.Start() } func Shutdown() { From b6f011c669455c8d93dbb4b4ef9f1b807dfb6253 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 16 Jul 2024 12:34:27 +0200 Subject: [PATCH 062/443] Move footprint update task placeholder to taskmanager --- .../footprintWorker.go => taskManager/footprintService.go} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename internal/{repository/footprintWorker.go => taskManager/footprintService.go} (74%) diff --git a/internal/repository/footprintWorker.go b/internal/taskManager/footprintService.go similarity index 74% rename from internal/repository/footprintWorker.go rename to internal/taskManager/footprintService.go index 2aa0c2b..28a5a72 100644 --- a/internal/repository/footprintWorker.go +++ b/internal/taskManager/footprintService.go @@ -2,7 +2,7 @@ // 
All rights reserved.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
-package repository
+package taskManager
 
-func (r *JobRepository) footprintWorker() {
+func registerFootprintWorker() {
 }

From 721b6b2afa1af162cbde13100b049d96918a4689 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Sat, 20 Jul 2024 08:59:51 +0200
Subject: [PATCH 063/443] Change footprint variable from bool to string

The footprint variable now also indicates the type of statistic used
---
 api/schema.graphqls                            |  2 +-
 internal/graph/generated/generated.go          |  8 ++++----
 internal/importer/testdata/cluster-fritz.json  |  8 ++++----
 pkg/archive/clusterConfig.go                   |  6 +++---
 pkg/archive/testdata/archive/alex/cluster.json | 10 +++++-----
 .../testdata/archive/fritz/cluster.json        | 18 +++++++++---------
 pkg/schema/cluster.go                          |  6 +++---
 7 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/api/schema.graphqls b/api/schema.graphqls
index 8edae6c..d703990 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -194,7 +194,7 @@ type GlobalMetricListItem {
   name: String!
   unit: Unit!
   scope: MetricScope!
-  footprint: Boolean
+  footprint: String
   availability: [ClusterSupport!]!
 }
 
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index 91839a9..d54ddb1 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -2022,7 +2022,7 @@ type GlobalMetricListItem {
   name: String!
   unit: Unit!
   scope: MetricScope!
-  footprint: Boolean
+  footprint: String
   availability: [ClusterSupport!]!
 }
 
@@ -3531,9 +3531,9 @@ func (ec *executionContext) _GlobalMetricListItem_footprint(ctx context.Context,
 	if resTmp == nil {
 		return graphql.Null
 	}
-	res := resTmp.(bool)
+	res := resTmp.(string)
 	fc.Result = res
-	return ec.marshalOBoolean2bool(ctx, field.Selections, res)
+	return ec.marshalOString2string(ctx, field.Selections, res)
 }
 
 func (ec *executionContext) fieldContext_GlobalMetricListItem_footprint(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
@@ -3543,7 +3543,7 @@ func (ec *executionContext) fieldContext_GlobalMetricListItem_footprint(_ contex
 		IsMethod:   false,
 		IsResolver: false,
 		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
-			return nil, errors.New("field of type Boolean does not have child fields")
+			return nil, errors.New("field of type String does not have child fields")
 		},
 	}
 	return fc, nil
diff --git a/internal/importer/testdata/cluster-fritz.json b/internal/importer/testdata/cluster-fritz.json
index 8519fd4..e9f5e43 100644
--- a/internal/importer/testdata/cluster-fritz.json
+++ b/internal/importer/testdata/cluster-fritz.json
@@ -8,7 +8,7 @@
       },
       "scope": "node",
       "aggregation": "avg",
-      "footprint": true,
+      "footprint": "avg",
       "timestep": 60,
       "peak": 72,
       "normal": 72,
@@ -36,7 +36,7 @@
       },
       "scope": "node",
       "aggregation": "sum",
-      "footprint": true,
+      "footprint": "max",
       "timestep": 60,
       "peak": 256,
       "normal": 128,
@@ -51,7 +51,7 @@
       },
       "scope": "hwthread",
       "aggregation": "sum",
-      "footprint": true,
+      "footprint": "avg",
       "timestep": 60,
       "peak": 5600,
       "normal": 1000,
@@ -94,7 +94,7 @@
       },
       "scope": "socket",
       "aggregation": "sum",
-      "footprint": true,
+      "footprint": "avg",
       "timestep": 60,
       "peak": 350,
       "normal": 100,
diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go
index a68a530..6f0178c 100644
--- a/pkg/archive/clusterConfig.go
+++ b/pkg/archive/clusterConfig.go
@@ -84,9 +84,9 @@ func initClusterConfig() error {
newMetric.LowerIsBetter = cfg.LowerIsBetter sc.MetricConfig = append(sc.MetricConfig, *newMetric) - if newMetric.Footprint { + if newMetric.Footprint != "" { sc.Footprint = append(sc.Footprint, newMetric.Name) - ml.Footprint = true + ml.Footprint = newMetric.Footprint } if newMetric.Energy { sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name) @@ -96,7 +96,7 @@ func initClusterConfig() error { availability.SubClusters = append(availability.SubClusters, sc.Name) sc.MetricConfig = append(sc.MetricConfig, *newMetric) - if newMetric.Footprint { + if newMetric.Footprint != "" { sc.Footprint = append(sc.Footprint, newMetric.Name) } if newMetric.Energy { diff --git a/pkg/archive/testdata/archive/alex/cluster.json b/pkg/archive/testdata/archive/alex/cluster.json index fe791f4..cc2888d 100644 --- a/pkg/archive/testdata/archive/alex/cluster.json +++ b/pkg/archive/testdata/archive/alex/cluster.json @@ -8,7 +8,7 @@ }, "scope": "node", "aggregation": "avg", - "footprint": true, + "footprint": "avg", "timestep": 60, "peak": 128, "normal": 128, @@ -36,7 +36,7 @@ }, "scope": "node", "aggregation": "sum", - "footprint": true, + "footprint": "max", "timestep": 60, "peak": 512, "normal": 128, @@ -51,7 +51,7 @@ }, "scope": "hwthread", "aggregation": "sum", - "footprint": true, + "footprint": "avg", "timestep": 60, "peak": 9216, "normal": 1000, @@ -66,7 +66,7 @@ }, "scope": "socket", "aggregation": "sum", - "footprint": true, + "footprint": "avg", "timestep": 60, "peak": 350, "normal": 100, @@ -108,7 +108,7 @@ }, "scope": "accelerator", "aggregation": "avg", - "footprint": true, + "footprint": "avg", "timestep": 60, "peak": 100, "normal": 80, diff --git a/pkg/archive/testdata/archive/fritz/cluster.json b/pkg/archive/testdata/archive/fritz/cluster.json index b0263c4..58ec3af 100644 --- a/pkg/archive/testdata/archive/fritz/cluster.json +++ b/pkg/archive/testdata/archive/fritz/cluster.json @@ -8,7 +8,7 @@ }, "scope": "node", "aggregation": "avg", - "footprint": true, + "footprint": "avg", "timestep": 60, "peak": 72, "normal": 72, @@ -20,7 +20,7 @@ "peak": 104, "normal": 104, "caution": 52, - "footprint": true, + "footprint": "avg", "alert": 20 }, { @@ -28,7 +28,7 @@ "peak": 104, "normal": 104, "caution": 52, - "footprint": true, + "footprint": "avg", "alert": 20 } ] @@ -54,7 +54,7 @@ }, "scope": "node", "aggregation": "sum", - "footprint": true, + "footprint": "max", "timestep": 60, "peak": 256, "normal": 128, @@ -67,7 +67,7 @@ "peak": 1024, "normal": 512, "caution": 900, - "footprint": true, + "footprint": "max", "lowerIsBetter": true, "alert": 1000 }, @@ -76,7 +76,7 @@ "peak": 2048, "normal": 1024, "caution": 1800, - "footprint": true, + "footprint": "max", "lowerIsBetter": true, "alert": 2000 } @@ -90,7 +90,7 @@ }, "scope": "hwthread", "aggregation": "sum", - "footprint": true, + "footprint": "avg", "timestep": 60, "peak": 5600, "normal": 1000, @@ -103,7 +103,7 @@ "normal": 1500, "caution": 400, "alert": 50, - "footprint": true + "footprint": "avg" }, { "name": "spr2tb", @@ -187,7 +187,7 @@ }, "scope": "socket", "aggregation": "sum", - "footprint": true, + "footprint": "avg", "timestep": 60, "peak": 350, "normal": 100, diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index ef1be89..a77bd32 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -51,7 +51,7 @@ type SubClusterConfig struct { Normal float64 `json:"normal"` Caution float64 `json:"caution"` Alert float64 `json:"alert"` - Footprint bool `json:"footprint"` + Footprint string `json:"footprint,omitempty"` Remove bool 
`json:"remove"` LowerIsBetter bool `json:"lowerIsBetter"` Energy bool `json:"energy"` @@ -69,7 +69,7 @@ type MetricConfig struct { Caution float64 `json:"caution"` Alert float64 `json:"alert"` LowerIsBetter bool `json:"lowerIsBetter"` - Footprint bool `json:"footprint"` + Footprint string `json:"footprint,omitempty"` Energy bool `json:"energy"` } @@ -88,7 +88,7 @@ type GlobalMetricListItem struct { Name string `json:"name"` Unit Unit `json:"unit"` Scope MetricScope `json:"scope"` - Footprint bool `json:"footprint"` + Footprint string `json:"footprint,omitempty"` Availability []ClusterSupport `json:"availability"` } From c2f72f72acec211bbe9b40e117397da03cc506b7 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sat, 20 Jul 2024 08:59:51 +0200 Subject: [PATCH 064/443] Update go dependencies --- go.mod | 3 +-- go.sum | 18 ++---------------- internal/repository/testdata/job.db | Bin 114688 -> 114688 bytes 3 files changed, 3 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index f21316e..e343d65 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/ClusterCockpit/cc-units v0.4.0 github.com/Masterminds/squirrel v1.5.4 github.com/coreos/go-oidc/v3 v3.11.0 - github.com/go-co-op/gocron v1.37.0 + github.com/go-co-op/gocron/v2 v2.9.0 github.com/go-ldap/ldap/v3 v3.4.8 github.com/go-sql-driver/mysql v1.8.1 github.com/golang-jwt/jwt/v5 v5.2.1 @@ -42,7 +42,6 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect - github.com/go-co-op/gocron/v2 v2.9.0 // indirect github.com/go-jose/go-jose/v4 v4.0.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect diff --git a/go.sum b/go.sum index 4ca5643..d8759fc 100644 --- a/go.sum +++ b/go.sum @@ -38,7 +38,6 @@ github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/ github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -59,8 +58,6 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk= github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= -github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= github.com/go-co-op/gocron/v2 v2.9.0 h1:+0nTyI3mjc2FGIClBdDWpaLPCNrJ+62o9xbS0ZklEKQ= github.com/go-co-op/gocron/v2 v2.9.0/go.mod h1:xY7bJxGazKam1cz04EebrlP4S9q4iWdiAylMGP3jY9w= github.com/go-jose/go-jose/v4 v4.0.3 h1:o8aphO8Hv6RPmH+GfzVuyf7YXSBibp+8YyHdOoDESGo= @@ -92,7 +89,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark= github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -144,13 +140,8 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= @@ -187,7 +178,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -204,8 +194,6 @@ github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAi github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -226,7 +214,6 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE= @@ -242,9 +229,10 @@ github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08 github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= @@ -311,10 +299,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/repository/testdata/job.db b/internal/repository/testdata/job.db index a70e062886758c81c3f1441b5386e31ded881970..c345327e67e5c6dff1500c214831d7da28019c13 100644 GIT binary patch delta 1460 zcma)5{Yw*Z9KO%p?aobfj=98_wR0jU*m;|$x6>?z8DXLc3R%#0>2$*8&iD76{Zp2c$$0LH3L=0bM}1Y^AD3XTjKsyRZ5l+8}F(IW(e|Bd=!R#2%*qNbK#J@)TlW*@~B9gvCr+ zEoQ(8n^Liyk27gZHf~E}aQtj4v*?GTL&wfyrNMha3izfZp6mVc;UFl>gIt~9@0Daf zqA)0h&TwxiC|nDJ3xpC>3y9eq`0@0G_rfsoYq_s99=GDy>TrzBaYVDL63bVh@6biNgG2n-l? 
z-wp2|`o0~E>Tny(6A6t{hQ=BZX%fX$nWD3nXI=bhrJ|u7AN{T={N1>-RTv?jQ|DH~ zGwM1AFL9jktA%giDQ;MjOmuDJ0ROR}VXY;%Jz|^q*i3eOC`Xkr@)TtH459 z3rk~t+Zc%qlc-%OjGshPOJO{uHI;ECkFl0Iv!*n04y^8@w0eO^Z%G05j^?`B=xkf+|0P z+e!7hkx?(S0(_b{r@-=Docr$<4wORP8&jGSf^zl@$0n6BOcv} zNh{T^3KVmDQq0#u`CPA8-#$x8`DTUop1RBfMTz_^ZyiRtxr$-RuvnKtXPC9rSr zwPh@4;$hW@5D2{?Uc;3FCI=9>zLm83AVdm16DMOgvr=))`wYBLtGb76Su}gKs<61jb7C z=?<$HEvDP7XLQ{DN1U;meeyg@^XYc;86AXxR<+e%c$(nY>5$GOQuWY%-Legg0ssTh BYP$dc From c4d93e492b49940da78f7d788a4ffd90ff476d27 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sat, 20 Jul 2024 10:03:14 +0200 Subject: [PATCH 065/443] Remove bugs in main init --- cmd/cc-backend/main.go | 6 ++++-- internal/taskManager/taskManager.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index e1546c7..3a49923 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -83,8 +83,6 @@ func main() { config.Keys.DB = os.Getenv(envvar) } - repository.Connect(config.Keys.DBDriver, config.Keys.DB) - if flagMigrateDB { err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB) if err != nil { @@ -109,6 +107,8 @@ func main() { os.Exit(0) } + repository.Connect(config.Keys.DBDriver, config.Keys.DB) + if flagInit { initEnv() fmt.Print("Succesfully setup environment!\n") @@ -227,6 +227,8 @@ func main() { <-sigs runtimeEnv.SystemdNotifiy(false, "Shutting down ...") + serverShutdown() + taskManager.Shutdown() }() diff --git a/internal/taskManager/taskManager.go b/internal/taskManager/taskManager.go index bcd2c06..006469c 100644 --- a/internal/taskManager/taskManager.go +++ b/internal/taskManager/taskManager.go @@ -75,7 +75,7 @@ func Start() { lc := config.Keys.LdapConfig - if lc.SyncInterval != "" { + if lc != nil && lc.SyncInterval != "" { RegisterLdapSyncService(lc.SyncInterval) } From 6a1cb51c2f59c0c0cd895ecae33742de16c55bba Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 22 Jul 2024 15:41:33 +0200 Subject: [PATCH 066/443] Refactor svelte frontend - Adapt to new metricConfig logic - Footprint-Metrics generalized for bar card - Footprint-Metrics in stats filter and sorting - Frontend always uses GQL, except adminOptions - Job View will load scopes for all metrics on request --- api/schema.graphqls | 2 + internal/api/rest.go | 1 - internal/graph/generated/generated.go | 64 ++++- internal/graph/model/models_gen.go | 2 + internal/repository/jobQuery.go | 40 ++- internal/repository/stats.go | 13 +- web/frontend/src/Analysis.root.svelte | 57 ++--- web/frontend/src/Config.root.svelte | 3 - web/frontend/src/HistogramSelection.svelte | 18 +- web/frontend/src/Job.root.svelte | 237 ++++++++++-------- web/frontend/src/JobFootprint.svelte | 156 ++++-------- web/frontend/src/Jobs.root.svelte | 2 +- web/frontend/src/Metric.svelte | 59 ++--- web/frontend/src/MetricSelection.svelte | 59 ++--- web/frontend/src/Node.root.svelte | 38 +-- web/frontend/src/PlotSelection.svelte | 1 - web/frontend/src/StatsTable.svelte | 5 - web/frontend/src/Status.root.svelte | 2 +- web/frontend/src/Systems.root.svelte | 35 ++- web/frontend/src/TagManagement.svelte | 3 - web/frontend/src/User.root.svelte | 5 +- web/frontend/src/Zoom.svelte | 65 ----- web/frontend/src/config/UserSettings.svelte | 1 - web/frontend/src/config/admin/AddUser.svelte | 1 - .../src/config/admin/EditProject.svelte | 2 - web/frontend/src/config/admin/EditRole.svelte | 2 - web/frontend/src/filters/Filters.svelte | 5 +- web/frontend/src/filters/Resources.svelte | 1 - web/frontend/src/filters/Stats.svelte | 88 
++----- web/frontend/src/joblist/JobList.svelte | 37 ++- web/frontend/src/joblist/Row.svelte | 37 +-- web/frontend/src/joblist/SortSelection.svelte | 45 ++-- web/frontend/src/plots/MetricPlot.svelte | 111 +++----- web/frontend/src/plots/Polar.svelte | 5 +- web/frontend/src/plots/Roofline.svelte | 6 +- web/frontend/src/units.js | 1 + web/frontend/src/utils.js | 226 +++++++++-------- web/templates/monitoring/job.tmpl | 2 - 38 files changed, 627 insertions(+), 810 deletions(-) delete mode 100644 web/frontend/src/Zoom.svelte diff --git a/api/schema.graphqls b/api/schema.graphqls index d703990..568c15d 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -272,6 +272,7 @@ input JobFilter { input OrderByInput { field: String! + type: String!, order: SortDirectionEnum! = ASC } @@ -319,6 +320,7 @@ type HistoPoint { type MetricHistoPoints { metric: String! unit: String! + stat: String data: [MetricHistoPoint!] } diff --git a/internal/api/rest.go b/internal/api/rest.go index 01eb429..c8f4e7a 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -119,7 +119,6 @@ func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) { if api.Authentication != nil { r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet) r.HandleFunc("/configuration/", api.updateConfiguration).Methods(http.MethodPost) - r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet) // Fetched in Job.svelte: Needs All-User-Access-Session-Auth } } diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index d54ddb1..9ca0a60 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -211,6 +211,7 @@ type ComplexityRoot struct { MetricHistoPoints struct { Data func(childComplexity int) int Metric func(childComplexity int) int + Stat func(childComplexity int) int Unit func(childComplexity int) int } @@ -1104,6 +1105,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricHistoPoints.Metric(childComplexity), true + case "MetricHistoPoints.stat": + if e.complexity.MetricHistoPoints.Stat == nil { + break + } + + return e.complexity.MetricHistoPoints.Stat(childComplexity), true + case "MetricHistoPoints.unit": if e.complexity.MetricHistoPoints.Unit == nil { break @@ -2100,6 +2108,7 @@ input JobFilter { input OrderByInput { field: String! + type: String!, order: SortDirectionEnum! = ASC } @@ -2147,6 +2156,7 @@ type HistoPoint { type MetricHistoPoints { metric: String! unit: String! + stat: String data: [MetricHistoPoint!] 
} @@ -6445,6 +6455,8 @@ func (ec *executionContext) fieldContext_JobsStatistics_histMetrics(_ context.Co return ec.fieldContext_MetricHistoPoints_metric(ctx, field) case "unit": return ec.fieldContext_MetricHistoPoints_unit(ctx, field) + case "stat": + return ec.fieldContext_MetricHistoPoints_stat(ctx, field) case "data": return ec.fieldContext_MetricHistoPoints_data(ctx, field) } @@ -7295,6 +7307,47 @@ func (ec *executionContext) fieldContext_MetricHistoPoints_unit(_ context.Contex return fc, nil } +func (ec *executionContext) _MetricHistoPoints_stat(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_stat(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stat, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_stat(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { fc, err := ec.fieldContext_MetricHistoPoints_data(ctx, field) if err != nil { @@ -13217,7 +13270,7 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj asMap["order"] = "ASC" } - fieldsInOrder := [...]string{"field", "order"} + fieldsInOrder := [...]string{"field", "type", "order"} for _, k := range fieldsInOrder { v, ok := asMap[k] if !ok { @@ -13231,6 +13284,13 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj return it, err } it.Field = data + case "type": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("type")) + data, err := ec.unmarshalNString2string(ctx, v) + if err != nil { + return it, err + } + it.Type = data case "order": ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("order")) data, err := ec.unmarshalNSortDirectionEnum2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortDirectionEnum(ctx, v) @@ -14673,6 +14733,8 @@ func (ec *executionContext) _MetricHistoPoints(ctx context.Context, sel ast.Sele if out.Values[i] == graphql.Null { out.Invalids++ } + case "stat": + out.Values[i] = ec._MetricHistoPoints_stat(ctx, field, obj) case "data": out.Values[i] = ec._MetricHistoPoints_data(ctx, field, obj) default: diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index e3b4a11..6c731a2 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -123,6 +123,7 @@ type MetricHistoPoint struct { type MetricHistoPoints struct { Metric string `json:"metric"` Unit string `json:"unit"` + Stat 
*string `json:"stat,omitempty"` Data []*MetricHistoPoint `json:"data,omitempty"` } @@ -142,6 +143,7 @@ type NodeMetrics struct { type OrderByInput struct { Field string `json:"field"` + Type string `json:"type"` Order SortDirectionEnum `json:"order"` } diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index c52577d..7b575ef 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -31,14 +31,28 @@ func (r *JobRepository) QueryJobs( if order != nil { field := toSnakeCase(order.Field) - - switch order.Order { - case model.SortDirectionEnumAsc: - query = query.OrderBy(fmt.Sprintf("job.%s ASC", field)) - case model.SortDirectionEnumDesc: - query = query.OrderBy(fmt.Sprintf("job.%s DESC", field)) - default: - return nil, errors.New("REPOSITORY/QUERY > invalid sorting order") + if order.Type == "col" { + // "col": Fixed column name query + switch order.Order { + case model.SortDirectionEnumAsc: + query = query.OrderBy(fmt.Sprintf("job.%s ASC", field)) + case model.SortDirectionEnumDesc: + query = query.OrderBy(fmt.Sprintf("job.%s DESC", field)) + default: + return nil, errors.New("REPOSITORY/QUERY > invalid sorting order for column") + } + } else { + // "foot": Order by footprint JSON field values + // Verify and Search Only in Valid Jsons + query = query.Where("JSON_VALID(meta_data)") + switch order.Order { + case model.SortDirectionEnumAsc: + query = query.OrderBy(fmt.Sprintf("JSON_EXTRACT(footprint, \"$.%s\") ASC", field)) + case model.SortDirectionEnumDesc: + query = query.OrderBy(fmt.Sprintf("JSON_EXTRACT(footprint, \"$.%s\") DESC", field)) + default: + return nil, errors.New("REPOSITORY/QUERY > invalid sorting order for footprint") + } } } @@ -177,8 +191,8 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = buildStringCondition("job.resources", filter.Node, query) } if filter.MetricStats != nil { - for _, m := range filter.MetricStats { - query = buildFloatJsonCondition("job.metric_stats", m.Range, query) + for _, ms := range filter.MetricStats { + query = buildFloatJsonCondition(ms.MetricName, ms.Range, query) } } return query @@ -200,8 +214,10 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui } } -func buildFloatJsonCondition(field string, cond *model.FloatRange, query sq.SelectBuilder) sq.SelectBuilder { - return query.Where("JSON_EXTRACT(footprint, '$."+field+"') BETWEEN ? AND ?", cond.From, cond.To) +func buildFloatJsonCondition(condName string, condRange *model.FloatRange, query sq.SelectBuilder) sq.SelectBuilder { + // Verify and Search Only in Valid Jsons + query = query.Where("JSON_VALID(footprint)") + return query.Where("JSON_EXTRACT(footprint, \"$."+condName+"\") BETWEEN ? 
AND ?", condRange.From, condRange.To) } func buildStringCondition(field string, cond *model.StringInput, query sq.SelectBuilder) sq.SelectBuilder { diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 33cafa0..81ca8d1 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -552,12 +552,14 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( var metricConfig *schema.MetricConfig var peak float64 = 0.0 var unit string = "" + var footprintStat string = "" for _, f := range filters { if f.Cluster != nil { metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric) peak = metricConfig.Peak unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base + footprintStat = metricConfig.Footprint log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric) } } @@ -572,21 +574,26 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( if unit == "" { unit = m.Unit.Prefix + m.Unit.Base } + if footprintStat == "" { + footprintStat = m.Footprint + } } } } } - // log.Debugf("Metric %s: DB %s, Peak %f, Unit %s", metric, dbMetric, peak, unit) + // log.Debugf("Metric %s, Peak %f, Unit %s, Aggregation %s", metric, peak, unit, aggreg) // Make bins, see https://jereze.com/code/sql-histogram/ start := time.Now() - jm := fmt.Sprintf(`json_extract(footprint, "$.%s")`, metric) + jm := fmt.Sprintf(`json_extract(footprint, "$.%s")`, (metric + "_" + footprintStat)) crossJoinQuery := sq.Select( fmt.Sprintf(`max(%s) as max`, jm), fmt.Sprintf(`min(%s) as min`, jm), ).From("job").Where( + "JSON_VALID(footprint)", + ).Where( fmt.Sprintf(`%s is not null`, jm), ).Where( fmt.Sprintf(`%s <= %f`, jm, peak), @@ -651,7 +658,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( points = append(points, &point) } - result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points} + result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points} log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) return &result, nil diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index 0592f28..7ae073d 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -48,8 +48,10 @@ let colWidth1, colWidth2, colWidth3, colWidth4; let numBins = 50; let maxY = -1; + + const initialized = getContext("initialized"); + const globalMetrics = getContext("globalMetrics"); const ccconfig = getContext("cc-config"); - const metricConfig = getContext("metrics"); let metricsInHistograms = ccconfig.analysis_view_histogramMetrics, metricsInScatterplots = ccconfig.analysis_view_scatterPlotMetrics; @@ -268,6 +270,19 @@ } } + let availableMetrics = []; + let metricUnits = {}; + let metricScopes = {}; + function loadMetrics(isInitialized) { + if (!isInitialized) return + availableMetrics = [...globalMetrics.filter((gm) => gm?.availability.find((av) => av.cluster == cluster.name))] + for (let sm of availableMetrics) { + metricUnits[sm.name] = (sm?.unit?.prefix ? sm.unit.prefix : "") + (sm?.unit?.base ? 
sm.unit.base : "") + metricScopes[sm.name] = sm?.scope + } + } + + $: loadMetrics($initialized) $: updateEntityConfiguration(groupSelection.key); $: updateCategoryConfiguration(sortSelection.key); @@ -285,7 +300,7 @@ {$initq.error.message} {:else if cluster} mc.name)} + availableMetrics={availableMetrics.map((av) => av.name)} bind:metricsInHistograms bind:metricsInScatterplots /> @@ -506,7 +521,7 @@ metric, ...binsFromFootprint( $footprintsQuery.data.footprints.timeWeights, - metricConfig(cluster.name, metric)?.scope, + metricScopes[metric], $footprintsQuery.data.footprints.metrics.find( (f) => f.metric == metric, ).data, @@ -521,22 +536,8 @@ height={250} usesBins={true} title="Average Distribution of '{item.metric}'" - xlabel={`${item.metric} bin maximum ${ - (metricConfig(cluster.name, item.metric)?.unit?.prefix - ? "[" + metricConfig(cluster.name, item.metric)?.unit?.prefix - : "") + - (metricConfig(cluster.name, item.metric)?.unit?.base - ? metricConfig(cluster.name, item.metric)?.unit?.base + "]" - : "") - }`} - xunit={`${ - (metricConfig(cluster.name, item.metric)?.unit?.prefix - ? metricConfig(cluster.name, item.metric)?.unit?.prefix - : "") + - (metricConfig(cluster.name, item.metric)?.unit?.base - ? metricConfig(cluster.name, item.metric)?.unit?.base - : "") - }`} + xlabel={`${item.metric} bin maximum [${metricUnits[item.metric]}]`} + xunit={`${metricUnits[item.metric]}`} ylabel="Normalized Hours" yunit="Hours" /> @@ -578,22 +579,8 @@ {width} height={250} color={"rgba(0, 102, 204, 0.33)"} - xLabel={`${item.m1} [${ - (metricConfig(cluster.name, item.m1)?.unit?.prefix - ? metricConfig(cluster.name, item.m1)?.unit?.prefix - : "") + - (metricConfig(cluster.name, item.m1)?.unit?.base - ? metricConfig(cluster.name, item.m1)?.unit?.base - : "") - }]`} - yLabel={`${item.m2} [${ - (metricConfig(cluster.name, item.m2)?.unit?.prefix - ? metricConfig(cluster.name, item.m2)?.unit?.prefix - : "") + - (metricConfig(cluster.name, item.m2)?.unit?.base - ? 
metricConfig(cluster.name, item.m2)?.unit?.base - : "") - }]`} + xLabel={`${item.m1} [${metricUnits[item.m1]}]`} + yLabel={`${item.m2} [${metricUnits[item.m2]}]`} X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeWeights.nodeHours} diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte index 61e99a8..fde9342 100644 --- a/web/frontend/src/Config.root.svelte +++ b/web/frontend/src/Config.root.svelte @@ -1,12 +1,9 @@ (isOpen = !isOpen)}> diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 8cf8f87..fa7ddd1 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -2,10 +2,14 @@ import { init, groupByScope, - fetchMetricsStore, checkMetricDisabled, transformDataForRoofline, } from "./utils.js"; + import { + queryStore, + gql, + getContextClient + } from "@urql/svelte"; import { Row, Col, @@ -34,15 +38,27 @@ export let authlevel; export let roles; - const accMetrics = [ - "acc_utilization", - "acc_mem_used", - "acc_power", - "nv_mem_util", - "nv_sm_clock", - "nv_temp", - ]; - let accNodeOnly; + // Setup General + + const ccconfig = getContext("cc-config") + + let isMetricsSelectionOpen = false, + showFootprint = !!ccconfig[`job_view_showFootprint`], + selectedMetrics = [], + selectedScopes = []; + + let plots = {}, + jobTags, + statsTable, + jobFootprint; + + let missingMetrics = [], + missingHosts = [], + somethingMissing = false; + + // Setup GQL + // First: Add Job Query to init function -> Only requires DBID as argument, received via URL-ID + // Second: Trigger jobMetrics query with now received jobInfos (scopes: from job metadata, selectedMetrics: from config or all, job: from url-id) const { query: initq } = init(` job(id: "${dbid}") { @@ -55,99 +71,100 @@ metaData, userData { name, email }, concurrentJobs { items { id, jobId }, count, listQuery }, - flopsAnyAvg, memBwAvg, loadAvg + footprint { name, stat, value } } `); - const ccconfig = getContext("cc-config"), - clusters = getContext("clusters"), - metrics = getContext("metrics"); + const client = getContextClient(); + const query = gql` + query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!) 
{ + jobMetrics(id: $dbid, metrics: $selectedMetrics, scopes: $selectedScopes) { + name + scope + metric { + unit { + prefix + base + } + timestep + statisticsSeries { + min + median + max + } + series { + hostname + id + data + statistics { + min + avg + max + } + } + } + } + } + `; - let isMetricsSelectionOpen = false, - selectedMetrics = [], - showFootprint = true, - isFetched = new Set(); - const [jobMetrics, startFetching] = fetchMetricsStore(); + $: jobMetrics = queryStore({ + client: client, + query: query, + variables: { dbid, selectedMetrics, selectedScopes }, + }); + + function loadAllScopes() { + selectedScopes = [...selectedScopes, "socket", "core"] + jobMetrics = queryStore({ + client: client, + query: query, + variables: { dbid, selectedMetrics, selectedScopes}, + }); + } + + // Handle Job Query on Init -> is not executed anymore getContext("on-init")(() => { let job = $initq.data.job; if (!job) return; - selectedMetrics = - ccconfig[`job_view_selectedMetrics:${job.cluster}`] || - clusters - .find((c) => c.name == job.cluster) - .metricConfig.map((mc) => mc.name); - - showFootprint = - ccconfig[`job_view_showFootprint`] - - let toFetch = new Set([ + const pendingMetrics = [ "flops_any", "mem_bw", - ...selectedMetrics, + ...(ccconfig[`job_view_selectedMetrics:${job.cluster}`] || + $initq.data.globalMetrics.reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster)) { + names.push(gm.name); + } + return names; + }, []) + ), ...(ccconfig[`job_view_polarPlotMetrics:${job.cluster}`] || - ccconfig[`job_view_polarPlotMetrics`]), + ccconfig[`job_view_polarPlotMetrics`] + ), ...(ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] || - ccconfig[`job_view_nodestats_selectedMetrics`]), - ]); + ccconfig[`job_view_nodestats_selectedMetrics`] + ), + ]; - // Select default Scopes to load: Check before if accelerator metrics are not on accelerator scope by default - accNodeOnly = [...toFetch].some(function (m) { - if (accMetrics.includes(m)) { - const mc = metrics(job.cluster, m); - return mc.scope !== "accelerator"; - } else { - return false; - } + // Select default Scopes to load: Check before if any metric has accelerator scope by default + const accScopeDefault = [...pendingMetrics].some(function (m) { + const cluster = $initq.data.clusters.find((c) => c.name == job.cluster); + const subCluster = cluster.subClusters.find((sc) => sc.name == job.subCluster); + return subCluster.metricConfig.find((smc) => smc.name == m)?.scope === "accelerator"; }); - if (job.numAcc === 0 || accNodeOnly === true) { - // No Accels or Accels on Node Scope - startFetching( - job, - [...toFetch], - job.numNodes > 2 ? ["node"] : ["node", "socket", "core"], - ); - } else { - // Accels and not on node scope - startFetching( - job, - [...toFetch], - job.numNodes > 2 - ? ["node", "accelerator"] - : ["node", "accelerator", "socket", "core"], - ); + const pendingScopes = ["node"] + if (accScopeDefault) pendingScopes.push("accelerator") + if (job.numNodes === 1) { + pendingScopes.push("socket") + pendingScopes.push("core") } - isFetched = toFetch; + selectedMetrics = [...new Set(pendingMetrics)]; + selectedScopes = [...new Set(pendingScopes)]; }); - const lazyFetchMoreMetrics = () => { - let notYetFetched = new Set(); - for (let m of selectedMetrics) { - if (!isFetched.has(m)) { - notYetFetched.add(m); - isFetched.add(m); - } - } - - if (notYetFetched.size > 0) - startFetching( - $initq.data.job, - [...notYetFetched], - $initq.data.job.numNodes > 2 ? 
["node"] : ["node", "core"], - ); - }; - - // Fetch more data once required: - $: if ($initq.data && $jobMetrics.data && selectedMetrics) - lazyFetchMoreMetrics(); - - let plots = {}, - jobTags, - statsTable, - jobFootprint; - + // Interactive Document Title $: document.title = $initq.fetching ? "Loading..." : $initq.error @@ -155,15 +172,15 @@ : `Job ${$initq.data.job.jobId} - ClusterCockpit`; // Find out what metrics or hosts are missing: - let missingMetrics = [], - missingHosts = [], - somethingMissing = false; - $: if ($initq.data && $jobMetrics.data) { + $: if ($initq?.data && $jobMetrics?.data?.jobMetrics) { let job = $initq.data.job, metrics = $jobMetrics.data.jobMetrics, - metricNames = clusters - .find((c) => c.name == job.cluster) - .metricConfig.map((mc) => mc.name); + metricNames = $initq.data.globalMetrics.reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster)) { + names.push(gm.name); + } + return names; + }, []); // Metric not found in JobMetrics && Metric not explicitly disabled in config or deselected: Was expected, but is Missing missingMetrics = metricNames.filter( @@ -192,6 +209,7 @@ somethingMissing = missingMetrics.length > 0 || missingHosts.length > 0; } + // Helper const orderAndMap = (grouped, selectedMetrics) => selectedMetrics.map((metric) => ({ metric: metric, @@ -214,18 +232,15 @@ {/if} - {#if $jobMetrics.data && showFootprint} - {#key $jobMetrics.data} - - - - {/key} + {#if $initq.data && showFootprint} + + + {/if} - {#if $jobMetrics.data && $initq.data} + {#if $initq?.data && $jobMetrics?.data?.jobMetrics} {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} {#if authlevel > roles.manager} @@ -270,27 +285,29 @@ `job_view_polarPlotMetrics:${$initq.data.job.cluster}` ] || ccconfig[`job_view_polarPlotMetrics`]} cluster={$initq.data.job.cluster} + subCluster={$initq.data.job.subCluster} jobMetrics={$jobMetrics.data.jobMetrics} /> c.name == $initq.data.job.cluster) .subClusters.find((sc) => sc.name == $initq.data.job.subCluster)} data={transformDataForRoofline( $jobMetrics.data.jobMetrics.find( (m) => m.name == "flops_any" && m.scope == "node", - ).metric, + )?.metric, $jobMetrics.data.jobMetrics.find( (m) => m.name == "mem_bw" && m.scope == "node", - ).metric, + )?.metric, )} /> {:else} + {/if} @@ -318,7 +335,7 @@ {$jobMetrics.error.message} {:else if $jobMetrics.fetching} - {:else if $jobMetrics.data && $initq.data} + {:else if $initq?.data && $jobMetrics?.data?.jobMetrics} statsTable.moreLoaded(detail)} + on:load-all={loadAllScopes} job={$initq.data.job} metricName={item.metric} + metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit} + nativeScope={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.scope} rawData={item.data.map((x) => x.metric)} scopes={item.data.map((x) => x.scope)} {width} @@ -388,8 +407,8 @@ tab="Statistics Table" active={!somethingMissing} > - {#if $jobMetrics.data} - {#key $jobMetrics.data} + {#if $jobMetrics?.data?.jobMetrics} + {#key $jobMetrics.data.jobMetrics} - export function findJobThresholds(job, metricConfig, subClusterConfig) { - if (!job || !metricConfig || !subClusterConfig) { + export function findJobThresholds(job, metricConfig) { + if (!job || !metricConfig) { console.warn("Argument missing for findJobThresholds!"); return null; } - const subclusterThresholds = metricConfig.subClusters.find( - (sc) => sc.name == subClusterConfig.name, - ); + // metricConfig is on subCluster-Level const defaultThresholds = { - 
peak: subclusterThresholds - ? subclusterThresholds.peak - : metricConfig.peak, - normal: subclusterThresholds - ? subclusterThresholds.normal - : metricConfig.normal, - caution: subclusterThresholds - ? subclusterThresholds.caution - : metricConfig.caution, - alert: subclusterThresholds - ? subclusterThresholds.alert - : metricConfig.alert, + peak: metricConfig.peak, + normal: metricConfig.normal, + caution: metricConfig.caution, + alert: metricConfig.alert }; // Job_Exclusivity does not matter, only aggregation if (metricConfig.aggregation === "avg") { return defaultThresholds; } else if (metricConfig.aggregation === "sum") { - const jobFraction = - job.numHWThreads / subClusterConfig.topology.node.length; + const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster) + const jobFraction = job.numHWThreads / topol.node.length; + return { peak: round(defaultThresholds.peak * jobFraction, 0), normal: round(defaultThresholds.normal * jobFraction, 0), @@ -55,109 +46,56 @@ Progress, Icon, Tooltip, + Row, + Col } from "@sveltestrap/sveltestrap"; - import { mean, round } from "mathjs"; + import { round } from "mathjs"; export let job; - export let jobMetrics; export let view = "job"; export let width = "auto"; + export let height = "310px"; - const clusters = getContext("clusters"); - const subclusterConfig = clusters - .find((c) => c.name == job.cluster) - .subClusters.find((sc) => sc.name == job.subCluster); - - const footprintMetrics = - job.numAcc !== 0 - ? job.exclusive !== 1 // GPU - ? ["acc_utilization", "acc_mem_used", "nv_sm_clock", "nv_mem_util"] // Shared - : ["acc_utilization", "acc_mem_used", "nv_sm_clock", "nv_mem_util"] // Exclusive - : (job.exclusive !== 1) // CPU Only - ? ["flops_any", "mem_used"] // Shared - : ["cpu_load", "flops_any", "mem_used", "mem_bw"]; // Exclusive - - const footprintData = footprintMetrics.map((fm) => { + const footprintData = job?.footprint?.map((jf) => { // Unit - const fmc = getContext("metrics")(job.cluster, fm); - let unit = ""; - if (fmc?.unit?.base) unit = fmc.unit.prefix + fmc.unit.base; + const fmc = getContext("getMetricConfig")(job.cluster, job.subCluster, jf.name); + const unit = (fmc?.unit?.prefix ? fmc.unit.prefix : "") + (fmc?.unit?.base ? 
fmc.unit.base : "") // Threshold / -Differences - const fmt = findJobThresholds(job, fmc, subclusterConfig); - if (fm === "flops_any") fmt.peak = round(fmt.peak * 0.85, 0); + const fmt = findJobThresholds(job, fmc); + if (jf.name === "flops_any") fmt.peak = round(fmt.peak * 0.85, 0); - // Value: Primarily use backend sourced avgs from job.*, secondarily calculate/read from metricdata - // Exclusivity does not matter - let mv = 0.0; - if (fmc.aggregation === "avg") { - if (fm === "cpu_load" && job.loadAvg !== 0) { - mv = round(job.loadAvg, 2); - } else if (fm === "flops_any" && job.flopsAnyAvg !== 0) { - mv = round(job.flopsAnyAvg, 2); - } else if (fm === "mem_bw" && job.memBwAvg !== 0) { - mv = round(job.memBwAvg, 2); - } else { - // Calculate Avg from jobMetrics - const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === "node"); - if (jm?.metric?.statisticsSeries) { - const noNan = jm.metric.statisticsSeries.median.filter(function (val) { - return val != null; - }); - mv = round(mean(noNan), 2); - } else if (jm?.metric?.series?.length > 1) { - const avgs = jm.metric.series.map((jms) => jms.statistics.avg); - mv = round(mean(avgs), 2); - } else if (jm?.metric?.series) { - mv = round(jm.metric.series[0].statistics.avg, 2); - } - } - } else if (fmc.aggregation === "sum") { - // Calculate Sum from jobMetrics: Sum all node averages - const jm = jobMetrics.find((jm) => jm.name === fm && jm.scope === "node"); - if (jm?.metric?.series?.length > 1) { // More than 1 node - const avgs = jm.metric.series.map((jms) => jms.statistics.avg); - mv = round(avgs.reduce((a, b) => a + b, 0)); - } else if (jm?.metric?.series) { - mv = round(jm.metric.series[0].statistics.avg, 2); - } - } else { - console.warn( - "Missing or unkown aggregation mode (sum/avg) for metric:", - metricConfig, - ); - } - - // Define basic data + // Define basic data -> Value: Use as Provided const fmBase = { - name: fm, + name: jf.name + ' (' + jf.stat + ')', + avg: jf.value, unit: unit, - avg: mv, max: fmt.peak, + dir: fmc.lowerIsBetter }; - if (evalFootprint(fm, mv, fmt, "alert")) { + if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "alert")) { return { ...fmBase, color: "danger", - message: `Metric average way ${fm === "mem_used" ? "above" : "below"} expected normal thresholds.`, - impact: 3, + message: `Metric average way ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, + impact: 3 }; - } else if (evalFootprint(fm, mv, fmt, "caution")) { + } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "caution")) { return { ...fmBase, color: "warning", - message: `Metric average ${fm === "mem_used" ? "above" : "below"} expected normal thresholds.`, + message: `Metric average ${fmc.lowerIsBetter ? 
"above" : "below"} expected normal thresholds.`, impact: 2, }; - } else if (evalFootprint(fm, mv, fmt, "normal")) { + } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "normal")) { return { ...fmBase, color: "success", message: "Metric average within expected thresholds.", impact: 1, }; - } else if (evalFootprint(fm, mv, fmt, "peak")) { + } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "peak")) { return { ...fmBase, color: "info", @@ -176,23 +114,23 @@ } }); - function evalFootprint(metric, mean, thresholds, level) { - // mem_used has inverse logic regarding threshold levels, notify levels triggered if mean > threshold + function evalFootprint(mean, thresholds, lowerIsBetter, level) { + // Handle Metrics in which less value is better switch (level) { case "peak": - if (metric === "mem_used") - return false; // mem_used over peak -> return false to trigger impact -1 + if (lowerIsBetter) + return false; // metric over peak -> return false to trigger impact -1 else return mean <= thresholds.peak && mean > thresholds.normal; case "alert": - if (metric === "mem_used") + if (lowerIsBetter) return mean <= thresholds.peak && mean >= thresholds.alert; else return mean <= thresholds.alert && mean >= 0; case "caution": - if (metric === "mem_used") + if (lowerIsBetter) return mean < thresholds.alert && mean >= thresholds.caution; else return mean <= thresholds.caution && mean > thresholds.alert; case "normal": - if (metric === "mem_used") + if (lowerIsBetter) return mean < thresholds.caution && mean >= 0; else return mean <= thresholds.normal && mean > thresholds.caution; default: @@ -201,7 +139,7 @@ } - + {#if view === "job"} @@ -250,9 +188,21 @@ offset={[0, 20]}>{fpd.message}
-
- -
+ + {#if fpd.dir} + + + + {/if} + + + + {#if !fpd.dir} + + + + {/if} + {/each} {#if job?.metaData?.message}
diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index f7c99ff..d0e4fca 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -27,7 +27,7 @@ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the let jobList, matchedJobs = null; - let sorting = { field: "startTime", order: "DESC" }, + let sorting = { field: "startTime", type: "col", order: "DESC" }, isSortingOpen = false, isMetricsSelectionOpen = false; let metrics = filterPresets.cluster diff --git a/web/frontend/src/Metric.svelte b/web/frontend/src/Metric.svelte index 279df13..0e1359f 100644 --- a/web/frontend/src/Metric.svelte +++ b/web/frontend/src/Metric.svelte @@ -1,5 +1,5 @@ - {metricName} ({(metricConfig?.unit?.prefix - ? metricConfig.unit.prefix - : "") + (metricConfig?.unit?.base ? metricConfig.unit.base : "")}) + {metricName} ({unit}) {#if job.resources.length > 1} @@ -118,8 +87,8 @@ bind:this={plot} {width} height={300} - {cluster} - {subCluster} + cluster={job.cluster} + subCluster={job.subCluster} timestep={data.timestep} scope={selectedScope} metric={metricName} @@ -132,8 +101,8 @@ bind:this={plot} {width} height={300} - {cluster} - {subCluster} + cluster={job.cluster} + subCluster={job.subCluster} timestep={data.timestep} scope={selectedScope} metric={metricName} diff --git a/web/frontend/src/MetricSelection.svelte b/web/frontend/src/MetricSelection.svelte index 91fd8e6..76398af 100644 --- a/web/frontend/src/MetricSelection.svelte +++ b/web/frontend/src/MetricSelection.svelte @@ -27,8 +27,8 @@ export let showFootprint = false; export let view = "job"; - const clusters = getContext("clusters"), - onInit = getContext("on-init"); + const onInit = getContext("on-init") + const globalMetrics = getContext("globalMetrics") let newMetricsOrder = []; let unorderedMetrics = [...metrics]; @@ -36,30 +36,34 @@ onInit(() => { if (allMetrics == null) allMetrics = new Set(); - for (let c of clusters) - for (let metric of c.metricConfig) allMetrics.add(metric.name); + for (let metric of globalMetrics) allMetrics.add(metric.name); }); $: { if (allMetrics != null) { if (cluster == null) { - // console.log('Reset to full metric list') - for (let c of clusters) - for (let metric of c.metricConfig) allMetrics.add(metric.name); + for (let metric of globalMetrics) allMetrics.add(metric.name); } else { - // console.log('Recalculate available metrics for ' + cluster) allMetrics.clear(); - for (let c of clusters) - if (c.name == cluster) - for (let metric of c.metricConfig) allMetrics.add(metric.name); + for (let gm of globalMetrics) { + if (gm.availability.find((av) => av.cluster === cluster)) allMetrics.add(gm.name); + } } - newMetricsOrder = [...allMetrics].filter((m) => !metrics.includes(m)); newMetricsOrder.unshift(...metrics.filter((m) => allMetrics.has(m))); unorderedMetrics = unorderedMetrics.filter((m) => allMetrics.has(m)); } } + function printAvailability(metric, cluster) { + const avail = globalMetrics.find((gm) => gm.name === metric)?.availability + if (cluster == null) { + return avail.map((av) => av.cluster).join(',') + } else { + return avail.find((av) => av.cluster === cluster).subClusters.join(',') + } + } + const client = getContextClient(); const updateConfigurationMutation = ({ name, value }) => { return mutationStore({ @@ -106,7 +110,6 @@ }).subscribe((res) => { if (res.fetching === false && res.error) { throw res.error; - // console.log('Error on 
subscription: ' + res.error) } }); @@ -118,7 +121,6 @@ value: JSON.stringify(showFootprint), }).subscribe((res) => { if (res.fetching === false && res.error) { - console.log("Error on footprint subscription: " + res.error); throw res.error; } }); @@ -161,34 +163,7 @@ {/if} {metric} - {cluster == null - ? clusters // No single cluster specified: List Clusters with Metric - .filter( - (c) => c.metricConfig.find((m) => m.name == metric) != null, - ) - .map((c) => c.name) - .join(", ") - : clusters // Single cluster requested: List Subclusters with do not have metric remove flag - .filter((c) => c.name == cluster) - .filter( - (c) => c.metricConfig.find((m) => m.name == metric) != null, - ) - .map(function (c) { - let scNames = c.subClusters.map((sc) => sc.name); - scNames.forEach(function (scName) { - let met = c.metricConfig.find((m) => m.name == metric); - let msc = met.subClusters.find( - (msc) => msc.name == scName, - ); - if (msc != null) { - if (msc.remove == true) { - scNames = scNames.filter((scn) => scn != msc.name); - } - } - }); - return scNames; - }) - .join(", ")} + {printAvailability(metric, cluster)} {/each} diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index 0a5a75e..98ee3e9 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -29,6 +29,8 @@ from.setMinutes(from.getMinutes() - 30); } + const initialized = getContext("initialized") + const globalMetrics = getContext("globalMetrics") const ccconfig = getContext("cc-config"); const clusters = getContext("clusters"); const client = getContextClient(); @@ -74,15 +76,11 @@ let itemsPerPage = ccconfig.plot_list_jobsPerPage; let page = 1; let paging = { itemsPerPage, page }; - let sorting = { field: "startTime", order: "DESC" }; + let sorting = { field: "startTime", type: "col", order: "DESC" }; $: filter = [ { cluster: { eq: cluster } }, { node: { contains: hostname } }, { state: ["running"] }, - // {startTime: { - // from: from.toISOString(), - // to: to.toISOString() - // }} ]; const nodeJobsQuery = gql` @@ -92,10 +90,6 @@ $paging: PageRequest! ) { jobs(filter: $filter, order: $sorting, page: $paging) { - # items { - # id - # jobId - # } count } } @@ -107,26 +101,16 @@ variables: { paging, sorting, filter }, }); - let metricUnits = {}; - $: if ($nodeMetricsData.data) { - let thisCluster = clusters.find((c) => c.name == cluster); - if (thisCluster) { - for (let metric of thisCluster.metricConfig) { - if (metric.unit.prefix || metric.unit.base) { - metricUnits[metric.name] = - "(" + - (metric.unit.prefix ? metric.unit.prefix : "") + - (metric.unit.base ? metric.unit.base : "") + - ")"; - } else { - // If no unit defined: Omit Unit Display - metricUnits[metric.name] = ""; - } - } + let systemUnits = {}; + function loadUnits(isInitialized) { + if (!isInitialized) return + const systemMetrics = [...globalMetrics.filter((gm) => gm?.availability.find((av) => av.cluster == cluster))] + for (let sm of systemMetrics) { + systemUnits[sm.name] = (sm?.unit?.prefix ? sm.unit.prefix : "") + (sm?.unit?.base ? sm.unit.base : "") } } - const dateToUnixEpoch = (rfc3339) => Math.floor(Date.parse(rfc3339) / 1000); + $: loadUnits($initialized) @@ -195,7 +179,7 @@ >

{item.name} - {metricUnits[item.name]} + {systemUnits[item.name] ? "(" + systemUnits[item.name] + ")" : ""}

{#if item.disabled === false && item.metric} { if (res.fetching === false && res.error) { throw res.error; - // console.log('Error on subscription: ' + res.error) } }); } diff --git a/web/frontend/src/StatsTable.svelte b/web/frontend/src/StatsTable.svelte index 3a9d84d..0db6a71 100644 --- a/web/frontend/src/StatsTable.svelte +++ b/web/frontend/src/StatsTable.svelte @@ -74,10 +74,6 @@ return s.dir != "up" ? s1[stat] - s2[stat] : s2[stat] - s1[stat]; }); } - - export function moreLoaded(jobMetric) { - jobMetrics = [...jobMetrics, jobMetric]; - } @@ -85,7 +81,6 @@ diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 48c3711..aadb3a9 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -146,7 +146,7 @@ `, variables: { cluster: cluster, - metrics: ["flops_any", "mem_bw"], + metrics: ["flops_any", "mem_bw"], // Fixed names for roofline and status bars from: from.toISOString(), to: to.toISOString(), filter: [{ state: ["running"] }, { cluster: { eq: cluster } }], diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index 4a7f633..95ceecd 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -29,9 +29,10 @@ from.setMinutes(from.getMinutes() - 30); } - const clusters = getContext("clusters"); + const initialized = getContext("initialized"); const ccconfig = getContext("cc-config"); - const metricConfig = getContext("metrics"); + const clusters = getContext("clusters"); + const globalMetrics = getContext("globalMetrics"); let plotHeight = 300; let hostnameFilter = ""; @@ -80,24 +81,18 @@ }, }); - let metricUnits = {}; - $: if ($nodesQuery.data) { - let thisCluster = clusters.find((c) => c.name == cluster); - if (thisCluster) { - for (let metric of thisCluster.metricConfig) { - if (metric.unit.prefix || metric.unit.base) { - metricUnits[metric.name] = - "(" + - (metric.unit.prefix ? metric.unit.prefix : "") + - (metric.unit.base ? metric.unit.base : "") + - ")"; - } else { - // If no unit defined: Omit Unit Display - metricUnits[metric.name] = ""; - } - } + let systemMetrics = []; + let systemUnits = {}; + function loadMetrics(isInitialized) { + if (!isInitialized) return + systemMetrics = [...globalMetrics.filter((gm) => gm?.availability.find((av) => av.cluster == cluster))] + for (let sm of systemMetrics) { + systemUnits[sm.name] = (sm?.unit?.prefix ? sm.unit.prefix : "") + (sm?.unit?.base ? 
sm.unit.base : "") } } + + $: loadMetrics($initialized) + @@ -123,9 +118,9 @@ Metric diff --git a/web/frontend/src/TagManagement.svelte b/web/frontend/src/TagManagement.svelte index e9fb9e9..afec176 100644 --- a/web/frontend/src/TagManagement.svelte +++ b/web/frontend/src/TagManagement.svelte @@ -107,7 +107,6 @@ addTagToJob(res.data.createTag); } else if (res.fetching === false && res.error) { throw res.error; - // console.log('Error on subscription: ' + res.error) } }); } @@ -120,7 +119,6 @@ pendingChange = false; } else if (res.fetching === false && res.error) { throw res.error; - // console.log('Error on subscription: ' + res.error) } }); } @@ -134,7 +132,6 @@ pendingChange = false; } else if (res.fetching === false && res.error) { throw res.error; - // console.log('Error on subscription: ' + res.error) } }, ); diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 41969d9..6526e6f 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -32,7 +32,7 @@ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the let jobList; let jobFilters = []; - let sorting = { field: "startTime", order: "DESC" }, + let sorting = { field: "startTime", type: "col", order: "DESC" }, isSortingOpen = false; let metrics = ccconfig.plot_list_selectedMetrics, isMetricsSelectionOpen = false; @@ -70,6 +70,7 @@ histMetrics { metric unit + stat data { min max @@ -245,7 +246,7 @@ usesBins={true} {width} height={250} - title="Distribution of '{item.metric}' averages" + title="Distribution of '{item.metric} ({item.stat})' footprints" xlabel={`${item.metric} bin maximum ${item?.unit ? `[${item.unit}]` : ``}`} xunit={item.unit} ylabel="Number of Jobs" diff --git a/web/frontend/src/Zoom.svelte b/web/frontend/src/Zoom.svelte deleted file mode 100644 index c5f73c1..0000000 --- a/web/frontend/src/Zoom.svelte +++ /dev/null @@ -1,65 +0,0 @@ - - -
- - - - - - Window Size: - - - ({windowSize}%) - - - - Window Position: - - - -
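Note on the settings components below: per the commit message, the frontend now talks GraphQL everywhere except the admin and user options, which keep their REST endpoints. The following diffs only drop leftover console.log calls; the submit/error pattern they all retain looks roughly like this minimal sketch (the helper name, endpoint URL, and form handling are illustrative assumptions, not taken from the diffs):

// Sketch of the REST submit pattern shared by the settings/admin forms.
// The URL is a placeholder; the components POST FormData to their REST routes.
async function submitForm(url, form) {
  const res = await fetch(url, { method: "POST", body: new FormData(form) });
  if (res.ok) {
    // On success the components show the returned text via their popMessage() helpers.
    return await res.text();
  }
  // Failure path kept unchanged in all components: surface status code and backend text.
  const text = await res.text();
  throw new Error("Response Code " + res.status + "-> " + text);
}
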
diff --git a/web/frontend/src/config/UserSettings.svelte b/web/frontend/src/config/UserSettings.svelte index cd1d9a3..7eaa04e 100644 --- a/web/frontend/src/config/UserSettings.svelte +++ b/web/frontend/src/config/UserSettings.svelte @@ -23,7 +23,6 @@ popMessage(text, target, "#048109"); } else { let text = await res.text(); - // console.log(res.statusText) throw new Error("Response Code " + res.status + "-> " + text); } } catch (err) { diff --git a/web/frontend/src/config/admin/AddUser.svelte b/web/frontend/src/config/admin/AddUser.svelte index 84aacc3..154bb3e 100644 --- a/web/frontend/src/config/admin/AddUser.svelte +++ b/web/frontend/src/config/admin/AddUser.svelte @@ -23,7 +23,6 @@ form.reset(); } else { let text = await res.text(); - // console.log(res.statusText) throw new Error("Response Code " + res.status + "-> " + text); } } catch (err) { diff --git a/web/frontend/src/config/admin/EditProject.svelte b/web/frontend/src/config/admin/EditProject.svelte index e1c518f..e7d6379 100644 --- a/web/frontend/src/config/admin/EditProject.svelte +++ b/web/frontend/src/config/admin/EditProject.svelte @@ -32,7 +32,6 @@ reloadUserList(); } else { let text = await res.text(); - // console.log(res.statusText) throw new Error("Response Code " + res.status + "-> " + text); } } catch (err) { @@ -64,7 +63,6 @@ reloadUserList(); } else { let text = await res.text(); - // console.log(res.statusText) throw new Error("Response Code " + res.status + "-> " + text); } } catch (err) { diff --git a/web/frontend/src/config/admin/EditRole.svelte b/web/frontend/src/config/admin/EditRole.svelte index 6b24e3e..a26c48b 100644 --- a/web/frontend/src/config/admin/EditRole.svelte +++ b/web/frontend/src/config/admin/EditRole.svelte @@ -34,7 +34,6 @@ reloadUserList(); } else { let text = await res.text(); - // console.log(res.statusText) throw new Error("Response Code " + res.status + "-> " + text); } } catch (err) { @@ -66,7 +65,6 @@ reloadUserList(); } else { let text = await res.text(); - // console.log(res.statusText) throw new Error("Response Code " + res.status + "-> " + text); } } catch (err) { diff --git a/web/frontend/src/filters/Filters.svelte b/web/frontend/src/filters/Filters.svelte index 7253ff7..ef92c31 100644 --- a/web/frontend/src/filters/Filters.svelte +++ b/web/frontend/src/filters/Filters.svelte @@ -136,8 +136,8 @@ if (filters.project) items.push({ project: { [filters.projectMatch]: filters.project } }); if (filters.jobName) items.push({ jobName: { contains: filters.jobName } }); - for (let stat of filters.stats) - items.push({ [stat.field]: { from: stat.from, to: stat.to } }); + if (filters.stats.length != 0) + items.push({ metricStats: filters.stats.map((st) => { return { metricName: st.field, range: { from: st.from, to: st.to }} }) }); dispatch("update", { filters: items }); changeURL(); @@ -412,7 +412,6 @@ /> update()} diff --git a/web/frontend/src/filters/Resources.svelte b/web/frontend/src/filters/Resources.svelte index 01f1c57..19af205 100644 --- a/web/frontend/src/filters/Resources.svelte +++ b/web/frontend/src/filters/Resources.svelte @@ -59,7 +59,6 @@ 0, ); - // console.log(header) let minNumNodes = 1, maxNumNodes = 0, minNumHWThreads = 1, diff --git a/web/frontend/src/filters/Stats.svelte b/web/frontend/src/filters/Stats.svelte index ee80a4b..b19793f 100644 --- a/web/frontend/src/filters/Stats.svelte +++ b/web/frontend/src/filters/Stats.svelte @@ -1,5 +1,6 @@ (isOpen = !isOpen)}> @@ -126,8 +73,7 @@ color="danger" on:click={() => { isOpen = false; - resetRange($initialized, cluster); - 
statistics.forEach((stat) => (stat.enabled = false)); + resetRanges(); stats = []; dispatch("update", { stats }); }}>Reset gm.name === m)?.unit + return (rawUnit?.prefix ? rawUnit.prefix : "") + (rawUnit?.base ? rawUnit.base : "") + } + const client = getContextClient(); const query = gql` query ( @@ -75,7 +80,11 @@ name } metaData - footprint + footprint { + name + stat + value + } } count hasNextPage @@ -141,7 +150,6 @@ paging = { itemsPerPage: value, page: page }; // Trigger reload of jobList } else if (res.fetching === false && res.error) { throw res.error; - // console.log('Error on subscription: ' + res.error) } }); } @@ -215,22 +223,7 @@ > {metric} {#if $initialized} - ({clusters - .map((cluster) => - cluster.metricConfig.find((m) => m.name == metric), - ) - .filter((m) => m != null) - .map( - (m) => - (m.unit?.prefix ? m.unit?.prefix : "") + - (m.unit?.base ? m.unit?.base : ""), - ) // Build unitStr - .reduce( - (arr, unitStr) => - arr.includes(unitStr) ? arr : [...arr, unitStr], - [], - ) // w/o this, output would be [unitStr, unitStr] - .join(", ")}) + ({getUnit(metric)}) {/if} {/each} diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index dd92ec4..5d4b2c7 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -30,16 +30,11 @@ : ["core"] : ["node"]; - function distinct(value, index, array) { - return array.indexOf(value) === index; - } - const cluster = getContext("clusters").find((c) => c.name == job.cluster); - const metricConfig = getContext("metrics"); // Get all MetricConfs which include subCluster-specific settings for this job const client = getContextClient(); const query = gql` - query ($id: ID!, $queryMetrics: [String!]!, $scopes: [MetricScope!]!) { - jobMetrics(id: $id, metrics: $queryMetrics, scopes: $scopes) { + query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!) { + jobMetrics(id: $id, metrics: $metrics, scopes: $scopes) { name scope metric { @@ -71,34 +66,14 @@ $: metricsQuery = queryStore({ client: client, query: query, - variables: { id, queryMetrics, scopes }, + variables: { id, metrics, scopes }, }); - - let queryMetrics = null; - $: if (showFootprint) { - queryMetrics = [ - "cpu_load", - "flops_any", - "mem_used", - "mem_bw", - "acc_utilization", - ...metrics, - ].filter(distinct); - scopes = ["node"]; - } else { - queryMetrics = [...metrics]; - scopes = job.numNodes == 1 - ? job.numAcc >= 1 - ? ["core", "accelerator"] - : ["core"] - : ["node"]; - } - + export function refresh() { metricsQuery = queryStore({ client: client, query: query, - variables: { id, queryMetrics, scopes }, + variables: { id, metrics, scopes }, // requestPolicy: 'network-only' // use default cache-first for refresh }); } @@ -166,8 +141,8 @@
diff --git a/web/frontend/src/joblist/SortSelection.svelte b/web/frontend/src/joblist/SortSelection.svelte index 2cc8615..ba6f9b8 100644 --- a/web/frontend/src/joblist/SortSelection.svelte +++ b/web/frontend/src/joblist/SortSelection.svelte @@ -17,24 +17,39 @@ ModalHeader, ModalFooter, } from "@sveltestrap/sveltestrap"; + import { getContext } from "svelte"; + import { getSortItems } from "../utils.js"; export let isOpen = false; - export let sorting = { field: "startTime", order: "DESC" }; + export let sorting = { field: "startTime", type: "col", order: "DESC" }; - let sortableColumns = [ - { field: "startTime", text: "Start Time", order: "DESC" }, - { field: "duration", text: "Duration", order: "DESC" }, - { field: "numNodes", text: "Number of Nodes", order: "DESC" }, - { field: "memUsedMax", text: "Max. Memory Used", order: "DESC" }, - { field: "flopsAnyAvg", text: "Avg. FLOPs", order: "DESC" }, - { field: "memBwAvg", text: "Avg. Memory Bandwidth", order: "DESC" }, - { field: "netBwAvg", text: "Avg. Network Bandwidth", order: "DESC" }, - ]; + let sortableColumns = []; + let activeColumnIdx; - let activeColumnIdx = sortableColumns.findIndex( - (col) => col.field == sorting.field, - ); - sortableColumns[activeColumnIdx].order = sorting.order; + const initialized = getContext("initialized"); + + function loadSortables(isInitialized) { + if (!isInitialized) return; + sortableColumns = [ + { field: "startTime", type: "col", text: "Start Time", order: "DESC" }, + { field: "duration", type: "col", text: "Duration", order: "DESC" }, + { field: "numNodes", type: "col", text: "Number of Nodes", order: "DESC" }, + { field: "numHwthreads", type: "col", text: "Number of HWThreads", order: "DESC" }, + { field: "numAcc", type: "col", text: "Number of Accelerators", order: "DESC" }, + ...getSortItems() + ] + } + + function loadActiveIndex(isInitialized) { + if (!isInitialized) return; + activeColumnIdx = sortableColumns.findIndex( + (col) => col.field == sorting.field, + ); + sortableColumns[activeColumnIdx].order = sorting.order; + } + + $: loadSortables($initialized); + $: loadActiveIndex($initialized) - export function formatTime(t, forNode = false) { + function formatTime(t, forNode = false) { if (t !== null) { if (isNaN(t)) { return t; @@ -15,7 +15,7 @@ } } - export function timeIncrs(timestep, maxX, forNode) { + function timeIncrs(timestep, maxX, forNode) { if (forNode === true) { return [60, 300, 900, 1800, 3600, 7200, 14400, 21600]; // forNode fixed increments } else { @@ -27,93 +27,63 @@ } } - export function findThresholds( + // removed arg "subcluster": input metricconfig and topology now directly derived from subcluster + function findThresholds( + subClusterTopology, metricConfig, scope, - subCluster, isShared, numhwthreads, numaccs ) { - // console.log('NAME ' + metricConfig.name + ' / SCOPE ' + scope + ' / SUBCLUSTER ' + subCluster.name) - if (!metricConfig || !scope || !subCluster) { + + if (!subClusterTopology || !metricConfig || !scope) { console.warn("Argument missing for findThresholds!"); return null; } if ( (scope == "node" && isShared == false) || - metricConfig.aggregation == "avg" + metricConfig?.aggregation == "avg" ) { - if (metricConfig.subClusters && metricConfig.subClusters.length === 0) { - // console.log('subClusterConfigs array empty, use metricConfig defaults') return { normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert, peak: metricConfig.peak, }; - } else if ( - metricConfig.subClusters && - metricConfig.subClusters.length > 0 - ) { 
-    // console.log('subClusterConfigs found, use subCluster Settings if matching jobs subcluster:')
-    let forSubCluster = metricConfig.subClusters.find(
-      (sc) => sc.name == subCluster.name,
-    );
-    if (
-      forSubCluster &&
-      forSubCluster.normal &&
-      forSubCluster.caution &&
-      forSubCluster.alert &&
-      forSubCluster.peak
-    )
-      return forSubCluster;
-    else
-      return {
-        normal: metricConfig.normal,
-        caution: metricConfig.caution,
-        alert: metricConfig.alert,
-        peak: metricConfig.peak,
-      };
-  } else {
-    console.warn("metricConfig.subClusters not found!");
+  }
+
+
+  if (metricConfig?.aggregation == "sum") {
+    let divisor = 1
+    if (isShared == true) { // Shared
+        if (numaccs > 0) divisor = subClusterTopology.accelerators.length / numaccs;
+        else if (numhwthreads > 0) divisor = subClusterTopology.node.length / numhwthreads;
+    }
+    else if (scope == 'socket') divisor = subClusterTopology.socket.length;
+    else if (scope == "core") divisor = subClusterTopology.core.length;
+    else if (scope == "accelerator")
+      divisor = subClusterTopology.accelerators.length;
+    else if (scope == "hwthread") divisor = subClusterTopology.node.length;
+    else {
+      // console.log('TODO: how to calc thresholds for ', scope)
       return null;
     }
+
+    return {
+      peak: metricConfig.peak / divisor,
+      normal: metricConfig.normal / divisor,
+      caution: metricConfig.caution / divisor,
+      alert: metricConfig.alert / divisor,
+    };
   }

-  if (metricConfig.aggregation != "sum") {
-    console.warn(
-      "Missing or unkown aggregation mode (sum/avg) for metric:",
-      metricConfig,
-    );
-    return null;
-  }
-
-  let divisor = 1
-  if (isShared == true) { // Shared
-    if (numaccs > 0) divisor = subCluster.topology.accelerators.length / numaccs;
-    else if (numhwthreads > 0) divisor = subCluster.topology.node.length / numhwthreads;
-  }
-  else if (scope == 'socket') divisor = subCluster.topology.socket.length;
-  else if (scope == "core") divisor = subCluster.topology.core.length;
-  else if (scope == "accelerator")
-    divisor = subCluster.topology.accelerators.length;
-  else if (scope == "hwthread") divisor = subCluster.topology.node.length;
-  else {
-    // console.log('TODO: how to calc thresholds for ', scope)
-    return null;
-  }
-
-  let mc =
-    metricConfig?.subClusters?.find((sc) => sc.name == subCluster.name) ||
-    metricConfig;
-  return {
-    peak: mc.peak / divisor,
-    normal: mc.normal / divisor,
-    caution: mc.caution / divisor,
-    alert: mc.alert / divisor,
-  };
+  console.warn(
+    "Missing or unknown aggregation mode (sum/avg) for metric:",
+    metricConfig,
+  );
+  return null;
 }

@@ -165,7 +135,8 @@
   if (useStatsSeries == false && series == null) useStatsSeries = true;

-  const metricConfig = getContext("metrics")(cluster, metric);
+  const subClusterTopology = getContext("getHardwareTopology")(cluster, subCluster);
+  const metricConfig = getContext("getMetricConfig")(cluster, subCluster, metric);
   const clusterCockpitConfig = getContext("cc-config");
   const resizeSleepTime = 250;
   const normalLineColor = "#000000";
@@ -178,11 +149,9 @@
     alert: "rgba(255, 0, 0, 0.3)",
   };
   const thresholds = findThresholds(
+    subClusterTopology,
     metricConfig,
     scope,
-    typeof subCluster == "string"
-      ?
cluster.subClusters.find((sc) => sc.name == subCluster) - : subCluster, isShared, numhwthreads, numaccs @@ -479,8 +448,6 @@ cursor: { drag: { x: true, y: true } }, }; - // console.log(opts) - let plotWrapper = null; let uplot = null; let timeoutId = null; diff --git a/web/frontend/src/plots/Polar.svelte b/web/frontend/src/plots/Polar.svelte index 59f89f3..ae0e249 100644 --- a/web/frontend/src/plots/Polar.svelte +++ b/web/frontend/src/plots/Polar.svelte @@ -24,10 +24,11 @@ export let metrics export let cluster + export let subCluster export let jobMetrics export let height = 365 - const metricConfig = getContext('metrics') + const getMetricConfig = getContext("getMetricConfig") const labels = metrics.filter(name => { if (!jobMetrics.find(m => m.name == name && m.scope == "node")) { @@ -38,7 +39,7 @@ }) const getValuesForStat = (getStat) => labels.map(name => { - const peak = metricConfig(cluster, name).peak + const peak = getMetricConfig(cluster, subCluster, name).peak const metric = jobMetrics.find(m => m.name == name && m.scope == "node") const value = getStat(metric.metric) / peak return value <= 1. ? value : 1. diff --git a/web/frontend/src/plots/Roofline.svelte b/web/frontend/src/plots/Roofline.svelte index 11d1d25..a05eec3 100644 --- a/web/frontend/src/plots/Roofline.svelte +++ b/web/frontend/src/plots/Roofline.svelte @@ -209,7 +209,6 @@ draw: [ (u) => { // draw roofs when cluster set - // console.log(u) if (cluster != null) { const padding = u._padding; // [top, right, bottom, left] @@ -237,9 +236,6 @@ true, ); - // Debug get zoomLevel from browser - // console.log("Zoom", Math.round(window.devicePixelRatio * 100)) - if ( scalarKneeX < width * window.devicePixelRatio - @@ -323,7 +319,7 @@ }; uplot = new uPlot(opts, plotData, plotWrapper); } else { - console.log("No data for roofline!"); + // console.log("No data for roofline!"); } } diff --git a/web/frontend/src/units.js b/web/frontend/src/units.js index 9a4defd..4c1fea4 100644 --- a/web/frontend/src/units.js +++ b/web/frontend/src/units.js @@ -31,3 +31,4 @@ export function scaleNumbers(x, y , p = '') { return Math.abs(rawYValue) >= 1000 ? `${rawXValue.toExponential()} / ${rawYValue.toExponential()}` : `${rawYValue.toString()} / ${rawYValue.toString()}` } +// export const dateToUnixEpoch = (rfc3339) => Math.floor(Date.parse(rfc3339) / 1000); diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index 3ab86da..7510ace 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -6,7 +6,6 @@ import { } from "@urql/svelte"; import { setContext, getContext, hasContext, onDestroy, tick } from "svelte"; import { readable } from "svelte/store"; -// import { formatNumber } from './units.js' /* * Call this function only at component initialization time! @@ -16,7 +15,9 @@ import { readable } from "svelte/store"; * - Creates a readable store 'initialization' which indicates when the values below can be used. 
 * - Adds 'tags' to the context (list of all tags)
 * - Adds 'clusters' to the context (object with cluster names as keys)
- * - Adds 'metrics' to the context, a function that takes a cluster and metric name and returns the MetricConfig (or undefined)
+ * - Adds 'globalMetrics' to the context (list of globally available metric infos)
+ * - Adds 'getMetricConfig' to the context, a function that takes a cluster, subCluster and metric name and returns the MetricConfig (or undefined)
+ * - Adds 'getHardwareTopology' to the context, a function that takes a cluster and subCluster and returns the subCluster topology (or undefined)
 */
export function init(extraInitQuery = "") {
  const jwt = hasContext("jwt")
@@ -71,11 +72,19 @@ export function init(extraInitQuery = "") {
             normal
             caution
             alert
+            lowerIsBetter
           }
           footprint
         }
       }
       tags { id, name, type }
+      globalMetrics {
+        name
+        scope
+        footprint
+        unit { base, prefix }
+        availability { cluster, subClusters }
+      }
       ${extraInitQuery}
     }`
  )
@@ -91,12 +100,13 @@
    };
  };

-  const tags = [],
-    clusters = [];
-  const allMetrics = [];
+  const tags = []
+  const clusters = []
+  const globalMetrics = []
+
  setContext("tags", tags);
  setContext("clusters", clusters);
-  setContext("allmetrics", allMetrics);
+  setContext("globalMetrics", globalMetrics);
  setContext("getMetricConfig", (cluster, subCluster, metric) => {
    if (typeof cluster !== "object")
      cluster = clusters.find((c) => c.name == cluster);
@@ -106,6 +116,15 @@
    return subCluster.metricConfig.find((m) => m.name == metric);
  });
+  setContext("getHardwareTopology", (cluster, subCluster) => {
+    if (typeof cluster !== "object")
+      cluster = clusters.find((c) => c.name == cluster);
+
+    if (typeof subCluster !== "object")
+      subCluster = cluster.subClusters.find((sc) => sc.name == subCluster);
+
+    return subCluster?.topology;
+  });
  setContext("on-init", (callback) =>
    state.fetching ?
subscribers.push(callback) : callback(state) ); @@ -124,32 +143,11 @@ export function init(extraInitQuery = "") { } for (let tag of data.tags) tags.push(tag); + for (let cluster of data.clusters) clusters.push(cluster); + for (let gm of data.globalMetrics) globalMetrics.push(gm); - let globalmetrics = []; - for (let cluster of data.clusters) { - // Add full info to context object - clusters.push(cluster); - // Build global metric list with availability for joblist metricselect - for (let subcluster of cluster.subClusters) { - for (let scm of subcluster.metricConfig) { - let match = globalmetrics.find((gm) => gm.name == scm.name); - if (match) { - let submatch = match.availability.find((av) => av.cluster == cluster.name); - if (submatch) { - submatch.subclusters.push(subcluster.name) - } else { - match.availability.push({cluster: cluster.name, subclusters: [subcluster.name]}) - } - } else { - globalmetrics.push({name: scm.name, availability: [{cluster: cluster.name, subclusters: [subcluster.name]}]}); - } - } - } - } - // Add to ctx object - for (let gm of globalmetrics) allMetrics.push(gm); - - console.log('All Metrics List', allMetrics); + // Unified Sort + globalMetrics.sort((a, b) => a.name.localeCompare(b.name)) state.data = data; tick().then(() => subscribers.forEach((cb) => cb(state))); @@ -159,6 +157,7 @@ export function init(extraInitQuery = "") { query: { subscribe }, tags, clusters, + globalMetrics }; } @@ -171,6 +170,11 @@ function fuzzyMatch(term, string) { return string.toLowerCase().includes(term); } +// Use in filter() function to return only unique values +export function distinct(value, index, array) { + return array.indexOf(value) === index; +} + export function fuzzySearchTags(term, tags) { if (!tags) return []; @@ -260,56 +264,6 @@ export function minScope(scopes) { return sm; } -export async function fetchMetrics(job, metrics, scopes) { - if (job.monitoringStatus == 0) return null; - - let query = []; - if (metrics != null) { - for (let metric of metrics) { - query.push(`metric=${metric}`); - } - } - if (scopes != null) { - for (let scope of scopes) { - query.push(`scope=${scope}`); - } - } - - try { - let res = await fetch( - `/frontend/jobs/metrics/${job.id}${query.length > 0 ? "?" 
: ""}${query.join( - "&" - )}` - ); - if (res.status != 200) { - return { error: { status: res.status, message: await res.text() } }; - } - - return await res.json(); - } catch (e) { - return { error: e }; - } -} - -export function fetchMetricsStore() { - let set = null; - let prev = { fetching: true, error: null, data: null }; - return [ - readable(prev, (_set) => { - set = _set; - }), - (job, metrics, scopes) => - fetchMetrics(job, metrics, scopes).then((res) => { - let next = { fetching: false, error: res.error, data: res.data }; - if (prev.data && next.data) - next.data.jobMetrics.push(...prev.data.jobMetrics); - - prev = next; - set(next); - }), - ]; -} - export function stickyHeader(datatableHeaderSelector, updatePading) { const header = document.querySelector("header > nav.navbar"); if (!header) return; @@ -336,22 +290,98 @@ export function stickyHeader(datatableHeaderSelector, updatePading) { onDestroy(() => document.removeEventListener("scroll", onscroll)); } -// Outdated: Frontend Will Now Receive final MetricList from backend export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubcluster - const mc = getContext("metrics"); - const thisConfig = mc(c, m); - let thisSCIndex = -1; - if (thisConfig) { - thisSCIndex = thisConfig.subClusters.findIndex( - (subcluster) => subcluster.name == s - ); - }; - if (thisSCIndex >= 0) { - if (thisConfig.subClusters[thisSCIndex].remove == true) { - return true; + const metrics = getContext("globalMetrics"); + const result = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s) + return !result +} + +export function getStatsItems() { + // console.time('stats') + // console.log('getStatsItems ...') + const globalMetrics = getContext("globalMetrics") + const result = globalMetrics.map((gm) => { + if (gm?.footprint) { + // Footprint contains suffix naming the used stat-type + // console.time('deep') + // console.log('Deep Config for', gm.name) + const mc = getMetricConfigDeep(gm.name, null, null) + // console.timeEnd('deep') + return { + field: gm.name + '_' + gm.footprint, + text: gm.name + ' (' + gm.footprint + ')', + metric: gm.name, + from: 0, + to: mc.peak, + peak: mc.peak, + enabled: false + } } + return null + }).filter((r) => r != null) + // console.timeEnd('stats') + return [...result]; +}; + +export function getSortItems() { + //console.time('sort') + //console.log('getSortItems ...') + const globalMetrics = getContext("globalMetrics") + const result = globalMetrics.map((gm) => { + if (gm?.footprint) { + // Footprint contains suffix naming the used stat-type + return { + field: gm.name + '_' + gm.footprint, + type: 'foot', + text: gm.name + ' (' + gm.footprint + ')', + order: 'DESC' + } + } + return null + }).filter((r) => r != null) + //console.timeEnd('sort') + return [...result]; +}; + +function getMetricConfigDeep(metric, cluster, subCluster) { + const clusters = getContext("clusters"); + if (cluster != null) { + let c = clusters.find((c) => c.name == cluster); + if (subCluster != null) { + let sc = c.subClusters.find((sc) => sc.name == subCluster); + return sc.metricConfig.find((mc) => mc.name == metric) + } else { + let result; + for (let sc of c.subClusters) { + const mc = sc.metricConfig.find((mc) => mc.name == metric) + if (result) { // If lowerIsBetter: Peak is still maximum value, no special case required + result.alert = (mc.alert > result.alert) ? mc.alert : result.alert + result.caution = (mc.caution > result.caution) ? 
mc.caution : result.caution + result.normal = (mc.normal > result.normal) ? mc.normal : result.normal + result.peak = (mc.peak > result.peak) ? mc.peak : result.peak + } else { + if (mc) result = {...mc}; + } + } + return result + } + } else { + let result; + for (let c of clusters) { + for (let sc of c.subClusters) { + const mc = sc.metricConfig.find((mc) => mc.name == metric) + if (result) { // If lowerIsBetter: Peak is still maximum value, no special case required + result.alert = (mc.alert > result.alert) ? mc.alert : result.alert + result.caution = (mc.caution > result.caution) ? mc.caution : result.caution + result.normal = (mc.normal > result.normal) ? mc.normal : result.normal + result.peak = (mc.peak > result.peak) ? mc.peak : result.peak + } else { + if (mc) result = {...mc}; + } + } + } + return result } - return false; } export function convert2uplot(canvasData) { @@ -413,14 +443,14 @@ export function binsFromFootprint(weights, scope, values, numBins) { } export function transformDataForRoofline(flopsAny, memBw) { // Uses Metric Objects: {series:[{},{},...], timestep:60, name:$NAME} - const nodes = flopsAny.series.length - const timesteps = flopsAny.series[0].data.length - /* c will contain values from 0 to 1 representing the time */ let data = null const x = [], y = [], c = [] if (flopsAny && memBw) { + const nodes = flopsAny.series.length + const timesteps = flopsAny.series[0].data.length + for (let i = 0; i < nodes; i++) { const flopsData = flopsAny.series[i].data const memBwData = memBw.series[i].data @@ -446,7 +476,7 @@ export function transformDataForRoofline(flopsAny, memBw) { // Uses Metric Objec // Return something to be plotted. The argument shall be the result of the // `nodeMetrics` GraphQL query. -// Remove "hardcoded" here or deemed necessary? +// Hardcoded metric names required for correct render export function transformPerNodeDataForRoofline(nodes) { let data = null const x = [], y = [] diff --git a/web/templates/monitoring/job.tmpl b/web/templates/monitoring/job.tmpl index 1e3b09c..9b344f9 100644 --- a/web/templates/monitoring/job.tmpl +++ b/web/templates/monitoring/job.tmpl @@ -9,8 +9,6 @@ @@ -312,7 +319,7 @@ {filterPresets} disableClusterSelection={true} startTimeQuickSelect={true} - on:update={({ detail }) => { + on:update-filters={({ detail }) => { jobFilters = detail.filters; }} /> @@ -445,7 +452,7 @@ width={colWidth2} height={300} tiles={$rooflineQuery.data.rooflineHeatmap} - cluster={cluster.subClusters.length == 1 + subCluster={cluster.subClusters.length == 1 ? cluster.subClusters[0] : null} maxY={rooflineMaxY} diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte index fde9342..0852464 100644 --- a/web/frontend/src/Config.root.svelte +++ b/web/frontend/src/Config.root.svelte @@ -1,3 +1,12 @@ + + - {#if view === "job"} + {#if displayTitle} Core Metrics Footprint diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index d0e4fca..f21a228 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -1,4 +1,13 @@ - @@ -77,11 +86,11 @@ { + on:update-filters={({ detail }) => { selectedCluster = detail.filters[0]?.cluster ? 
detail.filters[0].cluster.eq : null; - jobList.update(detail.filters); + jobList.queryJobs(detail.filters); }} /> @@ -91,11 +100,14 @@ {presetProject} bind:authlevel bind:roles - on:update={({ detail }) => filterComponent.update(detail)} + on:set-filter={({ detail }) => filterComponent.updateFilters(detail)} /> - jobList.refresh()} /> + { + jobList.refreshJobs() + jobList.refreshAllMetrics() + }} />
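The single jobList.refresh() call above is split into two exported functions; a minimal sketch of the combined handler the refresher now triggers — the wrapper name is illustrative, and what each call does internally is assumed from its name:

    // Illustrative composite handler, call names taken from the diff above.
    const refreshAll = () => {
      jobList.refreshJobs();       // re-run the paged jobs query (assumed)
      jobList.refreshAllMetrics(); // rebuild the per-row metric queries (assumed)
    };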
@@ -119,5 +131,5 @@ bind:metrics bind:isOpen={isMetricsSelectionOpen} bind:showFootprint - view="list" + footprintSelect={true} /> diff --git a/web/frontend/src/List.root.svelte b/web/frontend/src/List.root.svelte index bc1ac6f..a7fe237 100644 --- a/web/frontend/src/List.root.svelte +++ b/web/frontend/src/List.root.svelte @@ -1,9 +1,14 @@ + @@ -113,7 +117,7 @@ {filterPresets} startTimeQuickSelect={true} menuText="Only {type.toLowerCase()}s with jobs that match the filters will show up" - on:update={({ detail }) => { + on:update-filters={({ detail }) => { jobFilters = detail.filters; }} /> diff --git a/web/frontend/src/Metric.svelte b/web/frontend/src/Metric.svelte index 0e1359f..08badba 100644 --- a/web/frontend/src/Metric.svelte +++ b/web/frontend/src/Metric.svelte @@ -1,3 +1,17 @@ + + diff --git a/web/frontend/src/MetricSelection.svelte b/web/frontend/src/MetricSelection.svelte index 76398af..ac2deed 100644 --- a/web/frontend/src/MetricSelection.svelte +++ b/web/frontend/src/MetricSelection.svelte @@ -1,10 +1,14 @@ {#each links as item} diff --git a/web/frontend/src/NavbarTools.svelte b/web/frontend/src/NavbarTools.svelte index f44b4e9..fa9cac9 100644 --- a/web/frontend/src/NavbarTools.svelte +++ b/web/frontend/src/NavbarTools.svelte @@ -1,3 +1,13 @@ + +
filterComponent.update(detail)} + on:set-filter={({ detail }) => filterComponent.updateFilters(detail)} /> - jobList.refresh()} /> + { + jobList.refreshJobs() + jobList.refreshAllMetrics() + }} />
@@ -273,7 +283,7 @@ bind:metrics bind:isOpen={isMetricsSelectionOpen} bind:showFootprint - view="list" + footprintSelect={true} /> + - handleSettingSubmit(e)}/> - handleSettingSubmit(e)}/> - handleSettingSubmit(e)}/> + handleSettingSubmit(e)}/> + handleSettingSubmit(e)}/> + handleSettingSubmit(e)}/> diff --git a/web/frontend/src/config/admin/AddUser.svelte b/web/frontend/src/config/admin/AddUser.svelte index 154bb3e..6c20d7a 100644 --- a/web/frontend/src/config/admin/AddUser.svelte +++ b/web/frontend/src/config/admin/AddUser.svelte @@ -1,4 +1,14 @@ -
@@ -143,7 +150,7 @@ {job} width={plotWidth} height="{plotHeight}px" - view="list" + displayTitle={false} /> {/if} diff --git a/web/frontend/src/joblist/SortSelection.svelte b/web/frontend/src/joblist/SortSelection.svelte index ba6f9b8..a77f03c 100644 --- a/web/frontend/src/joblist/SortSelection.svelte +++ b/web/frontend/src/joblist/SortSelection.svelte @@ -1,5 +1,5 @@ - {#if series[0].data.length > 0} diff --git a/web/frontend/src/plots/Pie.svelte b/web/frontend/src/plots/Pie.svelte index 11dc2c9..89c333c 100644 --- a/web/frontend/src/plots/Pie.svelte +++ b/web/frontend/src/plots/Pie.svelte @@ -1,3 +1,17 @@ + + + +
+ +
\ No newline at end of file diff --git a/web/frontend/src/plots/Scatter.svelte b/web/frontend/src/plots/Scatter.svelte index 911d27d..1b260a6 100644 --- a/web/frontend/src/plots/Scatter.svelte +++ b/web/frontend/src/plots/Scatter.svelte @@ -1,6 +1,16 @@ -
- -
+ + +
+ +
diff --git a/web/frontend/src/plots/Scatteruplot.svelte b/web/frontend/src/plots/Scatteruplot.svelte new file mode 100644 index 0000000..0fac0b7 --- /dev/null +++ b/web/frontend/src/plots/Scatteruplot.svelte @@ -0,0 +1,627 @@ + + +{#if data != null} +
+{:else} + Cannot render scatter: No data! +{/if} \ No newline at end of file diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index 7510ace..bb63a4f 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -108,18 +108,18 @@ export function init(extraInitQuery = "") { setContext("clusters", clusters); setContext("globalMetrics", globalMetrics); setContext("getMetricConfig", (cluster, subCluster, metric) => { + // Load objects if input is string if (typeof cluster !== "object") cluster = clusters.find((c) => c.name == cluster); - if (typeof subCluster !== "object") subCluster = cluster.subClusters.find((sc) => sc.name == subCluster); return subCluster.metricConfig.find((m) => m.name == metric); }); setContext("getHardwareTopology", (cluster, subCluster) => { + // Load objects if input is string if (typeof cluster !== "object") cluster = clusters.find((c) => c.name == cluster); - if (typeof subCluster !== "object") subCluster = cluster.subClusters.find((sc) => sc.name == subCluster); @@ -175,6 +175,17 @@ export function distinct(value, index, array) { return array.indexOf(value) === index; } +// Load Local Bool and Handle Scrambling of input string +export const scrambleNames = window.localStorage.getItem("cc-scramble-names"); +export const scramble = function (str) { + if (str === "-") return str; + else + return [...str] + .reduce((x, c, i) => x * 7 + c.charCodeAt(0) * i * 21, 5) + .toString(32) + .substr(0, 6); +}; + export function fuzzySearchTags(term, tags) { if (!tags) return []; From 18369da5bc9726a5717f09b7b230c5ef2b366160 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 26 Jul 2024 10:46:13 +0200 Subject: [PATCH 068/443] Fix small oversight. remove wip plot component --- .../src/filters/DoubleRangeSlider.svelte | 2 +- web/frontend/src/plots/Scatteruplot.svelte | 627 ------------------ 2 files changed, 1 insertion(+), 628 deletions(-) delete mode 100644 web/frontend/src/plots/Scatteruplot.svelte diff --git a/web/frontend/src/filters/DoubleRangeSlider.svelte b/web/frontend/src/filters/DoubleRangeSlider.svelte index 689f529..b9c1d0b 100644 --- a/web/frontend/src/filters/DoubleRangeSlider.svelte +++ b/web/frontend/src/filters/DoubleRangeSlider.svelte @@ -4,7 +4,7 @@ Originally created by Michael Keller (https://github.com/mhkeller/svelte-double- Changes: remove dependency, text inputs, configurable value ranges, on:change event --> diff --git a/web/templates/monitoring/taglist.tmpl b/web/templates/monitoring/taglist.tmpl index 6e487dd..ea29cd7 100644 --- a/web/templates/monitoring/taglist.tmpl +++ b/web/templates/monitoring/taglist.tmpl @@ -7,8 +7,16 @@ {{ $tagType }}
 {{ range $tagList }}
-	
-		{{ .name }} {{ .count }}
+	{{if eq .scope "global"}}
+		
+			{{ .name }} {{ .count }}
+	{{else if eq .scope "admin"}}
+		
+			{{ .name }} {{ .count }}
+	{{else}}
+		
+			{{ .name }} {{ .count }}
+	{{end}}
 	{{end}}
 {{end}}

From ff3502c87a1af7641a103e06e60278a93c0b86fc Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Fri, 2 Aug 2024 16:11:47 +0200
Subject: [PATCH 072/443] fix: fix tag filter results

- displayed multiple identical entries before
- job count was incorrect before
---
 internal/repository/jobQuery.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go
index 7b575ef..b0a846c 100644
--- a/internal/repository/jobQuery.go
+++ b/internal/repository/jobQuery.go
@@ -89,7 +89,7 @@ func (r *JobRepository) CountJobs(
 	ctx context.Context,
 	filters []*model.JobFilter,
 ) (int, error) {
-	query, qerr := SecurityCheck(ctx, sq.Select("count(*)").From("job"))
+	query, qerr := SecurityCheck(ctx, sq.Select("count(DISTINCT job.id)").From("job"))
 	if qerr != nil {
 		return 0, qerr
 	}
@@ -136,7 +136,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde
 // Build a sq.SelectBuilder out of a schema.JobFilter.
 func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.SelectBuilder {
 	if filter.Tags != nil {
-		query = query.Join("jobtag ON jobtag.job_id = job.id").Where(sq.Eq{"jobtag.tag_id": filter.Tags})
+		query = query.Join("jobtag ON jobtag.job_id = job.id").Where(sq.Eq{"jobtag.tag_id": filter.Tags}).Distinct()
 	}
 	if filter.JobID != nil {
 		query = buildStringCondition("job.job_id", filter.JobID, query)

From e02575aad7d88ca5396e28df2c0276fb03201e68 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Fri, 2 Aug 2024 16:42:55 +0200
Subject: [PATCH 073/443] adds comments
---
 internal/repository/jobQuery.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go
index b0a846c..843d797 100644
--- a/internal/repository/jobQuery.go
+++ b/internal/repository/jobQuery.go
@@ -89,6 +89,7 @@ func (r *JobRepository) CountJobs(
 	ctx context.Context,
 	filters []*model.JobFilter,
 ) (int, error) {
+	// DISTINCT count for tags filters, does not affect other queries
 	query, qerr := SecurityCheck(ctx, sq.Select("count(DISTINCT job.id)").From("job"))
 	if qerr != nil {
 		return 0, qerr
@@ -136,6 +137,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde
 // Build a sq.SelectBuilder out of a schema.JobFilter.
 func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.SelectBuilder {
 	if filter.Tags != nil {
+		// This is an OR-Logic query: Returns all distinct jobs with at least one of the requested tags; TODO: AND-Logic query?
query = query.Join("jobtag ON jobtag.job_id = job.id").Where(sq.Eq{"jobtag.tag_id": filter.Tags}).Distinct() } if filter.JobID != nil { From 2551921ed63280cfdbf5ec9fe96cdc76f10e7667 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 2 Aug 2024 18:14:24 +0200 Subject: [PATCH 074/443] fix: wrong display of tag after filter select - exitent pills were non-updated on change of key --- web/frontend/src/generic/Filters.svelte | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/generic/Filters.svelte b/web/frontend/src/generic/Filters.svelte index a1839c7..d01c16b 100644 --- a/web/frontend/src/generic/Filters.svelte +++ b/web/frontend/src/generic/Filters.svelte @@ -322,7 +322,9 @@ {#if filters.tags.length != 0} (isTagsOpen = true)}> {#each filters.tags as tagId} - + {#key tagId} + + {/key} {/each} {/if} From e6ebec8c1e9a1c56a54bfb093a50354402d2a434 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 5 Aug 2024 10:19:00 +0200 Subject: [PATCH 075/443] fix TestGetTags test, was missing scope and ctx --- internal/repository/job_test.go | 16 +++++++++++++++- internal/repository/testdata/job.db | Bin 114688 -> 118784 bytes 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/internal/repository/job_test.go b/internal/repository/job_test.go index 2589fb9..f7b3783 100644 --- a/internal/repository/job_test.go +++ b/internal/repository/job_test.go @@ -5,9 +5,11 @@ package repository import ( + "context" "fmt" "testing" + "github.com/ClusterCockpit/cc-backend/pkg/schema" _ "github.com/mattn/go-sqlite3" ) @@ -45,7 +47,19 @@ func TestFindById(t *testing.T) { func TestGetTags(t *testing.T) { r := setup(t) - tags, counts, err := r.CountTags(nil) + const contextUserKey ContextKey = "user" + contextUserValue := &schema.User{ + Username: "testuser", + Projects: make([]string, 0), + Roles: []string{"user"}, + AuthType: 0, + AuthSource: 2, + } + + ctx := context.WithValue(getContext(t), contextUserKey, contextUserValue) + + // Test Tag has Scope "global" + tags, counts, err := r.CountTags(ctx) if err != nil { t.Fatal(err) } diff --git a/internal/repository/testdata/job.db b/internal/repository/testdata/job.db index c345327e67e5c6dff1500c214831d7da28019c13..23eba6fb5066702a5bf6c48539e75e69da8cfd53 100644 GIT binary patch delta 466 zcmZo@U~gE!K0#VgjDdkc7K)jGwAMr&V^%Q+y{roxQx@5?Tw>tAv{_K#1i!QnBO8OL zyrgkyNoGz`VqQvlW=cs$dQN^)V$SBb{&EhRWhOk~pSVGA(*ni`(o6vi?2OtB{8RbW z7`6E>asTGN#I_0ybtg7dZJ%bqxQmg6x0fXfXm%LO_Elz#kJ*^`{%`NMWh`K10Xjo{ z;zH%^YW9rx3Y2)c<}vZ#VgNsusGewEh z(=Wu;-8D!D$OVeWmsA#{DsfIXG-nj&fr#WK=E6iYAtJ@e`5;9ht`Q*$e*Qol>f-~_ zpP5&jT2vBWQmn)|xt~$Nmop&9)7LR5Qo-9bQUhd}CQy~Lzh7`jkR#Aq1*N3a_|m-0 z!qQYFg-}1wz))8Wgd23gOt9seT$+|LllyrswhQlJe9O!O3?L9-1Y)kuf)n2JPYhrM E01o4Tu>b%7 delta 281 zcmZozz~0cnK0#Vgl!1Xk28g+Um=TDTC+ZlpiZbYxb#6>qWY2Pwf&b=aL4ou9BC3q6 z45ISJr6rj;Nr`zW<(VlZ8Jpkw%Q-M|Zf2bDg@4lm#tG8QP7Iuk+6??t`PG;L`2I6$ z^IhWp&3%b&6`StFhN|t;3>bGYP7YxD#KPOloUvVUFXIX(uI@xZc5z{0#_5`q&Dm7B zx{V;*Y2FYnGlV-e7tHMnfpYml-0iE(7$37SF)(cJw`D9~ocy2f&-UN;jGP5h+}t;r z_;2zz@LTdd=WF9L;l0M&$ScYt!VU7xI&OaNP<=**%)FG;isHf?OPR_2ycXMq_b|R? 
F1_0e7O=SQ8 From 9b5c6e3164186e9d48b9c0bb062a4ea9abd71a4d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 5 Aug 2024 10:37:42 +0200 Subject: [PATCH 076/443] fix StartJobTest, add tag_scope to migration --- internal/api/api_test.go | 4 ++-- .../repository/migrations/sqlite3/08_add-footprint.up.sql | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 80a7e64..423bf5c 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -212,7 +212,7 @@ func TestRestApi(t *testing.T) { "exclusive": 1, "monitoringStatus": 1, "smt": 1, - "tags": [{ "type": "testTagType", "name": "testTagName" }], + "tags": [{ "type": "testTagType", "name": "testTagName", "scope": "testuser" }], "resources": [ { "hostname": "host123", @@ -280,7 +280,7 @@ func TestRestApi(t *testing.T) { t.Fatalf("unexpected job properties: %#v", job) } - if len(job.Tags) != 1 || job.Tags[0].Type != "testTagType" || job.Tags[0].Name != "testTagName" { + if len(job.Tags) != 1 || job.Tags[0].Type != "testTagType" || job.Tags[0].Name != "testTagName" || job.Tags[0].Scope != "testuser" { t.Fatalf("unexpected tags: %#v", job.Tags) } diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 643b87e..cf9c2b8 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,6 +1,6 @@ ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; - ALTER TABLE job ADD COLUMN footprint TEXT DEFAULT NULL; +ALTER TABLE tag ADD COLUMN tag_scope TEXT NOT NULL DEFAULT 'global'; UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); From 0afaea95139480136135b2a3c47cd22256857e20 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 8 Aug 2024 12:28:36 +0200 Subject: [PATCH 077/443] initial commit with example event dispatch --- web/frontend/src/Job.root.svelte | 8 +++++++- web/frontend/src/Node.root.svelte | 2 +- web/frontend/src/job/Metric.svelte | 9 +++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index f991e4f..31ca6e7 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -75,7 +75,7 @@ duration, numNodes, numHWThreads, numAcc, SMT, exclusive, partition, subCluster, arrayJobId, monitoringStatus, state, walltime, - tags { id, type, name, scope }, + tags { id, type, name }, resources { hostname, hwthreads, accelerators }, metaData, userData { name, email }, @@ -229,6 +229,11 @@ $initq.data.job.subCluster, ), })); + + + const loadRes = ({ detail }) => { + console.log(">>> UPPER RES REQUEST", detail) + } @@ -358,6 +363,7 @@ gm.name == item.metric)?.unit} diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index ad6983b..2d58540 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -27,7 +27,7 @@ import { init, checkMetricDisabled, - } from "./utils.js"; + } from "./generic/utils.js"; import PlotTable from "./generic/PlotTable.svelte"; import MetricPlot from "./generic/plots/MetricPlot.svelte"; import TimeSelection from "./generic/select/TimeSelection.svelte"; diff --git a/web/frontend/src/job/Metric.svelte 
b/web/frontend/src/job/Metric.svelte index 88c6da8..8184e50 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -40,11 +40,15 @@ fetching = false, error = null; let selectedScope = minScope(scopes); + let selectedResolution = 60 + $: dispatch("new-res", selectedResolution) let statsPattern = /(.*)-stat$/ let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) let selectedScopeIndex + const resolutions = [60, 240, 600] + $: availableScopes = scopes; $: patternMatches = statsPattern.exec(selectedScope) $: if (!patternMatches) { @@ -83,6 +87,11 @@ {/each} {/if} + {#key series} {#if fetching == true} From ce9995dac7acf19dec01615393938eb08898747c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 8 Aug 2024 12:29:45 +0200 Subject: [PATCH 078/443] fix: fix wrongly inserted gql request and import path error --- web/frontend/src/Job.root.svelte | 2 +- web/frontend/src/Node.root.svelte | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index f991e4f..213898e 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -75,7 +75,7 @@ duration, numNodes, numHWThreads, numAcc, SMT, exclusive, partition, subCluster, arrayJobId, monitoringStatus, state, walltime, - tags { id, type, name, scope }, + tags { id, type, name }, resources { hostname, hwthreads, accelerators }, metaData, userData { name, email }, diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index ad6983b..2d58540 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -27,7 +27,7 @@ import { init, checkMetricDisabled, - } from "./utils.js"; + } from "./generic/utils.js"; import PlotTable from "./generic/PlotTable.svelte"; import MetricPlot from "./generic/plots/MetricPlot.svelte"; import TimeSelection from "./generic/select/TimeSelection.svelte"; From 561fd41d5d3b9877dc7be3980bda50e4fcb1f46a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 13 Aug 2024 17:49:28 +0200 Subject: [PATCH 079/443] fix: add accelerator scope to to-be archived scopes - if numAcc > 0 - fixes Add accelerator scope to archive requests #282 --- internal/metricdata/metricdata.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index c826113..eba9dee 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -307,6 +307,10 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { scopes = append(scopes, schema.MetricScopeCore) } + if job.NumAcc > 0 { + scopes = append(scopes, schema.MetricScopeAccelerator) + } + jobData, err := LoadData(job, allMetrics, scopes, ctx) if err != nil { log.Error("Error wile loading job data for archiving") From 9b6db4684adde310b29edb56d298c3faacbcd1b3 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 15 Aug 2024 08:53:49 +0200 Subject: [PATCH 080/443] Refactor: Remove redundant code --- cmd/cc-backend/server.go | 49 +++++++++++----------------------------- 1 file changed, 13 insertions(+), 36 deletions(-) diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index 5531415..d2b62e2 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -38,6 +38,15 @@ var ( apiHandle *api.RestApi ) +func onFailureResponse(rw http.ResponseWriter, r *http.Request, err error) { + rw.Header().Add("Content-Type", 
"application/json") + rw.WriteHeader(http.StatusUnauthorized) + json.NewEncoder(rw).Encode(map[string]string{ + "status": http.StatusText(http.StatusUnauthorized), + "error": err.Error(), + }) +} + func serverInit() { // Setup the http.Handler/Router used by the server graph.Init() @@ -166,64 +175,32 @@ func serverInit() { return authHandle.AuthApi( // On success; next, - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) + onFailureResponse) }) userapi.Use(func(next http.Handler) http.Handler { return authHandle.AuthUserApi( // On success; next, - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) + onFailureResponse) }) configapi.Use(func(next http.Handler) http.Handler { return authHandle.AuthConfigApi( // On success; next, - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) + onFailureResponse) }) frontendapi.Use(func(next http.Handler) http.Handler { return authHandle.AuthFrontendApi( // On success; next, - // On failure: JSON Response - func(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusUnauthorized) - json.NewEncoder(rw).Encode(map[string]string{ - "status": http.StatusText(http.StatusUnauthorized), - "error": err.Error(), - }) - }) + onFailureResponse) }) } From ba2f406bc08b9ca75d9b03773269f8ada89c7e2b Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 15 Aug 2024 09:41:54 +0200 Subject: [PATCH 081/443] Extend sqlite db migration --- .../sqlite3/08_add-footprint.down.sql | 21 +++++++++++++++++++ .../sqlite3/08_add-footprint.up.sql | 10 +++++++++ pkg/schema/job.go | 2 +- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.down.sql b/internal/repository/migrations/sqlite3/08_add-footprint.down.sql index e69de29..8c99eb5 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.down.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.down.sql @@ -0,0 +1,21 @@ +ALTER TABLE job DROP energy; +ALTER TABLE job DROP energy_footprint; +ALTER TABLE job ADD COLUMN flops_any_avg; +ALTER TABLE job ADD COLUMN mem_bw_avg; +ALTER TABLE job ADD COLUMN mem_used_max; +ALTER TABLE job ADD COLUMN load_avg; +ALTER TABLE job ADD COLUMN net_bw_avg; +ALTER TABLE job ADD COLUMN net_data_vol_total; +ALTER TABLE job ADD COLUMN file_bw_avg; +ALTER TABLE job ADD COLUMN file_data_vol_total; + +UPDATE job SET flops_any_avg = json_extract(footprint, '$.flops_any_avg'); +UPDATE job SET mem_bw_avg = json_extract(footprint, '$.mem_bw_avg'); +UPDATE job SET mem_used_max = json_extract(footprint, '$.mem_used_max'); +UPDATE job SET load_avg = json_extract(footprint, '$.cpu_load_avg'); +UPDATE job SET net_bw_avg = json_extract(footprint, 
'$.net_bw_avg'); +UPDATE job SET net_data_vol_total = json_extract(footprint, '$.net_data_vol_total'); +UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg'); +UPDATE job SET file_data_vol_total = json_extract(footprint, '$.file_data_vol_total'); + +ALTER TABLE job DROP footprint; diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 643b87e..e5af149 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,4 +1,5 @@ ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; +ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; ALTER TABLE job ADD COLUMN footprint TEXT DEFAULT NULL; UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; @@ -6,7 +7,16 @@ UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_ UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max); UPDATE job SET footprint = json_insert(footprint, '$.cpu_load_avg', job.load_avg); +UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg); +UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total); +UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg); +UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total); + ALTER TABLE job DROP flops_any_avg; ALTER TABLE job DROP mem_bw_avg; ALTER TABLE job DROP mem_used_max; ALTER TABLE job DROP load_avg; +ALTER TABLE job DROP net_bw_avg; +ALTER TABLE job DROP net_data_vol_total; +ALTER TABLE job DROP file_bw_avg; +ALTER TABLE job DROP file_data_vol_total; diff --git a/pkg/schema/job.go b/pkg/schema/job.go index 83064c7..2a2ea95 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -32,7 +32,7 @@ type BaseJob struct { Footprint map[string]float64 `json:"footprint"` MetaData map[string]string `json:"metaData"` ConcurrentJobs JobLinkResultList `json:"concurrentJobs"` - Energy float64 `json:"energy"` + Energy float64 `json:"energy" db:"energy"` ArrayJobId int64 `json:"arrayJobId,omitempty" db:"array_job_id" example:"123000"` Walltime int64 `json:"walltime,omitempty" db:"walltime" example:"86400" minimum:"1"` JobID int64 `json:"jobId" db:"job_id" example:"123000"` From e1faba0ff2334d7eabaa2d4aa566c55ca4c55f78 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 15 Aug 2024 10:39:32 +0200 Subject: [PATCH 082/443] Update cluster json schema --- pkg/schema/schemas/cluster.schema.json | 597 +++++++++++++------------ 1 file changed, 316 insertions(+), 281 deletions(-) diff --git a/pkg/schema/schemas/cluster.schema.json b/pkg/schema/schemas/cluster.schema.json index e745f99..81b138a 100644 --- a/pkg/schema/schemas/cluster.schema.json +++ b/pkg/schema/schemas/cluster.schema.json @@ -1,284 +1,319 @@ { - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://cluster.schema.json", - "title": "HPC cluster description", - "description": "Meta data information of a HPC cluster", - "type": "object", - "properties": { - "name": { - "description": "The unique identifier of a cluster", - "type": "string" - }, - "metricConfig": { - "description": "Metric specifications", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "description": "Metric name", - "type": 
"string" - }, - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "scope": { - "description": "Native measurement resolution", - "type": "string" - }, - "timestep": { - "description": "Frequency of timeseries points", - "type": "integer" - }, - "aggregation": { - "description": "How the metric is aggregated", - "type": "string", - "enum": [ - "sum", - "avg" - ] - }, - "peak": { - "description": "Metric peak threshold (Upper metric limit)", - "type": "number" - }, - "normal": { - "description": "Metric normal threshold", - "type": "number" - }, - "caution": { - "description": "Metric caution threshold (Suspicious but does not require immediate action)", - "type": "number" - }, - "alert": { - "description": "Metric alert threshold (Requires immediate action)", - "type": "number" - }, - "subClusters": { - "description": "Array of cluster hardware partition metric thresholds", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "description": "Hardware partition name", - "type": "string" - }, - "peak": { - "type": "number" - }, - "normal": { - "type": "number" - }, - "caution": { - "type": "number" - }, - "alert": { - "type": "number" - }, - "remove": { - "type": "boolean" - } - }, - "required": [ - "name" - ] - } - } - }, - "required": [ - "name", - "unit", - "scope", - "timestep", - "aggregation", - "peak", - "normal", - "caution", - "alert" - ] - }, - "minItems": 1 - }, - "subClusters": { - "description": "Array of cluster hardware partitions", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "description": "Hardware partition name", - "type": "string" - }, - "processorType": { - "description": "Processor type", - "type": "string" - }, - "socketsPerNode": { - "description": "Number of sockets per node", - "type": "integer" - }, - "coresPerSocket": { - "description": "Number of cores per socket", - "type": "integer" - }, - "threadsPerCore": { - "description": "Number of SMT threads per core", - "type": "integer" - }, - "flopRateScalar": { - "description": "Theoretical node peak flop rate for scalar code in GFlops/s", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "value": { - "type": "number" - } - } - }, - "flopRateSimd": { - "description": "Theoretical node peak flop rate for SIMD code in GFlops/s", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "value": { - "type": "number" - } - } - }, - "memoryBandwidth": { - "description": "Theoretical node peak memory bandwidth in GB/s", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "value": { - "type": "number" - } - } - }, - "nodes": { - "description": "Node list expression", - "type": "string" - }, - "topology": { - "description": "Node topology", - "type": "object", - "properties": { - "node": { - "description": "HwTread lists of node", - "type": "array", - "items": { - "type": "integer" - } - }, - "socket": { - "description": "HwTread lists of sockets", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "integer" - } - } - }, - "memoryDomain": { - "description": "HwTread lists of memory domains", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "integer" - } - } - }, - "die": { - "description": "HwTread lists of dies", - "type": "array", - "items": { - 
"type": "array", - "items": { - "type": "integer" - } - } - }, - "core": { - "description": "HwTread lists of cores", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "integer" - } - } - }, - "accelerators": { - "type": "array", - "description": "List of of accelerator devices", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The unique device id" - }, - "type": { - "type": "string", - "description": "The accelerator type", - "enum": [ - "Nvidia GPU", - "AMD GPU", - "Intel GPU" - ] - }, - "model": { - "type": "string", - "description": "The accelerator model" - } - }, - "required": [ - "id", - "type", - "model" - ] - } - } - }, - "required": [ - "node", - "socket", - "memoryDomain" - ] - } - }, - "required": [ - "name", - "nodes", - "topology", - "processorType", - "socketsPerNode", - "coresPerSocket", - "threadsPerCore", - "flopRateScalar", - "flopRateSimd", - "memoryBandwidth" - ] - }, - "minItems": 1 - } + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$id": "embedfs://cluster.schema.json", + "title": "HPC cluster description", + "description": "Meta data information of a HPC cluster", + "type": "object", + "properties": { + "name": { + "description": "The unique identifier of a cluster", + "type": "string" }, - "required": [ - "name", - "metricConfig", - "subClusters" - ] + "metricConfig": { + "description": "Metric specifications", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "description": "Metric name", + "type": "string" + }, + "unit": { + "description": "Metric unit", + "$ref": "embedfs://unit.schema.json" + }, + "scope": { + "description": "Native measurement resolution", + "type": "string" + }, + "timestep": { + "description": "Frequency of timeseries points", + "type": "integer" + }, + "aggregation": { + "description": "How the metric is aggregated", + "type": "string", + "enum": [ + "sum", + "avg" + ] + }, + "footprint": { + "description": "Is it a footprint metric and what type", + "type": "string", + "enum": [ + "avg", + "max", + "min" + ] + }, + "energy": { + "description": "Is it used to calculate job energy", + "type": "boolean" + }, + "lowerIsBetter": { + "description": "Is lower better.", + "type": "boolean" + }, + "peak": { + "description": "Metric peak threshold (Upper metric limit)", + "type": "number" + }, + "normal": { + "description": "Metric normal threshold", + "type": "number" + }, + "caution": { + "description": "Metric caution threshold (Suspicious but does not require immediate action)", + "type": "number" + }, + "alert": { + "description": "Metric alert threshold (Requires immediate action)", + "type": "number" + }, + "subClusters": { + "description": "Array of cluster hardware partition metric thresholds", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "description": "Hardware partition name", + "type": "string" + }, + "footprint": { + "description": "Is it a footprint metric and what type. Overwrite global setting", + "type": "string", + "enum": [ + "avg", + "max", + "min" + ] + }, + "energy": { + "description": "Is it used to calculate job energy. Overwrite global", + "type": "boolean" + }, + "lowerIsBetter": { + "description": "Is lower better. 
Overwrite global", + "type": "boolean" + }, + "peak": { + "type": "number" + }, + "normal": { + "type": "number" + }, + "caution": { + "type": "number" + }, + "alert": { + "type": "number" + }, + "remove": { + "description": "Remove this metric for this subcluster", + "type": "boolean" + } + }, + "required": [ + "name" + ] + } + } + }, + "required": [ + "name", + "unit", + "scope", + "timestep", + "aggregation", + "peak", + "normal", + "caution", + "alert" + ] + }, + "minItems": 1 + }, + "subClusters": { + "description": "Array of cluster hardware partitions", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "description": "Hardware partition name", + "type": "string" + }, + "processorType": { + "description": "Processor type", + "type": "string" + }, + "socketsPerNode": { + "description": "Number of sockets per node", + "type": "integer" + }, + "coresPerSocket": { + "description": "Number of cores per socket", + "type": "integer" + }, + "threadsPerCore": { + "description": "Number of SMT threads per core", + "type": "integer" + }, + "flopRateScalar": { + "description": "Theoretical node peak flop rate for scalar code in GFlops/s", + "type": "object", + "properties": { + "unit": { + "description": "Metric unit", + "$ref": "embedfs://unit.schema.json" + }, + "value": { + "type": "number" + } + } + }, + "flopRateSimd": { + "description": "Theoretical node peak flop rate for SIMD code in GFlops/s", + "type": "object", + "properties": { + "unit": { + "description": "Metric unit", + "$ref": "embedfs://unit.schema.json" + }, + "value": { + "type": "number" + } + } + }, + "memoryBandwidth": { + "description": "Theoretical node peak memory bandwidth in GB/s", + "type": "object", + "properties": { + "unit": { + "description": "Metric unit", + "$ref": "embedfs://unit.schema.json" + }, + "value": { + "type": "number" + } + } + }, + "nodes": { + "description": "Node list expression", + "type": "string" + }, + "topology": { + "description": "Node topology", + "type": "object", + "properties": { + "node": { + "description": "HwTread lists of node", + "type": "array", + "items": { + "type": "integer" + } + }, + "socket": { + "description": "HwTread lists of sockets", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "memoryDomain": { + "description": "HwTread lists of memory domains", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "die": { + "description": "HwTread lists of dies", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "core": { + "description": "HwTread lists of cores", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "accelerators": { + "type": "array", + "description": "List of of accelerator devices", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique device id" + }, + "type": { + "type": "string", + "description": "The accelerator type", + "enum": [ + "Nvidia GPU", + "AMD GPU", + "Intel GPU" + ] + }, + "model": { + "type": "string", + "description": "The accelerator model" + } + }, + "required": [ + "id", + "type", + "model" + ] + } + } + }, + "required": [ + "node", + "socket", + "memoryDomain" + ] + } + }, + "required": [ + "name", + "nodes", + "topology", + "processorType", + "socketsPerNode", + "coresPerSocket", + "threadsPerCore", + "flopRateScalar", + "flopRateSimd", + "memoryBandwidth" + ] + 
}, + "minItems": 1 + } + }, + "required": [ + "name", + "metricConfig", + "subClusters" + ] } From 5c99f5f8bbb44366dc13d0250bff1087dfa2e0cd Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 15 Aug 2024 12:35:11 +0200 Subject: [PATCH 083/443] Only add footprint columns if not 0 --- .../repository/migrations/sqlite3/08_add-footprint.up.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index e5af149..0ffbf37 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -7,10 +7,10 @@ UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_ UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max); UPDATE job SET footprint = json_insert(footprint, '$.cpu_load_avg', job.load_avg); -UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg); -UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total); -UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg); -UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total); +UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg) IF job.net_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total) IF job.net_data_vol_total != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg) IF job.file_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total) IF job.file_data_vol_total != 0; ALTER TABLE job DROP flops_any_avg; ALTER TABLE job DROP mem_bw_avg; From d6a88896d059023eeac8dbad415a0ce065f328fe Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 15 Aug 2024 12:36:21 +0200 Subject: [PATCH 084/443] Refactor: Reduce struct memory size --- pkg/schema/cluster.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index a77bd32..e9aa178 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -47,11 +47,11 @@ type SubCluster struct { type SubClusterConfig struct { Name string `json:"name"` + Footprint string `json:"footprint,omitempty"` Peak float64 `json:"peak"` Normal float64 `json:"normal"` Caution float64 `json:"caution"` Alert float64 `json:"alert"` - Footprint string `json:"footprint,omitempty"` Remove bool `json:"remove"` LowerIsBetter bool `json:"lowerIsBetter"` Energy bool `json:"energy"` @@ -62,14 +62,14 @@ type MetricConfig struct { Name string `json:"name"` Scope MetricScope `json:"scope"` Aggregation string `json:"aggregation"` + Footprint string `json:"footprint,omitempty"` SubClusters []*SubClusterConfig `json:"subClusters,omitempty"` - Timestep int `json:"timestep"` Peak float64 `json:"peak"` Normal float64 `json:"normal"` Caution float64 `json:"caution"` Alert float64 `json:"alert"` + Timestep int `json:"timestep"` LowerIsBetter bool `json:"lowerIsBetter"` - Footprint string `json:"footprint,omitempty"` Energy bool `json:"energy"` } From 5e074dad1029062a39241daaa8319f20f5f36736 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 15 Aug 2024 12:39:14 +0200 Subject: [PATCH 085/443] Resolve 
error in migration --- .../repository/migrations/sqlite3/08_add-footprint.up.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 0ffbf37..93f0659 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -7,10 +7,10 @@ UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_ UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max); UPDATE job SET footprint = json_insert(footprint, '$.cpu_load_avg', job.load_avg); -UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg) IF job.net_bw_avg != 0; -UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total) IF job.net_data_vol_total != 0; -UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg) IF job.file_bw_avg != 0; -UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total) IF job.file_data_vol_total != 0; +UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg) WHERE job.net_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total) WHERE job.net_data_vol_total != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg) WHERE job.file_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total) WHERE job.file_data_vol_total != 0; ALTER TABLE job DROP flops_any_avg; ALTER TABLE job DROP mem_bw_avg; From 49e0a2c0550c208264747c8dce3c5812b9a0f921 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 15 Aug 2024 14:33:04 +0200 Subject: [PATCH 086/443] fix: add compatibility for footprint metrics without config --- .../src/generic/helper/JobFootprint.svelte | 222 ++++++++++-------- 1 file changed, 129 insertions(+), 93 deletions(-) diff --git a/web/frontend/src/generic/helper/JobFootprint.svelte b/web/frontend/src/generic/helper/JobFootprint.svelte index 82818e3..4e1abb0 100644 --- a/web/frontend/src/generic/helper/JobFootprint.svelte +++ b/web/frontend/src/generic/helper/JobFootprint.svelte @@ -67,62 +67,74 @@ export let height = "310px"; const footprintData = job?.footprint?.map((jf) => { - // Unit const fmc = getContext("getMetricConfig")(job.cluster, job.subCluster, jf.name); - const unit = (fmc?.unit?.prefix ? fmc.unit.prefix : "") + (fmc?.unit?.base ? fmc.unit.base : "") + if (fmc) { + // Unit + const unit = (fmc?.unit?.prefix ? fmc.unit.prefix : "") + (fmc?.unit?.base ? 
fmc.unit.base : "") - // Threshold / -Differences - const fmt = findJobThresholds(job, fmc); - if (jf.name === "flops_any") fmt.peak = round(fmt.peak * 0.85, 0); + // Threshold / -Differences + const fmt = findJobThresholds(job, fmc); + if (jf.name === "flops_any") fmt.peak = round(fmt.peak * 0.85, 0); - // Define basic data -> Value: Use as Provided - const fmBase = { - name: jf.name + ' (' + jf.stat + ')', - avg: jf.value, - unit: unit, - max: fmt.peak, - dir: fmc.lowerIsBetter - }; + // Define basic data -> Value: Use as Provided + const fmBase = { + name: jf.name + ' (' + jf.stat + ')', + avg: jf.value, + unit: unit, + max: fmt.peak, + dir: fmc.lowerIsBetter + }; - if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "alert")) { + if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "alert")) { + return { + ...fmBase, + color: "danger", + message: `Metric average way ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, + impact: 3 + }; + } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "caution")) { + return { + ...fmBase, + color: "warning", + message: `Metric average ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, + impact: 2, + }; + } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "normal")) { + return { + ...fmBase, + color: "success", + message: "Metric average within expected thresholds.", + impact: 1, + }; + } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "peak")) { + return { + ...fmBase, + color: "info", + message: + "Metric average above expected normal thresholds: Check for artifacts recommended.", + impact: 0, + }; + } else { + return { + ...fmBase, + color: "secondary", + message: + "Metric average above expected peak threshold: Check for artifacts!", + impact: -1, + }; + } + } else { // No matching metric config: display as single value return { - ...fmBase, - color: "danger", - message: `Metric average way ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, - impact: 3 - }; - } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "caution")) { - return { - ...fmBase, - color: "warning", - message: `Metric average ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, - impact: 2, - }; - } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "normal")) { - return { - ...fmBase, - color: "success", - message: "Metric average within expected thresholds.", - impact: 1, - }; - } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "peak")) { - return { - ...fmBase, - color: "info", + name: jf.name + ' (' + jf.stat + ')', + avg: jf.value, message: - "Metric average above expected normal thresholds: Check for artifacts recommended.", - impact: 0, - }; - } else { - return { - ...fmBase, - color: "secondary", - message: - "Metric average above expected peak threshold: Check for artifacts!", - impact: -1, + `No config for metric ${jf.name} found.`, + impact: 4, }; } - }); + }).sort(function (a, b) { // Sort by impact value primarily, within impact sort name alphabetically + return a.impact - b.impact || ((a.name > b.name) ? 1 : ((b.name > a.name) ? -1 : 0)); + });; function evalFootprint(mean, thresholds, lowerIsBetter, level) { // Handle Metrics in which less value is better @@ -159,37 +171,76 @@ {/if} {#each footprintData as fpd, index} -
-
 {fpd.name}
- -
-
- - {#if fpd.impact === 3 || fpd.impact === -1} - - {:else if fpd.impact === 2} - - {/if} - - {#if fpd.impact === 3} - - {:else if fpd.impact === 2} - - {:else if fpd.impact === 1} - - {:else if fpd.impact === 0} - - {:else if fpd.impact === -1} - - {/if} + {#if fpd.impact !== 4} +
+
 {fpd.name}
+ +
+
+ + {#if fpd.impact === 3 || fpd.impact === -1} + + {:else if fpd.impact === 2} + + {/if} + + {#if fpd.impact === 3} + + {:else if fpd.impact === 2} + + {:else if fpd.impact === 1} + + {:else if fpd.impact === 0} + + {:else if fpd.impact === -1} + + {/if} +
+
+ + {fpd.avg} / {fpd.max} + {fpd.unit}   +
+ {fpd.message} +
+ + {#if fpd.dir} +
+ + + {/if} + + + + {#if !fpd.dir} + + + + {/if} + + {:else} +
- - {fpd.avg} / {fpd.max} - {fpd.unit}   +  {fpd.name} +
+
+
+ +
+
+ {fpd.avg}  +
{fpd.message} - - - {#if fpd.dir} -
- - - {/if} - - - - {#if !fpd.dir} - - - - {/if} - + {/if} {/each} {#if job?.metaData?.message}
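
A note on the struct reordering in [PATCH 084/443] above: it saves memory because Go aligns every field to its natural alignment and pads the gaps, so grouping the 16-byte strings first, the 8-byte numbers next, and the 1-byte bools last minimizes padding. A minimal, self-contained sketch of the effect; Before and After are illustrative stand-ins, not the actual SubClusterConfig:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // Before: the bool between the string and the float64 forces seven
    // bytes of padding so that Peak stays 8-byte aligned.
    type Before struct {
    	Name   string  // 16 bytes on 64-bit platforms
    	Remove bool    // 1 byte + 7 bytes padding
    	Peak   float64 // 8 bytes
    	Energy bool    // 1 byte + 7 bytes trailing padding
    }

    // After: large fields first, bools packed together at the end.
    type After struct {
    	Name   string  // 16 bytes
    	Peak   float64 // 8 bytes
    	Remove bool    // 1 byte
    	Energy bool    // 1 byte + 6 bytes trailing padding
    }

    func main() {
    	fmt.Println(unsafe.Sizeof(Before{}), unsafe.Sizeof(After{})) // prints: 40 32
    }

With one MetricConfig per metric and one SubClusterConfig per subcluster override held in memory for every cluster, the saved padding accumulates, which is what the commit subject ("Reduce struct memory size") is after.
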
From b1fd07cd30b61ab00808f2b47fdb95fa107f9880 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 16 Aug 2024 14:50:31 +0200 Subject: [PATCH 087/443] add single update gql queries to metric wrapper --- api/schema.graphqls | 2 +- internal/api/rest.go | 2 +- internal/graph/generated/generated.go | 19 ++++-- internal/graph/schema.resolvers.go | 8 ++- web/frontend/src/Job.root.svelte | 23 +++---- web/frontend/src/job/Metric.svelte | 98 +++++++++++++++++++++++---- 6 files changed, 114 insertions(+), 38 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 568c15d..8c27504 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -223,7 +223,7 @@ type Query { allocatedNodes(cluster: String!): [Count!]! job(id: ID!): Job - jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]! + jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! diff --git a/internal/api/rest.go b/internal/api/rest.go index c8f4e7a..7946ab7 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -1110,7 +1110,7 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) { } resolver := graph.GetResolverInstance() - data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes) + data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes, nil) if err != nil { json.NewEncoder(rw).Encode(Respone{ Error: &struct { diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 9ca0a60..23d31e7 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -246,7 +246,7 @@ type ComplexityRoot struct { Clusters func(childComplexity int) int GlobalMetrics func(childComplexity int) int Job func(childComplexity int, id string) int - JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int + JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope, resolution *int) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int @@ -368,7 +368,7 @@ type QueryResolver interface { User(ctx context.Context, username string) (*model.User, error) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) Job(ctx context.Context, id string) (*schema.Job, error) - JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) + JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) 
@@ -1290,7 +1290,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true + return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope), args["resolution"].(*int)), true case "Query.jobs": if e.complexity.Query.Jobs == nil { @@ -2059,7 +2059,7 @@ type Query { allocatedNodes(cluster: String!): [Count!]! job(id: ID!): Job - jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]! + jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! @@ -2370,6 +2370,15 @@ func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, raw } } args["scopes"] = arg2 + var arg3 *int + if tmp, ok := rawArgs["resolution"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("resolution")) + arg3, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["resolution"] = arg3 return args, nil } @@ -8499,7 +8508,7 @@ func (ec *executionContext) _Query_jobMetrics(ctx context.Context, field graphql }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobMetrics(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope)) + return ec.resolvers.Query().JobMetrics(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope), fc.Args["resolution"].(*int)) }) if err != nil { ec.Error(ctx, err) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index f36e25a..9e7bd3d 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -224,13 +224,19 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) } // JobMetrics is the resolver for the jobMetrics field. 
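
The signature change that follows gives this resolver the new optional argument. Because resolution is declared as a nullable Int in the GraphQL schema, it arrives in Go as *int, and callers without a preference (like the REST handler above, which passes nil) rely on a server-side default. A sketch of that defaulting idiom; withDefault is a hypothetical helper, the resolver below inlines the same check with a default of 600:

    // withDefault returns *v, or def when the client omitted the
    // optional GraphQL argument (v == nil).
    func withDefault(v *int, def int) int {
    	if v == nil {
    		return def
    	}
    	return *v
    }
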
-func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) { +func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) { + defaultRes := 600 + if resolution == nil { + resolution = &defaultRes + } + job, err := r.Query().Job(ctx, id) if err != nil { log.Warn("Error while querying job for metrics") return nil, err } + log.Debugf(">>>>> REQUEST DATA HERE FOR %v AT SCOPE %v WITH RESOLUTION OF %d", metrics, scopes, *resolution) data, err := metricdata.LoadData(job, metrics, scopes, ctx) if err != nil { log.Warn("Error while loading job data") diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 31ca6e7..d183920 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -122,14 +122,14 @@ variables: { dbid, selectedMetrics, selectedScopes }, }); - function loadAllScopes() { - selectedScopes = [...selectedScopes, "socket", "core"] - jobMetrics = queryStore({ - client: client, - query: query, - variables: { dbid, selectedMetrics, selectedScopes}, - }); - } + // function loadAllScopes() { + // selectedScopes = [...selectedScopes, "socket", "core"] + // jobMetrics = queryStore({ + // client: client, + // query: query, + // variables: { dbid, selectedMetrics, selectedScopes}, + // }); + // } // Handle Job Query on Init -> is not executed anymore getContext("on-init")(() => { @@ -229,11 +229,6 @@ $initq.data.job.subCluster, ), })); - - - const loadRes = ({ detail }) => { - console.log(">>> UPPER RES REQUEST", detail) - } @@ -362,8 +357,6 @@ {#if item.data} gm.name == item.metric)?.unit} diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 8184e50..d57fcd6 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -13,7 +13,12 @@ --> @@ -69,13 +132,13 @@ {metricName} ({unit}) @@ -87,14 +150,19 @@ {/each} {/if} - { + scopes = ["node"] + selectedScope = "node" + selectedScopes = [...scopes] + loadUpdate + }}> {#each resolutions as res} {/each} {#key series} - {#if fetching == true} + {#if $metricData?.fetching == true} {:else if error != null} {error.message} From b70de5a4be12f3d7bcc18e70700c9940b759d104 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 16 Aug 2024 16:35:17 +0200 Subject: [PATCH 088/443] Handle single update data --- web/frontend/src/job/Metric.svelte | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index d57fcd6..bfa3adc 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -25,7 +25,7 @@ Spinner, Card, } from "@sveltestrap/sveltestrap"; - import { minScope } from "../generic/utils"; + import { minScope } from "../generic/utils.js"; import Timeseries from "../generic/plots/MetricPlot.svelte"; export let job; @@ -87,20 +87,13 @@ const selectedMetrics = [metricName] function loadUpdate() { - - // useQuery('repoData', () => - // fetch('https://api.github.com/repos/SvelteStack/svelte-query').then(res => - // res.json() - // ) - + console.log('S> OLD DATA:', rawData) metricData = queryStore({ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, }); - console.log('S> OLD DATA:', rawData) - // rawData = {...$metricData?.data?.singleUpdate} }; $: if 
(selectedScope == "load-all") { @@ -121,10 +114,11 @@ (series) => selectedHost == null || series.hostname == selectedHost, ); - $: if ($metricData && !$metricData.fetching) console.log('S> NEW DATA:', rawData) - // $: console.log('Pattern', patternMatches) + $: if ($metricData && !$metricData.fetching) { + rawData = $metricData.data.singleUpdate.map((x) => x.metric) + console.log('S> NEW DATA:', rawData) + } $: console.log('SelectedScope', selectedScope) - $: console.log('ScopeIndex', selectedScopeIndex) @@ -154,7 +148,7 @@ scopes = ["node"] selectedScope = "node" selectedScopes = [...scopes] - loadUpdate + loadUpdate() }}> {#each resolutions as res} From a8a27c9b51cb9002498634cf625423e0896c985c Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Mon, 19 Aug 2024 12:11:53 +0200 Subject: [PATCH 089/443] Add project index to job table --- .../repository/migrations/sqlite3/08_add-footprint.up.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 93f0659..bcd6494 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,8 +1,12 @@ +CREATE INDEX IF NOT EXISTS job_by_project ON job (project); +CREATE INDEX IF NOT EXISTS job_list_projects ON job (project, job_state); + ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; ALTER TABLE job ADD COLUMN footprint TEXT DEFAULT NULL; UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; + UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max); From e4f8022b7a16687ee6d87a78c3ceab95f1403a2b Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 11:39:19 +0200 Subject: [PATCH 090/443] change to one reactive metric data load on two variables --- web/frontend/src/job/Metric.svelte | 104 ++++++++++++++++++----------- 1 file changed, 65 insertions(+), 39 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index bfa3adc..f47a3c7 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -41,14 +41,16 @@ plot, error = null; let selectedScope = minScope(scopes); - let selectedResolution = 600 - let statsPattern = /(.*)-stat$/ - let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) - let selectedScopeIndex + let selectedResolution; + let pendingResolution = 600; + let selectedScopeIndex = scopes.findIndex((s) => s == minScope(scopes)); + const statsPattern = /(.*)-stat$/; + let patternMatches = false; + let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null); // const dispatch = createEventDispatcher(); - const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? metricUnit.base : "") - const resolutions = [600, 240, 60] + const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? 
metricUnit.base : ""); + const resolutions = [600, 240, 60] // DEV: Make configable const client = getContextClient(); const subQuery = gql` query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!, $selectedResolution: Int) { @@ -86,39 +88,68 @@ const dbid = job.id; const selectedMetrics = [metricName] - function loadUpdate() { - console.log('S> OLD DATA:', rawData) - metricData = queryStore({ - client: client, - query: subQuery, - variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - }); - - }; - - $: if (selectedScope == "load-all") { - scopes = [...scopes, "socket", "core"] - selectedScope = nativeScope - selectedScopes = [...scopes] - loadUpdate() - }; - - $: patternMatches = statsPattern.exec(selectedScope) - $: if (!patternMatches) { - selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); + $: if (selectedScope == "load-all" || pendingResolution) { + + if (selectedScope == "load-all") { + console.log('Triggered load-all') + selectedScopes = [...scopes, "socket", "core"] } else { - selectedScopeIndex = scopes.findIndex((s) => s == patternMatches[1]); + console.log("Triggered scope switch:", selectedScope, pendingResolution) } + + // What if accelerator scope / native core scopes? + if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { + selectedScope = String("node") + selectedScopes = ["node"] + console.log("New Resolution: Reset to node scope") + } else { + console.log("New Resolution: No change in Res or just node scope") + } + + if (!selectedResolution) { + selectedResolution = Number(pendingResolution) + } else { + selectedResolution = Number(pendingResolution) + + metricData = queryStore({ + client: client, + query: subQuery, + variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, + // requestPolicy: "network-only", + }); + + if ($metricData && !$metricData.fetching) { + console.log('Trigger Data Handling') + + rawData = $metricData.data.singleUpdate.map((x) => x.metric) + scopes = $metricData.data.singleUpdate.map((x) => x.scope) + statsSeries = rawData.map((data) => data?.statisticsSeries ? 
data.statisticsSeries : null) + + // Handle Selected Scope on load-all + if (selectedScope == "load-all") { + selectedScope = minScope(scopes) + console.log('Set New SelectedScope after Load-All', selectedScope, scopes) + } else { + console.log('Set New SelectedScope', selectedScope) + } + + patternMatches = statsPattern.exec(selectedScope) + if (!patternMatches) { + selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); + console.log("Selected Index # from Array", selectedScopeIndex, scopes) + } else { + selectedScopeIndex = scopes.findIndex((s) => s == patternMatches[1]); + console.log("Selected Stats Index # from Array", selectedScopeIndex, scopes) + } + } + } + } + $: data = rawData[selectedScopeIndex]; + $: series = data?.series.filter( (series) => selectedHost == null || series.hostname == selectedHost, ); - - $: if ($metricData && !$metricData.fetching) { - rawData = $metricData.data.singleUpdate.map((x) => x.metric) - console.log('S> NEW DATA:', rawData) - } - $: console.log('SelectedScope', selectedScope) @@ -144,12 +175,7 @@ {/each} {/if} - {#each resolutions as res} {/each} From 613e128cab900e4c4d9e2ab9338aee010c0adbca Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 11:51:38 +0200 Subject: [PATCH 091/443] cleanup dev logging --- web/frontend/src/job/Metric.svelte | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index f47a3c7..eb4c90b 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -88,25 +88,20 @@ const dbid = job.id; const selectedMetrics = [metricName] - $: if (selectedScope == "load-all" || pendingResolution) { + $: if (selectedScope || pendingResolution) { if (selectedScope == "load-all") { - console.log('Triggered load-all') selectedScopes = [...scopes, "socket", "core"] - } else { - console.log("Triggered scope switch:", selectedScope, pendingResolution) } // What if accelerator scope / native core scopes? if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { selectedScope = String("node") selectedScopes = ["node"] - console.log("New Resolution: Reset to node scope") - } else { - console.log("New Resolution: No change in Res or just node scope") } if (!selectedResolution) { + // Skips reactive data load on init selectedResolution = Number(pendingResolution) } else { selectedResolution = Number(pendingResolution) @@ -115,31 +110,25 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - // requestPolicy: "network-only", }); if ($metricData && !$metricData.fetching) { - console.log('Trigger Data Handling') rawData = $metricData.data.singleUpdate.map((x) => x.metric) scopes = $metricData.data.singleUpdate.map((x) => x.scope) statsSeries = rawData.map((data) => data?.statisticsSeries ? 
data.statisticsSeries : null) - // Handle Selected Scope on load-all + // Set selected scope to min of returned scopes if (selectedScope == "load-all") { selectedScope = minScope(scopes) - console.log('Set New SelectedScope after Load-All', selectedScope, scopes) - } else { - console.log('Set New SelectedScope', selectedScope) } patternMatches = statsPattern.exec(selectedScope) + if (!patternMatches) { selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); - console.log("Selected Index # from Array", selectedScopeIndex, scopes) } else { selectedScopeIndex = scopes.findIndex((s) => s == patternMatches[1]); - console.log("Selected Stats Index # from Array", selectedScopeIndex, scopes) } } } From 599a36466a064102ba461195c4c30fdc69e019d8 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 14:52:13 +0200 Subject: [PATCH 092/443] fix new data reactivity for accelerators --- web/frontend/src/job/Metric.svelte | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index eb4c90b..5c5a87a 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -18,7 +18,6 @@ gql, getContextClient } from "@urql/svelte"; - // import { createEventDispatcher } from "svelte"; import { InputGroup, InputGroupText, @@ -44,11 +43,10 @@ let selectedResolution; let pendingResolution = 600; let selectedScopeIndex = scopes.findIndex((s) => s == minScope(scopes)); - const statsPattern = /(.*)-stat$/; let patternMatches = false; let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null); - // const dispatch = createEventDispatcher(); + const statsPattern = /(.*)-stat$/; const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? metricUnit.base : ""); const resolutions = [600, 240, 60] // DEV: Make configable const client = getContextClient(); @@ -89,21 +87,23 @@ const selectedMetrics = [metricName] $: if (selectedScope || pendingResolution) { - - if (selectedScope == "load-all") { - selectedScopes = [...scopes, "socket", "core"] - } - - // What if accelerator scope / native core scopes? - if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { - selectedScope = String("node") - selectedScopes = ["node"] - } - if (!selectedResolution) { // Skips reactive data load on init selectedResolution = Number(pendingResolution) + } else { + + if (selectedScope == "load-all") { + selectedScopes = [...scopes, "socket", "core", "accelerator"] + } + + if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { + selectedScope = String("node") + selectedScopes = ["node"] + // Instead of adding acc to load-all: always add by default if native is acc + // selectedScopes = nativeScope == "accelerator" ? 
["node", "accelerator"] : ["node"] + } + selectedResolution = Number(pendingResolution) metricData = queryStore({ From e74e506ffe6148076e6aa70621436e0712b71a5c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 16:41:35 +0200 Subject: [PATCH 093/443] cleanup outdated code --- web/frontend/src/Job.root.svelte | 9 --------- 1 file changed, 9 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index d183920..5c1d004 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -122,15 +122,6 @@ variables: { dbid, selectedMetrics, selectedScopes }, }); - // function loadAllScopes() { - // selectedScopes = [...selectedScopes, "socket", "core"] - // jobMetrics = queryStore({ - // client: client, - // query: query, - // variables: { dbid, selectedMetrics, selectedScopes}, - // }); - // } - // Handle Job Query on Init -> is not executed anymore getContext("on-init")(() => { let job = $initq.data.job; From 1758275f115cef35c17ca255ade6fbd4d3db4c11 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 22 Aug 2024 14:01:27 +0200 Subject: [PATCH 094/443] fix: fix getMetricConfigDeep util function - threw error for mismatching metric availability between clusters --- web/frontend/src/generic/utils.js | 43 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/web/frontend/src/generic/utils.js b/web/frontend/src/generic/utils.js index bb63a4f..3aaafa1 100644 --- a/web/frontend/src/generic/utils.js +++ b/web/frontend/src/generic/utils.js @@ -301,7 +301,7 @@ export function stickyHeader(datatableHeaderSelector, updatePading) { onDestroy(() => document.removeEventListener("scroll", onscroll)); } -export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubcluster +export function checkMetricDisabled(m, c, s) { // [m]etric, [c]luster, [s]ubcluster const metrics = getContext("globalMetrics"); const result = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s) return !result @@ -309,23 +309,22 @@ export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubclust export function getStatsItems() { // console.time('stats') - // console.log('getStatsItems ...') const globalMetrics = getContext("globalMetrics") const result = globalMetrics.map((gm) => { if (gm?.footprint) { - // Footprint contains suffix naming the used stat-type // console.time('deep') - // console.log('Deep Config for', gm.name) const mc = getMetricConfigDeep(gm.name, null, null) // console.timeEnd('deep') - return { - field: gm.name + '_' + gm.footprint, - text: gm.name + ' (' + gm.footprint + ')', - metric: gm.name, - from: 0, - to: mc.peak, - peak: mc.peak, - enabled: false + if (mc) { + return { + field: gm.name + '_' + gm.footprint, + text: gm.name + ' (' + gm.footprint + ')', + metric: gm.name, + from: 0, + to: mc.peak, + peak: mc.peak, + enabled: false + } } } return null @@ -336,11 +335,9 @@ export function getStatsItems() { export function getSortItems() { //console.time('sort') - //console.log('getSortItems ...') const globalMetrics = getContext("globalMetrics") const result = globalMetrics.map((gm) => { if (gm?.footprint) { - // Footprint contains suffix naming the used stat-type return { field: gm.name + '_' + gm.footprint, type: 'foot', @@ -357,21 +354,22 @@ export function getSortItems() { function getMetricConfigDeep(metric, cluster, subCluster) { const clusters = getContext("clusters"); if (cluster != null) { - let c = 
clusters.find((c) => c.name == cluster); + const c = clusters.find((c) => c.name == cluster); if (subCluster != null) { - let sc = c.subClusters.find((sc) => sc.name == subCluster); + const sc = c.subClusters.find((sc) => sc.name == subCluster); return sc.metricConfig.find((mc) => mc.name == metric) } else { let result; for (let sc of c.subClusters) { const mc = sc.metricConfig.find((mc) => mc.name == metric) - if (result) { // If lowerIsBetter: Peak is still maximum value, no special case required + if (result && mc) { // update result; If lowerIsBetter: Peak is still maximum value, no special case required result.alert = (mc.alert > result.alert) ? mc.alert : result.alert result.caution = (mc.caution > result.caution) ? mc.caution : result.caution result.normal = (mc.normal > result.normal) ? mc.normal : result.normal result.peak = (mc.peak > result.peak) ? mc.peak : result.peak - } else { - if (mc) result = {...mc}; + } else if (mc) { + // start new result + result = {...mc}; } } return result @@ -381,13 +379,14 @@ function getMetricConfigDeep(metric, cluster, subCluster) { for (let c of clusters) { for (let sc of c.subClusters) { const mc = sc.metricConfig.find((mc) => mc.name == metric) - if (result) { // If lowerIsBetter: Peak is still maximum value, no special case required + if (result && mc) { // update result; If lowerIsBetter: Peak is still maximum value, no special case required result.alert = (mc.alert > result.alert) ? mc.alert : result.alert result.caution = (mc.caution > result.caution) ? mc.caution : result.caution result.normal = (mc.normal > result.normal) ? mc.normal : result.normal result.peak = (mc.peak > result.peak) ? mc.peak : result.peak - } else { - if (mc) result = {...mc}; + } else if (mc) { + // Start new result + result = {...mc}; } } } From ceb3a095d80914c0af9793afc1b52b40025499f8 Mon Sep 17 00:00:00 2001 From: Aditya Ujeniya Date: Thu, 22 Aug 2024 14:29:51 +0200 Subject: [PATCH 095/443] Sampling Feature for archived and fresh data --- internal/api/api_test.go | 5 +- internal/api/rest.go | 18 ++- internal/graph/schema.resolvers.go | 2 +- internal/graph/util.go | 10 +- internal/metricdata/cc-metric-store.go | 175 ++++++++++++++----------- internal/metricdata/influxdb-v2.go | 3 +- internal/metricdata/metricdata.go | 39 +++++- internal/metricdata/prometheus.go | 3 +- internal/metricdata/utils.go | 45 ++++++- pkg/archive/json.go | 2 +- pkg/resampler/resampler.go | 113 ++++++++++++++++ pkg/resampler/util.go | 25 ++++ sample.txt | 12 ++ web/frontend/src/job/Metric.svelte | 1 + 14 files changed, 358 insertions(+), 95 deletions(-) create mode 100644 pkg/resampler/resampler.go create mode 100644 pkg/resampler/util.go create mode 100644 sample.txt diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 80a7e64..acf609f 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -172,7 +172,6 @@ func cleanup() { func TestRestApi(t *testing.T) { restapi := setup(t) t.Cleanup(cleanup) - testData := schema.JobData{ "load_one": map[schema.MetricScope]*schema.JobMetric{ schema.MetricScopeNode: { @@ -189,7 +188,7 @@ func TestRestApi(t *testing.T) { }, } - metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) { + metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { return testData, nil } @@ -341,7 +340,7 @@ func TestRestApi(t *testing.T) { } 
t.Run("CheckArchive", func(t *testing.T) { - data, err := metricdata.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background()) + data, err := metricdata.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background(), 60) if err != nil { t.Fatal(err) } diff --git a/internal/api/rest.go b/internal/api/rest.go index 7946ab7..1695c0f 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -514,8 +514,15 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) var data schema.JobData + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + + for _, mc := range metricConfigs { + resolution = max(resolution, mc.Timestep) + } + if r.URL.Query().Get("all-metrics") == "true" { - data, err = metricdata.LoadData(job, nil, scopes, r.Context()) + data, err = metricdata.LoadData(job, nil, scopes, r.Context(), resolution) if err != nil { log.Warn("Error while loading job data") return @@ -604,7 +611,14 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { scopes = []schema.MetricScope{"node"} } - data, err := metricdata.LoadData(job, metrics, scopes, r.Context()) + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + + for _, mc := range metricConfigs { + resolution = max(resolution, mc.Timestep) + } + + data, err := metricdata.LoadData(job, metrics, scopes, r.Context(), resolution) if err != nil { log.Warn("Error while loading job data") return diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 9e7bd3d..0eba013 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -237,7 +237,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str } log.Debugf(">>>>> REQUEST DATA HERE FOR %v AT SCOPE %v WITH RESOLUTION OF %d", metrics, scopes, *resolution) - data, err := metricdata.LoadData(job, metrics, scopes, ctx) + data, err := metricdata.LoadData(job, metrics, scopes, ctx, *resolution) if err != nil { log.Warn("Error while loading job data") return nil, err diff --git a/internal/graph/util.go b/internal/graph/util.go index 3e65b6c..29e282c 100644 --- a/internal/graph/util.go +++ b/internal/graph/util.go @@ -12,6 +12,7 @@ import ( "github.com/99designs/gqlgen/graphql" "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" // "github.com/ClusterCockpit/cc-backend/pkg/archive" @@ -47,7 +48,14 @@ func (r *queryResolver) rooflineHeatmap( continue } - jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx) + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + + for _, mc := range metricConfigs { + resolution = max(resolution, mc.Timestep) + } + + jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, resolution) if err != nil { log.Errorf("Error while loading roofline metrics for job %d", job.ID) return nil, err diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index e564db6..53469f0 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -55,6 
+55,7 @@ type ApiQuery struct { SubType *string `json:"subtype,omitempty"` Metric string `json:"metric"` Hostname string `json:"host"` + Resolution int `json:"resolution"` TypeIds []string `json:"type-ids,omitempty"` SubTypeIds []string `json:"subtype-ids,omitempty"` Aggregate bool `json:"aggreg"` @@ -66,13 +67,14 @@ type ApiQueryResponse struct { } type ApiMetricData struct { - Error *string `json:"error"` - Data []schema.Float `json:"data"` - From int64 `json:"from"` - To int64 `json:"to"` - Avg schema.Float `json:"avg"` - Min schema.Float `json:"min"` - Max schema.Float `json:"max"` + Error *string `json:"error"` + Data []schema.Float `json:"data"` + From int64 `json:"from"` + To int64 `json:"to"` + Resolution int `json:"resolution"` + Avg schema.Float `json:"avg"` + Min schema.Float `json:"min"` + Max schema.Float `json:"max"` } func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { @@ -83,7 +85,7 @@ func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { } ccms.url = config.Url - ccms.queryEndpoint = fmt.Sprintf("%s/api/query", config.Url) + ccms.queryEndpoint = fmt.Sprintf("%s/api/query/", config.Url) ccms.jwt = config.Token ccms.client = http.Client{ Timeout: 10 * time.Second, @@ -129,7 +131,7 @@ func (ccms *CCMetricStore) doRequest( return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, ccms.queryEndpoint, buf) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf) if err != nil { log.Warn("Error while building request body") return nil, err @@ -162,8 +164,9 @@ func (ccms *CCMetricStore) LoadData( metrics []string, scopes []schema.MetricScope, ctx context.Context, + resolution int, ) (schema.JobData, error) { - queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes) + queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution) if err != nil { log.Warn("Error while building queries") return nil, err @@ -196,10 +199,11 @@ func (ccms *CCMetricStore) LoadData( } jobMetric, ok := jobData[metric][scope] + if !ok { jobMetric = &schema.JobMetric{ Unit: mc.Unit, - Timestep: mc.Timestep, + Timestep: row[0].Resolution, Series: make([]schema.Series, 0), } jobData[metric][scope] = jobMetric @@ -251,7 +255,6 @@ func (ccms *CCMetricStore) LoadData( /* Returns list for "partial errors" */ return jobData, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) } - return jobData, nil } @@ -267,6 +270,7 @@ func (ccms *CCMetricStore) buildQueries( job *schema.Job, metrics []string, scopes []schema.MetricScope, + resolution int, ) ([]ApiQuery, []schema.MetricScope, error) { queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) assignedScope := []schema.MetricScope{} @@ -318,11 +322,12 @@ func (ccms *CCMetricStore) buildQueries( } queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &acceleratorString, - TypeIds: host.Accelerators, + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &acceleratorString, + TypeIds: host.Accelerators, + Resolution: resolution, }) assignedScope = append(assignedScope, schema.MetricScopeAccelerator) continue @@ -335,11 +340,12 @@ func (ccms *CCMetricStore) buildQueries( } queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &acceleratorString, - TypeIds: host.Accelerators, + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &acceleratorString, + TypeIds: 
host.Accelerators, + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -348,11 +354,12 @@ func (ccms *CCMetricStore) buildQueries( // HWThread -> HWThead if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &hwthreadString, - TypeIds: intToStringSlice(hwthreads), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &hwthreadString, + TypeIds: intToStringSlice(hwthreads), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -363,11 +370,12 @@ func (ccms *CCMetricStore) buildQueries( cores, _ := topology.GetCoresFromHWThreads(hwthreads) for _, core := range cores { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Core[core]), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Core[core]), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) } @@ -379,11 +387,12 @@ func (ccms *CCMetricStore) buildQueries( sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) for _, socket := range sockets { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Socket[socket]), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Socket[socket]), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) } @@ -393,11 +402,12 @@ func (ccms *CCMetricStore) buildQueries( // HWThread -> Node if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(hwthreads), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(hwthreads), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -407,11 +417,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { cores, _ := topology.GetCoresFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &coreString, - TypeIds: intToStringSlice(cores), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -421,11 +432,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { cores, _ := topology.GetCoresFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &coreString, - TypeIds: intToStringSlice(cores), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -435,11 +447,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == 
schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -449,11 +462,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -463,11 +477,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &socketString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -477,11 +492,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &socketString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -490,8 +506,9 @@ func (ccms *CCMetricStore) buildQueries( // Node -> Node if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, + Metric: remoteName, + Hostname: host.Hostname, + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -510,7 +527,15 @@ func (ccms *CCMetricStore) LoadStats( metrics []string, ctx context.Context, ) (map[string]map[string]schema.MetricStatistics, error) { - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) // #166 Add scope shere for analysis view accelerator normalization? + + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 9000 + + for _, mc := range metricConfigs { + resolution = min(resolution, mc.Timestep) + } + + queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, resolution) // #166 Add scope shere for analysis view accelerator normalization? 
if err != nil { log.Warn("Error while building query") return nil, err diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index b95f07e..b416fa5 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -60,7 +60,8 @@ func (idb *InfluxDBv2DataRepository) LoadData( job *schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.JobData, error) { + ctx context.Context, + resolution int) (schema.JobData, error) { measurementsConds := make([]string, 0, len(metrics)) for _, m := range metrics { diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index eba9dee..e79261b 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -15,6 +15,7 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/lrucache" + "github.com/ClusterCockpit/cc-backend/pkg/resampler" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -24,7 +25,7 @@ type MetricDataRepository interface { Init(rawConfig json.RawMessage) error // Return the JobData for the given job, only with the requested metrics. - LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) + LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) @@ -80,8 +81,9 @@ func LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, + resolution int, ) (schema.JobData, error) { - data := cache.Get(cacheKey(job, metrics, scopes), func() (_ interface{}, ttl time.Duration, size int) { + data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ interface{}, ttl time.Duration, size int) { var jd schema.JobData var err error @@ -106,7 +108,7 @@ func LoadData(job *schema.Job, } } - jd, err = repo.LoadData(job, metrics, scopes, ctx) + jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution) if err != nil { if len(jd) != 0 { log.Warnf("partial error: %s", err.Error()) @@ -118,12 +120,31 @@ func LoadData(job *schema.Job, } size = jd.Size() } else { - jd, err = archive.GetHandle().LoadJobData(job) + var jd_temp schema.JobData + jd_temp, err = archive.GetHandle().LoadJobData(job) if err != nil { log.Error("Error while loading job data from archive") return err, 0, 0 } + //Deep copy the cached arhive hashmap + jd = DeepCopy(jd_temp) + + //Resampling for archived data. + //Pass the resolution from frontend here. 
+ for _, v := range jd { + for _, v_ := range v { + timestep := 0 + for i := 0; i < len(v_.Series); i += 1 { + v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, v_.Timestep, resolution) + if err != nil { + return err, 0, 0 + } + } + v_.Timestep = timestep + } + } + // Avoid sending unrequested data to the client: if metrics != nil || scopes != nil { if metrics == nil { @@ -254,11 +275,12 @@ func cacheKey( job *schema.Job, metrics []string, scopes []schema.MetricScope, + resolution int, ) string { // Duration and StartTime do not need to be in the cache key as StartTime is less unique than // job.ID and the TTL of the cache entry makes sure it does not stay there forever. - return fmt.Sprintf("%d(%s):[%v],[%v]", - job.ID, job.State, metrics, scopes) + return fmt.Sprintf("%d(%s):[%v],[%v]-%d", + job.ID, job.State, metrics, scopes, resolution) } // For /monitoring/job/ and some other places, flops_any and mem_bw need @@ -297,8 +319,11 @@ func prepareJobData( func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { allMetrics := make([]string, 0) metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + for _, mc := range metricConfigs { allMetrics = append(allMetrics, mc.Name) + resolution = mc.Timestep } // TODO: Talk about this! What resolutions to store data at... @@ -311,7 +336,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { scopes = append(scopes, schema.MetricScopeAccelerator) } - jobData, err := LoadData(job, allMetrics, scopes, ctx) + jobData, err := LoadData(job, allMetrics, scopes, ctx, resolution) if err != nil { log.Error("Error wile loading job data for archiving") return nil, err diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index a8d9f39..0611824 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -265,6 +265,7 @@ func (pdb *PrometheusDataRepository) LoadData( metrics []string, scopes []schema.MetricScope, ctx context.Context, + resolution int, ) (schema.JobData, error) { // TODO respect requested scope if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { @@ -356,7 +357,7 @@ func (pdb *PrometheusDataRepository) LoadStats( // map of metrics of nodes of stats stats := map[string]map[string]schema.MetricStatistics{} - data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx) + data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) if err != nil { log.Warn("Error while loading job for stats") return nil, err diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index 6d490fe..f480e40 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -12,7 +12,7 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/schema" ) -var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) { +var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { panic("TODO") } @@ -27,9 +27,10 @@ func (tmdr *TestMetricDataRepository) LoadData( job 
*schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.JobData, error) { + ctx context.Context, + resolution int) (schema.JobData, error) { - return TestLoadDataCallback(job, metrics, scopes, ctx) + return TestLoadDataCallback(job, metrics, scopes, ctx, resolution) } func (tmdr *TestMetricDataRepository) LoadStats( @@ -48,3 +49,41 @@ func (tmdr *TestMetricDataRepository) LoadNodeData( panic("TODO") } + +func DeepCopy(jd_temp schema.JobData) schema.JobData { + var jd schema.JobData + + jd = make(schema.JobData, len(jd_temp)) + for k, v := range jd_temp { + jd[k] = make(map[schema.MetricScope]*schema.JobMetric, len(jd_temp[k])) + for k_, v_ := range v { + jd[k][k_] = new(schema.JobMetric) + jd[k][k_].Series = make([]schema.Series, len(v_.Series)) + for i := 0; i < len(v_.Series); i += 1 { + jd[k][k_].Series[i].Data = make([]schema.Float, len(v_.Series[i].Data)) + copy(jd[k][k_].Series[i].Data, v_.Series[i].Data) + jd[k][k_].Series[i].Hostname = v_.Series[i].Hostname + jd[k][k_].Series[i].Id = v_.Series[i].Id + jd[k][k_].Series[i].Statistics.Avg = v_.Series[i].Statistics.Avg + jd[k][k_].Series[i].Statistics.Min = v_.Series[i].Statistics.Min + jd[k][k_].Series[i].Statistics.Max = v_.Series[i].Statistics.Max + } + jd[k][k_].Timestep = v_.Timestep + jd[k][k_].Unit.Base = v_.Unit.Base + jd[k][k_].Unit.Prefix = v_.Unit.Prefix + if v_.StatisticsSeries != nil { + jd[k][k_].StatisticsSeries = new(schema.StatsSeries) + copy(jd[k][k_].StatisticsSeries.Max, v_.StatisticsSeries.Max) + copy(jd[k][k_].StatisticsSeries.Min, v_.StatisticsSeries.Min) + copy(jd[k][k_].StatisticsSeries.Median, v_.StatisticsSeries.Median) + copy(jd[k][k_].StatisticsSeries.Mean, v_.StatisticsSeries.Mean) + for k__, v__ := range v_.StatisticsSeries.Percentiles { + jd[k][k_].StatisticsSeries.Percentiles[k__] = v__ + } + } else { + jd[k][k_].StatisticsSeries = v_.StatisticsSeries + } + } + } + return jd +} diff --git a/pkg/archive/json.go b/pkg/archive/json.go index ff2c6d9..1219658 100644 --- a/pkg/archive/json.go +++ b/pkg/archive/json.go @@ -9,8 +9,8 @@ import ( "io" "time" - "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" ) func DecodeJobData(r io.Reader, k string) (schema.JobData, error) { diff --git a/pkg/resampler/resampler.go b/pkg/resampler/resampler.go new file mode 100644 index 0000000..2c06b38 --- /dev/null +++ b/pkg/resampler/resampler.go @@ -0,0 +1,113 @@ +package resampler + +import ( + "errors" + "fmt" + "math" + + "github.com/ClusterCockpit/cc-backend/pkg/schema" +) + +func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, error) { + if old_frequency == 0 || new_frequency == 0 { + return nil, errors.New("either old or new frequency is set to 0") + } + + if new_frequency%old_frequency != 0 { + return nil, errors.New("new sampling frequency should be multiple of the old frequency") + } + + var step int = int(new_frequency / old_frequency) + var new_data_length = len(data) / step + + if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) { + return data, nil + } + + new_data := make([]schema.Float, new_data_length) + + for i := 0; i < new_data_length; i++ { + new_data[i] = data[i*step] + } + + return new_data, nil +} + +// Inspired by one of the algorithms from https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf +// Adapted from https://github.com/haoel/downsampling/blob/master/core/lttb.go +func 
LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_frequency int) ([]schema.Float, int, error) { + + if old_frequency == 0 || new_frequency == 0 { + return data, old_frequency, nil + } + + if new_frequency%old_frequency != 0 { + return nil, 0, errors.New(fmt.Sprintf("new sampling frequency : %d should be multiple of the old frequency : %d", new_frequency, old_frequency)) + } + + var step int = int(new_frequency / old_frequency) + var new_data_length = len(data) / step + + if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) { + return data, old_frequency, nil + } + + new_data := make([]schema.Float, 0, new_data_length) + + // Bucket size. Leave room for start and end data points + bucketSize := float64(len(data)-2) / float64(new_data_length-2) + + new_data = append(new_data, data[0]) // Always add the first point + + // We keep three pointers: + // > bucketLow - the current bucket's beginning location + // > bucketMiddle - the current bucket's ending location, + // also the beginning location of next bucket + // > bucketHigh - the next bucket's ending location. + bucketLow := 1 + bucketMiddle := int(math.Floor(bucketSize)) + 1 + + var prevMaxAreaPoint int + + for i := 0; i < new_data_length-2; i++ { + + bucketHigh := int(math.Floor(float64(i+2)*bucketSize)) + 1 + if bucketHigh >= len(data)-1 { + bucketHigh = len(data) - 2 + } + + // Calculate point average for next bucket (containing c) + avgPointX, avgPointY := calculateAverageDataPoint(data[bucketMiddle:bucketHigh+1], int64(bucketMiddle)) + + // Get the range for current bucket + currBucketStart := bucketLow + currBucketEnd := bucketMiddle + + // Point a + pointX := prevMaxAreaPoint + pointY := data[prevMaxAreaPoint] + + maxArea := -1.0 + + var maxAreaPoint int + for ; currBucketStart < currBucketEnd; currBucketStart++ { + + area := calculateTriangleArea(schema.Float(pointX), pointY, avgPointX, avgPointY, schema.Float(currBucketStart), data[currBucketStart]) + if area > maxArea { + maxArea = area + maxAreaPoint = currBucketStart + } + } + + new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket + prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMaxAreaPoint + + // Move to the next window + bucketLow = bucketMiddle + bucketMiddle = bucketHigh + } + + new_data = append(new_data, data[len(data)-1]) // Always add last + + return new_data, new_frequency, nil +} diff --git a/pkg/resampler/util.go b/pkg/resampler/util.go new file mode 100644 index 0000000..605f638 --- /dev/null +++ b/pkg/resampler/util.go @@ -0,0 +1,25 @@ +package resampler + +import ( + "math" + + "github.com/ClusterCockpit/cc-backend/pkg/schema" +) + +func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY schema.Float) float64 { + area := ((paX-pcX)*(pbY-paY) - (paX-pbX)*(pcY-paY)) * 0.5 + return math.Abs(float64(area)) +} + +func calculateAverageDataPoint(points []schema.Float, xStart int64) (avgX schema.Float, avgY schema.Float) { + + for _, point := range points { + avgX += schema.Float(xStart) + avgY += point + xStart++ + } + l := schema.Float(len(points)) + avgX /= l + avgY /= l + return avgX, avgY +} diff --git a/sample.txt b/sample.txt new file mode 100644 index 0000000..953def6 --- /dev/null +++ b/sample.txt @@ -0,0 +1,12 @@ +HTTP server listening at 127.0.0.1:8080...Key : "demo" +Loading data with res : 600 +Key : "255(completed):[[]],[[]]-600" +Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" +Key : "partitions:fritz" +Key : "partitions:alex" +Key :
"metadata:255" +Key : "footprint:255" +Loading data with res : 600 +Key : "255(completed):[[flops_any mem_bw core_power acc_mem_used cpu_load mem_used acc_power cpu_power nv_sm_clock ipc cpu_user clock nv_mem_util nv_temp acc_utilization]],[[node accelerator socket core]]-600" +Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" +Existing key : "var/job-archive/alex/679/951/1675866122/data.json.gz" in cache with value diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 5c5a87a..c551750 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -110,6 +110,7 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, + requestPolicy:"network-only" }); if ($metricData && !$metricData.fetching) { From 084f89fa32b55467030f452b227ffd96adaefd35 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 22 Aug 2024 14:46:27 +0200 Subject: [PATCH 096/443] fix: fix svelte source paths in makefile --- Makefile | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 508c9fb..b673e79 100644 --- a/Makefile +++ b/Makefile @@ -22,11 +22,21 @@ SVELTE_COMPONENTS = status \ header SVELTE_TARGETS = $(addprefix $(FRONTEND)/public/build/,$(addsuffix .js, $(SVELTE_COMPONENTS))) -SVELTE_SRC = $(wildcard $(FRONTEND)/src/*.svelte) \ - $(wildcard $(FRONTEND)/src/*.js) \ - $(wildcard $(FRONTEND)/src/filters/*.svelte) \ - $(wildcard $(FRONTEND)/src/plots/*.svelte) \ - $(wildcard $(FRONTEND)/src/joblist/*.svelte) +SVELTE_SRC = $(wildcard $(FRONTEND)/src/*.svelte) \ + $(wildcard $(FRONTEND)/src/*.js) \ + $(wildcard $(FRONTEND)/src/analysis/*.svelte) \ + $(wildcard $(FRONTEND)/src/config/*.svelte) \ + $(wildcard $(FRONTEND)/src/config/admin/*.svelte) \ + $(wildcard $(FRONTEND)/src/config/user/*.svelte) \ + $(wildcard $(FRONTEND)/src/generic/*.js) \ + $(wildcard $(FRONTEND)/src/generic/*.svelte) \ + $(wildcard $(FRONTEND)/src/generic/filters/*.svelte) \ + $(wildcard $(FRONTEND)/src/generic/plots/*.svelte) \ + $(wildcard $(FRONTEND)/src/generic/joblist/*.svelte) \ + $(wildcard $(FRONTEND)/src/generic/helper/*.svelte) \ + $(wildcard $(FRONTEND)/src/generic/select/*.svelte) \ + $(wildcard $(FRONTEND)/src/header/*.svelte) \ + $(wildcard $(FRONTEND)/src/job/*.svelte) .PHONY: clean distclean test tags frontend swagger graphql $(TARGET) From 708eaf4178f6eb739478baeabef146def7cec100 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 22 Aug 2024 17:55:21 +0200 Subject: [PATCH 097/443] fix dev leftovers --- sample.txt | 12 ------------ web/frontend/src/job/Metric.svelte | 2 +- 2 files changed, 1 insertion(+), 13 deletions(-) delete mode 100644 sample.txt diff --git a/sample.txt b/sample.txt deleted file mode 100644 index 953def6..0000000 --- a/sample.txt +++ /dev/null @@ -1,12 +0,0 @@ -HTTP server listening at 127.0.0.1:8080...Key : "demo" -Loading data with res : 600 -Key : "255(completed):[[]],[[]]-600" -Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" -Key : "partitions:fritz" -Key : "partitions:alex" -Key : "metadata:255" -Key : "footprint:255" -Loading data with res : 600 -Key : "255(completed):[[flops_any mem_bw core_power acc_mem_used cpu_load mem_used acc_power cpu_power nv_sm_clock ipc cpu_user clock nv_mem_util nv_temp acc_utilization]],[[node accelerator socket core]]-600" -Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" -Existing key : "var/job-archive/alex/679/951/1675866122/data.json.gz" in cache with 
value diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index c551750..d3fe0d6 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -110,7 +110,7 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - requestPolicy:"network-only" + // requestPolicy:"network-only" }); if ($metricData && !$metricData.fetching) { From 01845a0cb71fec382df774afb44a41809a9c82ea Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 22 Aug 2024 18:33:18 +0200 Subject: [PATCH 098/443] add comment regarding metric data load --- web/frontend/src/job/Metric.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index d3fe0d6..f5e1851 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -110,7 +110,7 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - // requestPolicy:"network-only" + // Never use network-only: causes reactive load-loop! }); if ($metricData && !$metricData.fetching) { From 95fe36964841e538410063d2fe633d6b1283bed1 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 13:26:56 +0200 Subject: [PATCH 099/443] fix: add additionally loaded scopes to statsTable again --- web/frontend/src/Job.root.svelte | 1 + web/frontend/src/job/Metric.svelte | 11 ++++++++++- web/frontend/src/job/StatsTable.svelte | 7 +++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 5c1d004..57600b7 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -348,6 +348,7 @@ {#if item.data} statsTable.moreLoaded(detail)} job={$initq.data.job} metricName={item.metric} metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit} diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index f5e1851..e6d7af6 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -13,6 +13,9 @@ -->
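For reference, the LTTB resampler introduced in PATCH 095 can be exercised on its own; a minimal sketch (the series contents and the 60s-to-600s resolutions below are fabricated for illustration, only the resampler and schema packages are from the tree):

    package main

    import (
    	"fmt"

    	"github.com/ClusterCockpit/cc-backend/pkg/resampler"
    	"github.com/ClusterCockpit/cc-backend/pkg/schema"
    )

    func main() {
    	// Fabricated input: 1200 samples recorded at a 60s timestep.
    	data := make([]schema.Float, 1200)
    	for i := range data {
    		data[i] = schema.Float(i % 100)
    	}

    	// Downsample to the 600s resolution the frontend requests first.
    	// LTTB keeps the first point, the last point, and one point per bucket.
    	out, newTimestep, err := resampler.LargestTriangleThreeBucket(data, 60, 600)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%d points @ %ds -> %d points @ %ds\n", len(data), 60, len(out), newTimestep)
    }

Per the implementation above this prints "1200 points @ 60s -> 120 points @ 600s"; series shorter than 100 points are returned unchanged.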
From adc3502b6b31535fd015defc5f046b3a8d87cb0d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 13:37:42 +0200 Subject: [PATCH 100/443] cleanup dev logline --- internal/graph/schema.resolvers.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 0eba013..fc3ff42 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -236,7 +236,6 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str return nil, err } - log.Debugf(">>>>> REQUEST DATA HERE FOR %v AT SCOPE %v WITH RESOLUTION OF %d", metrics, scopes, *resolution) data, err := metricdata.LoadData(job, metrics, scopes, ctx, *resolution) if err != nil { log.Warn("Error while loading job data") From 9fe7cdca9215220a19930779a60c8afc910276a3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 13:53:15 +0200 Subject: [PATCH 101/443] fix: fix plot labeling if specific host selected, hide loadall if only node returned --- web/frontend/src/job/Metric.svelte | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index e6d7af6..10a533a 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -47,6 +47,7 @@ let pendingResolution = 600; let selectedScopeIndex = scopes.findIndex((s) => s == minScope(scopes)); let patternMatches = false; + let nodeOnly = false; // If, after load-all, still only node scope returned let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null); const dispatch = createEventDispatcher(); @@ -126,6 +127,7 @@ // Set selected scope to min of returned scopes if (selectedScope == "load-all") { selectedScope = minScope(scopes) + nodeOnly = (selectedScope == "node") // "node" still only scope after load-all } const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node") @@ -146,9 +148,14 @@ $: data = rawData[selectedScopeIndex]; - $: series = data?.series.filter( + $: series = data?.series?.filter( (series) => selectedHost == null || series.hostname == selectedHost, ); + + $: resources = job?.resources?.filter( + (resource) => selectedHost == null || resource.hostname == selectedHost, + ); + @@ -162,7 +169,7 @@ {/if} {/each} - {#if scopes.length == 1 && nativeScope != "node"} + {#if scopes.length == 1 && nativeScope != "node" && !nodeOnly} {/if} @@ -197,7 +204,7 @@ metric={metricName} {series} {isShared} - resources={job.resources} + {resources} /> {:else if statsSeries[selectedScopeIndex] != null && patternMatches} From d34e0d9348041d6856be6c777b84419bd046da54 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 16:59:45 +0200 Subject: [PATCH 102/443] fix: omit resources prop from metricPlot, use series for legend instead --- web/frontend/src/Node.root.svelte | 1 - web/frontend/src/Systems.root.svelte | 1 - web/frontend/src/generic/joblist/JobListRow.svelte | 1 - web/frontend/src/generic/plots/MetricPlot.svelte | 9 +++------ web/frontend/src/job/Metric.svelte | 6 ------ 5 files changed, 3 insertions(+), 15 deletions(-) diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index 2d58540..0553035 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -207,7 +207,6 @@ cluster={clusters.find((c) => c.name == cluster)} subCluster={$nodeMetricsData.data.nodeMetrics[0].subCluster} 
series={item.metric.series} - resources={[{ hostname: hostname }]} forNode={true} /> {:else if item.disabled === true && item.metric} diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index c483401..0d5e70e 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -206,7 +206,6 @@ metric={item.data.name} cluster={clusters.find((c) => c.name == cluster)} subCluster={item.subCluster} - resources={[{ hostname: item.host }]} forNode={true} /> {:else if item.disabled === true && item.data} diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 1d8529e..274e4f0 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -169,7 +169,6 @@ {cluster} subCluster={job.subCluster} isShared={job.exclusive != 1} - resources={job.resources} numhwthreads={job.numHWThreads} numaccs={job.numAcc} /> diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 8dd1dbf..d092413 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -6,7 +6,6 @@ Properties: - `metric String`: The metric name - `scope String?`: Scope of the displayed data [Default: node] - - `resources [GraphQL.Resource]`: List of resources used for parent job - `width Number`: The plot width - `height Number`: The plot height - `timestep Number`: The timestep used for X-axis rendering @@ -16,7 +15,7 @@ - `cluster GraphQL.Cluster`: Cluster Object of the parent job - `subCluster String`: Name of the subCluster of the parent job - `isShared Bool?`: If this job used shared resources; will adapt threshold indicators accordingly [Default: false] - - `forNode Bool?`: If this plot is used for node data display; will render x-axis as negative time with $now as maximum [Default: false] - `numhwthreads Number?`: Number of job HWThreads [Default: 0] - `numaccs Number?`: Number of job Accelerators [Default: 0] --> @@ -118,7 +117,6 @@ export let metric; export let scope = "node"; - export let resources = []; export let width; export let height; export let timestep; @@ -363,9 +361,8 @@ plotSeries.push({ label: scope === "node" - ? resources[i].hostname - : // scope === 'accelerator' ? resources[0].accelerators[i] : - scope + " #" + (i + 1), + ?
series[i].hostname + : scope + " #" + (i + 1), scale: "y", width: lineWidth, stroke: lineColor(i, series.length), diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 10a533a..71cf2e7 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -151,11 +151,6 @@ $: series = data?.series?.filter( (series) => selectedHost == null || series.hostname == selectedHost, ); - - $: resources = job?.resources?.filter( - (resource) => selectedHost == null || resource.hostname == selectedHost, - ); - @@ -204,7 +199,6 @@ metric={metricName} {series} {isShared} - {resources} /> {:else if statsSeries[selectedScopeIndex] != null && patternMatches} Date: Sun, 25 Aug 2024 16:13:43 +0200 Subject: [PATCH 103/443] Fix for resampler --- internal/metricdata/cc-metric-store.go | 12 ++++++------ pkg/resampler/resampler.go | 14 ++++++++++++-- pkg/resampler/util.go | 14 ++++++++++++-- 3 files changed, 30 insertions(+), 10 deletions(-) diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 53469f0..4a86352 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -528,14 +528,14 @@ func (ccms *CCMetricStore) LoadStats( ctx context.Context, ) (map[string]map[string]schema.MetricStatistics, error) { - metricConfigs := archive.GetCluster(job.Cluster).MetricConfig - resolution := 9000 + // metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + // resolution := 9000 - for _, mc := range metricConfigs { - resolution = min(resolution, mc.Timestep) - } + // for _, mc := range metricConfigs { + // resolution = min(resolution, mc.Timestep) + // } - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, resolution) // #166 Add scope shere for analysis view accelerator normalization? + queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization?
if err != nil { log.Warn("Error while building query") return nil, err diff --git a/pkg/resampler/resampler.go index 2c06b38..26cead0 100644 --- a/pkg/resampler/resampler.go +++ b/pkg/resampler/resampler.go @@ -90,6 +90,7 @@ func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_freq maxArea := -1.0 var maxAreaPoint int + flag_ := 0 for ; currBucketStart < currBucketEnd; currBucketStart++ { area := calculateTriangleArea(schema.Float(pointX), pointY, avgPointX, avgPointY, schema.Float(currBucketStart), data[currBucketStart]) @@ -97,10 +98,19 @@ func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_freq maxArea = area maxAreaPoint = currBucketStart } + if math.IsNaN(float64(avgPointY)) { + flag_ = 1 + + } } - new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket - prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMaxAreaPoint + if flag_ == 1 { + new_data = append(new_data, schema.NaN) // The bucket contained a NaN; propagate it instead of a picked point + + } else { + new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket + } + prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMaxAreaPoint // Move to the next window bucketLow = bucketMiddle diff --git a/pkg/resampler/util.go b/pkg/resampler/util.go index 605f638..36d8bed 100644 --- a/pkg/resampler/util.go +++ b/pkg/resampler/util.go @@ -12,14 +12,24 @@ func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY schema.Float) float64 { } func calculateAverageDataPoint(points []schema.Float, xStart int64) (avgX schema.Float, avgY schema.Float) { - + flag := 0 for _, point := range points { avgX += schema.Float(xStart) avgY += point xStart++ + if math.IsNaN(float64(point)) { + flag = 1 + } } + l := schema.Float(len(points)) + avgX /= l avgY /= l - return avgX, avgY + + if flag == 1 { + return avgX, schema.NaN + } else { + return avgX, avgY + } } From 55027cb63030ae255dbcefd6aba47b4bef074556 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 26 Aug 2024 09:55:33 +0200 Subject: [PATCH 104/443] fix: add resolution 60 default to ccms nodeData query --- internal/metricdata/cc-metric-store.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 53469f0..eedb601 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -613,8 +613,9 @@ func (ccms *CCMetricStore) LoadNodeData( for _, node := range nodes { for _, metric := range metrics { req.Queries = append(req.Queries, ApiQuery{ - Hostname: node, - Metric: ccms.toRemoteName(metric), + Hostname: node, + Metric: ccms.toRemoteName(metric), + Resolution: 60, // Default for Node Queries }) } } From a59df12595d0a7ae43a5a87d2f252746311fa958 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 26 Aug 2024 17:37:23 +0200 Subject: [PATCH 105/443] init basic proof of concept --- .../src/generic/joblist/JobListRow.svelte | 4 ++++ .../src/generic/plots/MetricPlot.svelte | 20 ++++++++++++++++++- web/frontend/src/job/Metric.svelte | 8 ++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 274e4f0..197b8b3 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -159,6 +159,10 @@ {#if metric.disabled == false && metric.data} { + //
filterComponent.updateFilters(detail) + console.log("Upstream New Res:", detail) + }} width={plotWidth} height={plotHeight} timestep={metric.data.metric.timestep} diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index d092413..4fc1b77 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -112,7 +112,7 @@ {#each links as item} - {#if !item.perCluster} + {#if item.listOptions} + + + + {item.title} + + + + All Clusters + + + {#each clusters as cluster} + + + {cluster.name} + + + + Running Jobs + + + + {/each} + + + {:else if !item.perCluster} {item.title} From 54f3a261c5d4af0d313c54d1cae465762b6fe82b Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Sep 2024 18:20:32 +0200 Subject: [PATCH 121/443] Rewrite sqlite indices from scratch for v8 migration --- .../sqlite3/08_add-footprint.up.sql | 52 ++++++++++++++++++- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index bcd6494..c101c6e 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,5 +1,11 @@ -CREATE INDEX IF NOT EXISTS job_by_project ON job (project); -CREATE INDEX IF NOT EXISTS job_list_projects ON job (project, job_state); +DROP INDEX job_stats; +DROP INDEX job_by_user; +DROP INDEX job_by_starttime; +DROP INDEX job_by_job_id; +DROP INDEX job_list; +DROP INDEX job_list_user; +DROP INDEX job_list_users; +DROP INDEX job_list_users_start; ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; @@ -24,3 +30,45 @@ ALTER TABLE job DROP net_bw_avg; ALTER TABLE job DROP net_data_vol_total; ALTER TABLE job DROP file_bw_avg; ALTER TABLE job DROP file_data_vol_total; + +CREATE INDEX jobs_cluster IF NOT EXISTS ON job (cluster); +CREATE INDEX jobs_cluster_starttime IF NOT EXISTS ON job (cluster, start_time); +CREATE INDEX jobs_cluster_user IF NOT EXISTS ON job (cluster, user); +CREATE INDEX jobs_cluster_project IF NOT EXISTS ON job (cluster, project); +CREATE INDEX jobs_cluster_subcluster IF NOT EXISTS ON job (cluster, subcluster); + +CREATE INDEX jobs_cluster_partition IF NOT EXISTS ON job (cluster, partition); +CREATE INDEX jobs_cluster_partition_starttime IF NOT EXISTS ON job (cluster, partition, start_time); +CREATE INDEX jobs_cluster_partition_jobstate IF NOT EXISTS ON job (cluster, partition, job_state); +CREATE INDEX jobs_cluster_partition_jobstate_user IF NOT EXISTS ON job (cluster, partition, job_state, user); +CREATE INDEX jobs_cluster_partition_jobstate_project IF NOT EXISTS ON job (cluster, partition, job_state, project); +CREATE INDEX jobs_cluster_partition_jobstate_starttime IF NOT EXISTS ON job (cluster, partition, job_state, start_time); + +CREATE INDEX jobs_cluster_jobstate IF NOT EXISTS ON job (cluster, job_state); +CREATE INDEX jobs_cluster_jobstate_starttime IF NOT EXISTS ON job (cluster, job_state, starttime); +CREATE INDEX jobs_cluster_jobstate_user IF NOT EXISTS ON job (cluster, job_state, user); +CREATE INDEX jobs_cluster_jobstate_project IF NOT EXISTS ON job (cluster, job_state, project); + +CREATE INDEX jobs_user IF NOT EXISTS ON job (user); +CREATE INDEX jobs_user_starttime IF NOT EXISTS ON job (user, start_time); + +CREATE INDEX jobs_project IF NOT EXISTS ON job (project); +CREATE 
INDEX jobs_project_starttime IF NOT EXISTS ON job (project, start_time); +CREATE INDEX jobs_project_user IF NOT EXISTS ON job (project, user); + +CREATE INDEX jobs_jobstate IF NOT EXISTS ON job (job_state); +CREATE INDEX jobs_jobstate_user IF NOT EXISTS ON job (job_state, user); +CREATE INDEX jobs_jobstate_project IF NOT EXISTS ON job (job_state, project); +CREATE INDEX jobs_jobstate_cluster IF NOT EXISTS ON job (job_state, cluster); +CREATE INDEX jobs_jobstate_starttime IF NOT EXISTS ON job (job_state, start_time); + +CREATE INDEX jobs_arrayjobid_starttime IF NOT EXISTS ON job (array_job_id, start_time); +CREATE INDEX jobs_cluster_arrayjobid_starttime IF NOT EXISTS ON job (cluster, array_job_id, start_time); + +CREATE INDEX jobs_starttime IF NOT EXISTS ON job (start_time); +CREATE INDEX jobs_duration IF NOT EXISTS ON job (duration); +CREATE INDEX jobs_numnodes IF NOT EXISTS ON job (num_nodes); +CREATE INDEX jobs_numhwthreads IF NOT EXISTS ON job (num_hwthreads); +CREATE INDEX jobs_numacc IF NOT EXISTS ON job (num_acc); + +PRAGMA optimize; From 7602641909c15576a4a626c3069ec020ac7badf7 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Sep 2024 18:22:34 +0200 Subject: [PATCH 122/443] feat: change to resolution increase on zoom --- .../src/generic/joblist/JobListRow.svelte | 29 ++++++++--- .../src/generic/plots/MetricPlot.svelte | 52 ++++++++++++++----- web/frontend/src/job/Metric.svelte | 50 ++++++++++-------- 3 files changed, 90 insertions(+), 41 deletions(-) diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 197b8b3..5581903 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -32,12 +32,14 @@ ? ["core", "accelerator"] : ["core"] : ["node"]; + let selectedResolution = 600; + let zoomStates = {}; const cluster = getContext("clusters").find((c) => c.name == job.cluster); const client = getContextClient(); const query = gql` - query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!) 
{ - jobMetrics(id: $id, metrics: $metrics, scopes: $scopes) { + query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!, $selectedResolution: Int) { + jobMetrics(id: $id, metrics: $metrics, scopes: $scopes, resolution: $selectedResolution) { name scope metric { @@ -66,17 +68,30 @@ } `; + function handleZoom(detail, metric) { + if ( + (zoomStates[metric]?.x?.min !== detail?.lastZoomState?.x?.min) && + (zoomStates[metric]?.y?.max !== detail?.lastZoomState?.y?.max) + ) { + zoomStates[metric] = {...detail.lastZoomState} + } + + if (detail?.newRes) { // Triggers GQL + selectedResolution = detail.newRes + } + } + $: metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes }, + variables: { id, metrics, scopes, selectedResolution }, }); function refreshMetrics() { metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes }, + variables: { id, metrics, scopes, selectedResolution }, // requestPolicy: 'network-only' // use default cache-first for refresh }); } @@ -159,10 +174,7 @@ {#if metric.disabled == false && metric.data} { - // filterComponent.updateFilters(detail) - console.log("Upstream New Res:", detail) - }} + on:zoom={({detail}) => { handleZoom(detail, metric.data.name) }} width={plotWidth} height={plotHeight} timestep={metric.data.metric.timestep} @@ -175,6 +187,7 @@ isShared={job.exclusive != 1} numhwthreads={job.numHWThreads} numaccs={job.numAcc} + zoomState={zoomStates[metric.data.name]} /> {:else if metric.disabled == true && metric.data} { + u.over.addEventListener("dblclick", (e) => { + console.log('Dispatch Reset') + dispatch('zoom', { + lastZoomState: { + x: { time: false }, + y: { auto: true } + } + }); + }); + } + ], draw: [ (u) => { // Draw plot type label: @@ -437,17 +453,26 @@ setScale: [ (u, key) => { if (key === 'x') { - // Start - console.log('setScale X', key); - - // Decide which resolution to request - - // Dispatch request - const res = 1337; - dispatch('zoom-in', { - newres: res, - }); - + const numX = (u.series[0].idxs[1] - u.series[0].idxs[0]) + if (numX <= 20 && timestep !== 60) { // Zoom IN if not at MAX + console.log('Dispatch Zoom') + if (timestep == 600) { + dispatch('zoom', { + newRes: 240, + lastZoomState: u?.scales + }); + } else if (timestep === 240) { + dispatch('zoom', { + newRes: 60, + lastZoomState: u?.scales + }); + } + } else { + console.log('Dispatch Update') + dispatch('zoom', { + lastZoomState: u?.scales + }); + } }; } ] @@ -481,6 +506,10 @@ if (!uplot) { opts.width = width; opts.height = height; + if (zoomState) { + // console.log('Use last state for uPlot init:', metric, scope, zoomState) + opts.scales = {...zoomState} + } uplot = new uPlot(opts, plotData, plotWrapper); } else { uplot.setSize({ width, height }); @@ -489,7 +518,6 @@ function onSizeChange() { if (!uplot) return; - if (timeoutId != null) clearTimeout(timeoutId); timeoutId = setTimeout(() => { diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 41e3046..ceacca5 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -27,7 +27,9 @@ Spinner, Card, } from "@sveltestrap/sveltestrap"; - import { minScope } from "../generic/utils.js"; + import { + minScope, + } from "../generic/utils.js"; import Timeseries from "../generic/plots/MetricPlot.svelte"; export let job; @@ -39,9 +41,8 @@ export let rawData; export let isShared = false; - let selectedHost = null, - plot, - error = null; + let selectedHost = null; + let error = null; let 
selectedScope = minScope(scopes); let selectedResolution; let pendingResolution = 600; @@ -49,11 +50,12 @@ let patternMatches = false; let nodeOnly = false; // If, after load-all, still only node scope returned let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null); + let zoomState = null; + let pendingZoomState = null; const dispatch = createEventDispatcher(); const statsPattern = /(.*)-stat$/; const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? metricUnit.base : ""); - const resolutions = [600, 240, 60] // DEV: Make configable const client = getContextClient(); const subQuery = gql` query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!, $selectedResolution: Int) { @@ -86,6 +88,19 @@ } `; + function handleZoom(detail) { + if ( // States have to differ, causes deathloop if just set + (pendingZoomState?.x?.min !== detail?.lastZoomState?.x?.min) && + (pendingZoomState?.y?.max !== detail?.lastZoomState?.y?.max) + ) { + pendingZoomState = {...detail.lastZoomState} + } + + if (detail?.newRes) { // Triggers GQL + pendingResolution = detail.newRes + } + } + let metricData; let selectedScopes = [...scopes] const dbid = job.id; @@ -119,11 +134,15 @@ }); if ($metricData && !$metricData.fetching) { - rawData = $metricData.data.singleUpdate.map((x) => x.metric) scopes = $metricData.data.singleUpdate.map((x) => x.scope) statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) + // Keep Zoomlevel if ResChange By Zoom + if (pendingZoomState) { + zoomState = {...pendingZoomState} + } + // Set selected scope to min of returned scopes if (selectedScope == "load-all") { selectedScope = minScope(scopes) @@ -176,11 +195,6 @@ {/each} {/if} - {#key series} {#if $metricData?.fetching == true} @@ -189,11 +203,7 @@ {error.message} {:else if series != null && !patternMatches} { - // filterComponent.updateFilters(detail) - console.log("Upstream New Res:", detail) - }} + on:zoom={({detail}) => { handleZoom(detail) }} {width} height={300} cluster={job.cluster} @@ -203,14 +213,11 @@ metric={metricName} {series} {isShared} + {zoomState} /> {:else if statsSeries[selectedScopeIndex] != null && patternMatches} { - // filterComponent.updateFilters(detail) - console.log("Upstream New Res:", detail) - }} + on:zoom={({detail}) => { handleZoom(detail) }} {width} height={300} cluster={job.cluster} @@ -220,6 +227,7 @@ metric={metricName} {series} {isShared} + {zoomState} statisticsSeries={statsSeries[selectedScopeIndex]} useStatsSeries={!!statsSeries[selectedScopeIndex]} /> From 5eb6f7d307502c20459d480b4d58abd39c36743c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Sep 2024 18:45:33 +0200 Subject: [PATCH 123/443] fix: user name join not required for normal jobStats --- internal/repository/stats.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ef1cf9e..aa38d29 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -89,7 +89,7 @@ func (r *JobRepository) buildStatsQuery( ).From("job").Join("user ON user.username = job.user").GroupBy(col) } else { // Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours - query = sq.Select("COUNT(job.id)", "name", + query = sq.Select("COUNT(job.id)", fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as 
%s)`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s)`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s)`, time.Now().Unix(), castType), @@ -97,7 +97,7 @@ func (r *JobRepository) buildStatsQuery( fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s)`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s)`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s)`, time.Now().Unix(), castType), - ).From("job").Join("user ON user.username = job.user") + ).From("job") } for _, f := range filter { From 6443541a79a5a31fc4b2fac2dcc5c0d2764d11b0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 3 Sep 2024 09:34:45 +0200 Subject: [PATCH 124/443] fix SQL migration syntax --- .../sqlite3/08_add-footprint.up.sql | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index c101c6e..59ab747 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -31,44 +31,44 @@ ALTER TABLE job DROP net_data_vol_total; ALTER TABLE job DROP file_bw_avg; ALTER TABLE job DROP file_data_vol_total; -CREATE INDEX jobs_cluster IF NOT EXISTS ON job (cluster); -CREATE INDEX jobs_cluster_starttime IF NOT EXISTS ON job (cluster, start_time); -CREATE INDEX jobs_cluster_user IF NOT EXISTS ON job (cluster, user); -CREATE INDEX jobs_cluster_project IF NOT EXISTS ON job (cluster, project); -CREATE INDEX jobs_cluster_subcluster IF NOT EXISTS ON job (cluster, subcluster); +CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); +CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); -CREATE INDEX jobs_cluster_partition IF NOT EXISTS ON job (cluster, partition); -CREATE INDEX jobs_cluster_partition_starttime IF NOT EXISTS ON job (cluster, partition, start_time); -CREATE INDEX jobs_cluster_partition_jobstate IF NOT EXISTS ON job (cluster, partition, job_state); -CREATE INDEX jobs_cluster_partition_jobstate_user IF NOT EXISTS ON job (cluster, partition, job_state, user); -CREATE INDEX jobs_cluster_partition_jobstate_project IF NOT EXISTS ON job (cluster, partition, job_state, project); -CREATE INDEX jobs_cluster_partition_jobstate_starttime IF NOT EXISTS ON job (cluster, partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, partition); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, partition, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, partition, job_state, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, 
partition, job_state, start_time); -CREATE INDEX jobs_cluster_jobstate IF NOT EXISTS ON job (cluster, job_state); -CREATE INDEX jobs_cluster_jobstate_starttime IF NOT EXISTS ON job (cluster, job_state, starttime); -CREATE INDEX jobs_cluster_jobstate_user IF NOT EXISTS ON job (cluster, job_state, user); -CREATE INDEX jobs_cluster_jobstate_project IF NOT EXISTS ON job (cluster, job_state, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, starttime); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); -CREATE INDEX jobs_user IF NOT EXISTS ON job (user); -CREATE INDEX jobs_user_starttime IF NOT EXISTS ON job (user, start_time); +CREATE INDEX IF NOT EXISTS jobs_user ON job (user); +CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (user, start_time); -CREATE INDEX jobs_project IF NOT EXISTS ON job (project); -CREATE INDEX jobs_project_starttime IF NOT EXISTS ON job (project, start_time); -CREATE INDEX jobs_project_user IF NOT EXISTS ON job (project, user); +CREATE INDEX IF NOT EXISTS jobs_project ON job (project); +CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); +CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, user); -CREATE INDEX jobs_jobstate IF NOT EXISTS ON job (job_state); -CREATE INDEX jobs_jobstate_user IF NOT EXISTS ON job (job_state, user); -CREATE INDEX jobs_jobstate_project IF NOT EXISTS ON job (job_state, project); -CREATE INDEX jobs_jobstate_cluster IF NOT EXISTS ON job (job_state, cluster); -CREATE INDEX jobs_jobstate_starttime IF NOT EXISTS ON job (job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); +CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, user); +CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); +CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); +CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); -CREATE INDEX jobs_arrayjobid_starttime IF NOT EXISTS ON job (array_job_id, start_time); -CREATE INDEX jobs_cluster_arrayjobid_starttime IF NOT EXISTS ON job (cluster, array_job_id, start_time); +CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time); +CREATE INDEX jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); -CREATE INDEX jobs_starttime IF NOT EXISTS ON job (start_time); -CREATE INDEX jobs_duration IF NOT EXISTS ON job (duration); -CREATE INDEX jobs_numnodes IF NOT EXISTS ON job (num_nodes); -CREATE INDEX jobs_numhwthreads IF NOT EXISTS ON job (num_hwthreads); -CREATE INDEX jobs_numacc IF NOT EXISTS ON job (num_acc); +CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); +CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); +CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); +CREATE INDEX IF NOT EXISTS jobs_numhwthreads ON job (num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_numacc ON job (num_acc); PRAGMA optimize; From 275a77807eac22df6db41464bf94d7aa463cdd03 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 3 Sep 2024 09:40:00 +0200 Subject: [PATCH 125/443] fix typo in migration --- internal/repository/migrations/sqlite3/08_add-footprint.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 59ab747..4fb5e94 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -45,7 +45,7 @@ CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (clust CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, partition, job_state, start_time); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); -CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, starttime); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); From 39c09f8565fe0f63f808416ad018fea3e6453d13 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Sep 2024 10:03:38 +0200 Subject: [PATCH 126/443] Introduce job duration update task --- internal/repository/job.go | 17 +++++++++++- internal/taskManager/taskManager.go | 1 + internal/taskManager/updateDurationService.go | 26 +++++++++++++++++++ ...ntService.go => updateFootprintService.go} | 0 4 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 internal/taskManager/updateDurationService.go rename internal/taskManager/{footprintService.go => updateFootprintService.go} (100%) diff --git a/internal/repository/job.go b/internal/repository/job.go index 7cfe4fd..01dc0af 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -205,7 +205,10 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er return err } - if _, err = sq.Update("job").Set("meta_data", job.RawMetaData).Where("job.id = ?", job.ID).RunWith(r.stmtCache).Exec(); err != nil { + if _, err = sq.Update("job"). + Set("meta_data", job.RawMetaData). + Where("job.id = ?", job.ID). + RunWith(r.stmtCache).Exec(); err != nil { log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID) return err } @@ -480,6 +483,18 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) { return jobs, nil } +func (r *JobRepository) UpdateDuration() error { + if _, err := sq.Update("job"). + Set("duration", sq.Expr("? - job.start_time", time.Now().Unix())). + Where("job_state = running"). + RunWith(r.stmtCache).Exec(); err != nil { + log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID) + return err + } + + return nil +} + func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64) ([]*schema.Job, error) { var query sq.SelectBuilder diff --git a/internal/taskManager/taskManager.go b/internal/taskManager/taskManager.go index b31a1a1..101fc4a 100644 --- a/internal/taskManager/taskManager.go +++ b/internal/taskManager/taskManager.go @@ -80,6 +80,7 @@ func Start() { } RegisterFootprintWorker() + RegisterUpdateDurationWorker() s.Start() } diff --git a/internal/taskManager/updateDurationService.go b/internal/taskManager/updateDurationService.go new file mode 100644 index 0000000..afc1045 --- /dev/null +++ b/internal/taskManager/updateDurationService.go @@ -0,0 +1,26 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+package taskManager + +import ( + "time" + + "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/go-co-op/gocron/v2" +) + +func RegisterUpdateDurationWorker() { + log.Info("Register duration update service") + + d, _ := time.ParseDuration("5m") + s.NewJob(gocron.DurationJob(d), + gocron.NewTask( + func() { + start := time.Now() + log.Printf("Update duration started at %s", start.Format(time.RFC3339)) + jobRepo.UpdateDuration() + log.Print("Update duration is done and took %s", time.Since(start)) + })) +} diff --git a/internal/taskManager/footprintService.go b/internal/taskManager/updateFootprintService.go similarity index 100% rename from internal/taskManager/footprintService.go rename to internal/taskManager/updateFootprintService.go From 4b1b34d8a7efc5fbd8570c28fe7f8828ee370e82 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 3 Sep 2024 13:10:44 +0200 Subject: [PATCH 127/443] remove logging, remove forced change to node scope --- web/frontend/src/generic/plots/MetricPlot.svelte | 6 +++--- web/frontend/src/job/Metric.svelte | 7 ------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 05ebc52..a73f993 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -399,7 +399,7 @@ init: [ (u) => { u.over.addEventListener("dblclick", (e) => { - console.log('Dispatch Reset') + // console.log('Dispatch Reset') dispatch('zoom', { lastZoomState: { x: { time: false }, @@ -455,7 +455,7 @@ if (key === 'x') { const numX = (u.series[0].idxs[1] - u.series[0].idxs[0]) if (numX <= 20 && timestep !== 60) { // Zoom IN if not at MAX - console.log('Dispatch Zoom') + // console.log('Dispatch Zoom') if (timestep == 600) { dispatch('zoom', { newRes: 240, @@ -468,7 +468,7 @@ }); } } else { - console.log('Dispatch Update') + // console.log('Dispatch Update') dispatch('zoom', { lastZoomState: u?.scales }); diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index ceacca5..cb41f2b 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -117,13 +117,6 @@ selectedScopes = [...scopes, "socket", "core", "accelerator"] } - if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { - selectedScope = String("node") - selectedScopes = ["node"] - // Instead of adding acc to load-all: always add by default if native is acc - // selectedScopes = nativeScope == "accelerator" ? 
["node", "accelerator"] : ["node"] - } - selectedResolution = Number(pendingResolution) metricData = queryStore({ From 6568b6d72355974eacec1d43e638c9bd6f53722a Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Sep 2024 13:40:11 +0200 Subject: [PATCH 128/443] Prepare transaction API for general usage --- internal/importer/initDB.go | 2 +- internal/repository/transaction.go | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index fe49e94..7e9fed5 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -27,7 +27,7 @@ func InitDB() error { starttime := time.Now() log.Print("Building job table...") - t, err := r.TransactionInit() + t, err := r.TransactionInit(repository.NamedJobInsert) if err != nil { log.Warn("Error while initializing SQL transactions") return err diff --git a/internal/repository/transaction.go b/internal/repository/transaction.go index 9398354..992b423 100644 --- a/internal/repository/transaction.go +++ b/internal/repository/transaction.go @@ -15,20 +15,19 @@ type Transaction struct { stmt *sqlx.NamedStmt } -func (r *JobRepository) TransactionInit() (*Transaction, error) { +func (r *JobRepository) TransactionInit(sqlStmt string) (*Transaction, error) { var err error t := new(Transaction) - // Inserts are bundled into transactions because in sqlite, - // that speeds up inserts A LOT. + t.tx, err = r.DB.Beginx() if err != nil { log.Warn("Error while bundling transactions") return nil, err } - t.stmt, err = t.tx.PrepareNamed(NamedJobInsert) + t.stmt, err = t.tx.PrepareNamed(sqlStmt) if err != nil { - log.Warn("Error while preparing namedJobInsert") + log.Warn("Error while preparing SQL statement in transaction") return nil, err } @@ -63,8 +62,8 @@ func (r *JobRepository) TransactionEnd(t *Transaction) error { return nil } -func (r *JobRepository) TransactionAdd(t *Transaction, job schema.Job) (int64, error) { - res, err := t.stmt.Exec(job) +func (r *JobRepository) TransactionAdd(t *Transaction, obj interface{}) (int64, error) { + res, err := t.stmt.Exec(obj) if err != nil { log.Errorf("repository initDB(): %v", err) return 0, err From f58efa28711a9df1460c110bb78bb3d90ea9cfb9 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Sep 2024 13:41:00 +0200 Subject: [PATCH 129/443] Allow to combine job update queries --- internal/archiver/archiveWorker.go | 12 ++-- internal/repository/job.go | 60 ++++++++----------- internal/taskManager/updateDurationService.go | 2 +- .../taskManager/updateFootprintService.go | 16 +++-- 4 files changed, 46 insertions(+), 44 deletions(-) diff --git a/internal/archiver/archiveWorker.go b/internal/archiver/archiveWorker.go index 4de5032..628e36e 100644 --- a/internal/archiver/archiveWorker.go +++ b/internal/archiver/archiveWorker.go @@ -12,6 +12,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" + sq "github.com/Masterminds/squirrel" ) var ( @@ -53,17 +54,20 @@ func archivingWorker() { continue } - if err := jobRepo.UpdateFootprint(jobMeta); err != nil { + stmt := sq.Update("job").Where("job.id = ?", job.ID) + + if stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta); err != nil { log.Errorf("archiving job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error()) continue } - if err := jobRepo.UpdateEnergy(jobMeta); err != nil { + if stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta); err != nil { 
log.Errorf("archiving job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error()) continue } // Update the jobs database entry one last time: - if err := jobRepo.MarkArchived(jobMeta, schema.MonitoringStatusArchivingSuccessful); err != nil { - log.Errorf("archiving job (dbid: %d) failed at marking archived step: %s", job.ID, err.Error()) + stmt = jobRepo.MarkArchived(stmt, schema.MonitoringStatusArchivingSuccessful) + if err := jobRepo.Execute(stmt); err != nil { + log.Errorf("archiving job (dbid: %d) failed at db execute: %s", job.ID, err.Error()) continue } log.Debugf("archiving job %d took %s", job.JobID, time.Since(start)) diff --git a/internal/repository/job.go b/internal/repository/job.go index 01dc0af..e5e2569 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -488,7 +488,6 @@ func (r *JobRepository) UpdateDuration() error { Set("duration", sq.Expr("? - job.start_time", time.Now().Unix())). Where("job_state = running"). RunWith(r.stmtCache).Exec(); err != nil { - log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID) return err } @@ -542,27 +541,29 @@ func (r *JobRepository) UpdateMonitoringStatus(job int64, monitoringStatus int32 return } -// FIXME: Combine the next three queries into one providing the db statement as function argument! -func (r *JobRepository) MarkArchived( - jobMeta *schema.JobMeta, - monitoringStatus int32, -) error { - stmt := sq.Update("job"). - Set("monitoring_status", monitoringStatus). - Where("job.id = ?", jobMeta.JobID) - +func (r *JobRepository) Execute(stmt sq.UpdateBuilder) error { if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil { - log.Warn("Error while marking job as archived") return err } + return nil } -func (r *JobRepository) UpdateEnergy(jobMeta *schema.JobMeta) error { +func (r *JobRepository) MarkArchived( + stmt sq.UpdateBuilder, + monitoringStatus int32, +) sq.UpdateBuilder { + return stmt.Set("monitoring_status", monitoringStatus) +} + +func (r *JobRepository) UpdateEnergy( + stmt sq.UpdateBuilder, + jobMeta *schema.JobMeta, +) (sq.UpdateBuilder, error) { sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster) if err != nil { log.Errorf("cannot get subcluster: %s", err.Error()) - return err + return stmt, err } energyFootprint := make(map[string]float64) var totalEnergy float64 @@ -586,26 +587,23 @@ func (r *JobRepository) UpdateEnergy(jobMeta *schema.JobMeta) error { if rawFootprint, err = json.Marshal(energyFootprint); err != nil { log.Warnf("Error while marshaling energy footprint for job, DB ID '%v'", jobMeta.ID) - return err + return stmt, err } - stmt := sq.Update("job"). - Set("energy_footprint", rawFootprint). - Set("energy", totalEnergy). - Where("job.id = ?", jobMeta.JobID) + stmt.Set("energy_footprint", rawFootprint). 
+ Set("energy", totalEnergy) - if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil { - log.Warn("Error while updating job energy footprint") - return err - } - return nil + return stmt, nil } -func (r *JobRepository) UpdateFootprint(jobMeta *schema.JobMeta) error { +func (r *JobRepository) UpdateFootprint( + stmt sq.UpdateBuilder, + jobMeta *schema.JobMeta, +) (sq.UpdateBuilder, error) { sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster) if err != nil { log.Errorf("cannot get subcluster: %s", err.Error()) - return err + return stmt, err } footprint := make(map[string]float64) @@ -624,15 +622,9 @@ func (r *JobRepository) UpdateFootprint(jobMeta *schema.JobMeta) error { if rawFootprint, err = json.Marshal(footprint); err != nil { log.Warnf("Error while marshaling footprint for job, DB ID '%v'", jobMeta.ID) - return err + return stmt, err } - stmt := sq.Update("job").Set("footprint", rawFootprint). - Where("job.id = ?", jobMeta.JobID) - - if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil { - log.Warn("Error while updating job footprint") - return err - } - return nil + stmt.Set("footprint", rawFootprint) + return stmt, nil } diff --git a/internal/taskManager/updateDurationService.go b/internal/taskManager/updateDurationService.go index afc1045..6023547 100644 --- a/internal/taskManager/updateDurationService.go +++ b/internal/taskManager/updateDurationService.go @@ -21,6 +21,6 @@ func RegisterUpdateDurationWorker() { start := time.Now() log.Printf("Update duration started at %s", start.Format(time.RFC3339)) jobRepo.UpdateDuration() - log.Print("Update duration is done and took %s", time.Since(start)) + log.Printf("Update duration is done and took %s", time.Since(start)) })) } diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index ff76e25..510c73e 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -13,6 +13,7 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" + sq "github.com/Masterminds/squirrel" "github.com/go-co-op/gocron/v2" ) @@ -22,8 +23,8 @@ func RegisterFootprintWorker() { s.NewJob(gocron.DurationJob(d), gocron.NewTask( func() { - t := time.Now() - log.Printf("Update Footprints started at %s", t.Format(time.RFC3339)) + s := time.Now() + log.Printf("Update Footprints started at %s", s.Format(time.RFC3339)) for _, cluster := range archive.Clusters { jobs, err := jobRepo.FindRunningJobs(cluster.Name) if err != nil { @@ -77,16 +78,21 @@ func RegisterFootprintWorker() { } } - if err := jobRepo.UpdateFootprint(jobMeta); err != nil { + stmt := sq.Update("job").Where("job.id = ?", job.ID) + if stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta); err != nil { log.Errorf("Update job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error()) continue } - if err := jobRepo.UpdateEnergy(jobMeta); err != nil { + if stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta); err != nil { log.Errorf("Update job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error()) continue } + if err := jobRepo.Execute(stmt); err != nil { + log.Errorf("Update job (dbid: %d) failed at db execute: %s", job.ID, err.Error()) + continue + } } } - log.Print("Update Footprints done") + log.Printf("Update Footprints is done and took %s", time.Since(s)) })) } From 193bee5ac8f4718bc42de4efe63d68a7eb5a9842 Mon Sep 17 00:00:00 2001 From: Christoph Kluge 
Date: Tue, 3 Sep 2024 14:16:16 +0200 Subject: [PATCH 130/443] fix: prevent addition of existing scopes to table --- web/frontend/src/job/StatsTable.svelte | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index e6a03e3..aa5aa5f 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -25,8 +25,8 @@ export let job; export let jobMetrics; - const allMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort(), - scopesForMetric = (metric) => + const allMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort() + const scopesForMetric = (metric) => jobMetrics.filter((jm) => jm.name == metric).map((jm) => jm.scope); let hosts = job.resources.map((r) => r.hostname).sort(), @@ -87,8 +87,12 @@ } export function moreLoaded(moreJobMetrics) { - jobMetrics = [...jobMetrics, ...moreJobMetrics] - } + moreJobMetrics.forEach(function (newMetric) { + if (!jobMetrics.some((m) => m.scope == newMetric.scope)) { + jobMetrics = [...jobMetrics, newMetric] + } + }); + };
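A note on the UpdateEnergy/UpdateFootprint helpers introduced in PATCH 129 above: squirrel's
UpdateBuilder has value semantics, i.e. Set returns a new builder and leaves its receiver
untouched, so a bare `stmt.Set(...)` followed by `return stmt, nil` drops the SET clause again.
A minimal sketch of the safe form, reusing the variable names from those hunks:

    // Set does not mutate stmt in place; keep the returned builder,
    // otherwise the energy columns are silently missing from the UPDATE.
    stmt = stmt.Set("energy_footprint", rawFootprint).
        Set("energy", totalEnergy)
    return stmt, nil

The next patch cleans up the transaction API prepared in PATCH 128. A hedged usage sketch of
the resulting interface — the method names match the diffs below, the surrounding repository
setup (`r`, `jobs`) is assumed:

    // Bundle many inserts into one SQLite transaction; committing per
    // row would force a filesystem sync for every job.
    func bulkImport(r *repository.JobRepository, jobs []schema.JobMeta) error {
        t, err := r.TransactionInit()
        if err != nil {
            return err
        }
        for _, job := range jobs {
            // NamedJobInsert is the prepared named INSERT used by the importer.
            if _, err := r.TransactionAddNamed(t, repository.NamedJobInsert, job); err != nil {
                log.Errorf("insert failed: %v", err)
            }
        }
        if err := r.TransactionCommit(t); err != nil { // flush this batch
            return err
        }
        return r.TransactionEnd(t) // final commit and cleanup
    }
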
From e267481f7191a4974c99403fb5e80c7520fe319b Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Sep 2024 15:40:02 +0200 Subject: [PATCH 131/443] Cleanup transaction api --- internal/importer/initDB.go | 18 +++++++++--- internal/repository/transaction.go | 45 ++++++++++-------------------- 2 files changed, 28 insertions(+), 35 deletions(-) diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index 7e9fed5..5f06f36 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -16,6 +16,11 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/schema" ) +const ( + addTagQuery = "INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)" + setTagQuery = "INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)" +) + // Delete the tables "job", "tag" and "jobtag" from the database and // repopulate them using the jobs found in `archive`. func InitDB() error { @@ -27,7 +32,7 @@ func InitDB() error { starttime := time.Now() log.Print("Building job table...") - t, err := r.TransactionInit(repository.NamedJobInsert) + t, err := r.TransactionInit() if err != nil { log.Warn("Error while initializing SQL transactions") return err @@ -105,7 +110,8 @@ func InitDB() error { continue } - id, err := r.TransactionAdd(t, job) + id, err := r.TransactionAddNamed(t, + repository.NamedJobInsert, job) if err != nil { log.Errorf("repository initDB(): %v", err) errorOccured++ @@ -116,7 +122,9 @@ func InitDB() error { tagstr := tag.Name + ":" + tag.Type tagId, ok := tags[tagstr] if !ok { - tagId, err = r.TransactionAddTag(t, tag) + tagId, err = r.TransactionAdd(t, + addTagQuery, + tag.Name, tag.Type) if err != nil { log.Errorf("Error adding tag: %v", err) errorOccured++ @@ -125,7 +133,9 @@ func InitDB() error { tags[tagstr] = tagId } - r.TransactionSetTag(t, id, tagId) + r.TransactionAdd(t, + setTagQuery, + id, tagId) } if err == nil { diff --git a/internal/repository/transaction.go b/internal/repository/transaction.go index 992b423..8c5d357 100644 --- a/internal/repository/transaction.go +++ b/internal/repository/transaction.go @@ -6,7 +6,6 @@ package repository import ( "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/jmoiron/sqlx" ) @@ -15,7 +14,7 @@ type Transaction struct { stmt *sqlx.NamedStmt } -func (r *JobRepository) TransactionInit(sqlStmt string) (*Transaction, error) { +func (r *JobRepository) TransactionInit() (*Transaction, error) { var err error t := new(Transaction) @@ -24,13 +23,6 @@ func (r *JobRepository) TransactionInit(sqlStmt string) (*Transaction, error) { log.Warn("Error while bundling transactions") return nil, err } - - t.stmt, err = t.tx.PrepareNamed(sqlStmt) - if err != nil { - log.Warn("Error while preparing SQL statement in transaction") - return nil, err - } - return t, nil } @@ -49,7 +41,6 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error { return err } - t.stmt = t.tx.NamedStmt(t.stmt) return nil } @@ -62,10 +53,14 @@ func (r *JobRepository) TransactionEnd(t *Transaction) error { return nil } -func (r *JobRepository) TransactionAdd(t *Transaction, obj interface{}) (int64, error) { - res, err := t.stmt.Exec(obj) +func (r *JobRepository) TransactionAddNamed( + t *Transaction, + query string, + args ...interface{}, +) (int64, error) { + res, err := t.tx.NamedExec(query, args) if err != nil { - log.Errorf("repository initDB(): %v", err) + log.Errorf("Named Exec failed: %v", err) return 0, err } @@ -78,26 +73,14 @@ func (r *JobRepository) TransactionAdd(t *Transaction, obj 
interface{}) (int64, return id, nil } -func (r *JobRepository) TransactionAddTag(t *Transaction, tag *schema.Tag) (int64, error) { - res, err := t.tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type) +func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) { + res := t.tx.MustExec(query, args) + + id, err := res.LastInsertId() if err != nil { - log.Errorf("Error while inserting tag into tag table: %v (Type %v)", tag.Name, tag.Type) - return 0, err - } - tagId, err := res.LastInsertId() - if err != nil { - log.Warn("Error while getting last insert ID") + log.Errorf("repository initDB(): %v", err) return 0, err } - return tagId, nil -} - -func (r *JobRepository) TransactionSetTag(t *Transaction, jobId int64, tagId int64) error { - if _, err := t.tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, jobId, tagId); err != nil { - log.Errorf("Error while inserting jobtag into jobtag table: %v (TagID %v)", jobId, tagId) - return err - } - - return nil + return id, nil } From 508978d586a9e9f98e6c1080160b520fc2bad441 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Sep 2024 15:59:01 +0200 Subject: [PATCH 132/443] Initial attempt to update footprints in transaction --- internal/archiver/archiver.go | 3 ++- .../taskManager/updateFootprintService.go | 20 +++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index abaecd6..de84cf0 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -24,7 +24,8 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { } scopes := []schema.MetricScope{schema.MetricScopeNode} - if job.NumNodes <= 8 { // FIXME: Add a config option for this + // FIXME: Add a config option for this + if job.NumNodes <= 8 { // This will add the native scope if core scope is not available scopes = append(scopes, schema.MetricScopeCore) } diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 510c73e..2fdd6b9 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -25,6 +25,12 @@ func RegisterFootprintWorker() { func() { s := time.Now() log.Printf("Update Footprints started at %s", s.Format(time.RFC3339)) + + t, err := jobRepo.TransactionInit() + if err != nil { + log.Errorf("Failed TransactionInit %v", err) + } + for _, cluster := range archive.Clusters { jobs, err := jobRepo.FindRunningJobs(cluster.Name) if err != nil { @@ -87,12 +93,22 @@ func RegisterFootprintWorker() { log.Errorf("Update job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error()) continue } - if err := jobRepo.Execute(stmt); err != nil { - log.Errorf("Update job (dbid: %d) failed at db execute: %s", job.ID, err.Error()) + + query, args, err := stmt.ToSql() + if err != nil { + log.Errorf("Failed in ToSQL conversion %v", err) continue } + jobRepo.TransactionAdd(t, query, args) + // if err := jobRepo.Execute(stmt); err != nil { + // log.Errorf("Update job (dbid: %d) failed at db execute: %s", job.ID, err.Error()) + // continue + // } } + + jobRepo.TransactionCommit(t) } + jobRepo.TransactionEnd(t) log.Printf("Update Footprints is done and took %s", time.Since(s)) })) } From 398e3c1b91f2e96ac4a22cbaa9f306973449f6e1 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 4 Sep 2024 10:23:23 +0200 Subject: [PATCH 133/443] feat: split concurrent jobs list to own 
scrollable component --- web/frontend/src/Job.root.svelte | 40 +--------- .../src/generic/helper/ConcurrentJobs.svelte | 79 +++++++++++++++++++ 2 files changed, 83 insertions(+), 36 deletions(-) create mode 100644 web/frontend/src/generic/helper/ConcurrentJobs.svelte diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 213898e..ab0bfee 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -38,6 +38,7 @@ import TagManagement from "./job/TagManagement.svelte"; import StatsTable from "./job/StatsTable.svelte"; import JobFootprint from "./generic/helper/JobFootprint.svelte"; + import ConcurrentJobs from "./generic/helper/ConcurrentJobs.svelte"; import PlotTable from "./generic/PlotTable.svelte"; import Polar from "./generic/plots/Polar.svelte"; import Roofline from "./generic/plots/Roofline.svelte"; @@ -250,42 +251,9 @@ {/if} {#if $initq?.data && $jobMetrics?.data?.jobMetrics} {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} - {#if authlevel > roles.manager} - -
- Concurrent Jobs -
-
    -
- See All
-
- {#each $initq.data.job.concurrentJobs.items as pjob, index}
-
- {pjob.jobId}
-
- {/each}
- - {:else} - -
- {$initq.data.job.concurrentJobs.items.length} Concurrent Jobs -
-

- Number of shared jobs on the same node with overlapping runtimes. -

- - {/if} + + roles.manager)}/> + {/if} + + + + + {#if displayTitle} + + + {cJobs.items.length} Concurrent Jobs + + + + {/if} + + {#if showLinks} + + {:else} + {#if displayTitle} +

+ Jobs running on the same node with overlapping runtimes using shared resources. +

+ {:else} +

+ {cJobs.items.length} + Jobs running on the same node with overlapping runtimes using shared resources. +

+ {/if} + {/if} +
+
+ + From 53ca38ce530861e501965d011828843cf4b0c1db Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 5 Sep 2024 11:18:00 +0200 Subject: [PATCH 134/443] Add debug output to duration query --- internal/repository/job.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/repository/job.go b/internal/repository/job.go index e5e2569..1cb4c62 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -484,10 +484,14 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) { } func (r *JobRepository) UpdateDuration() error { - if _, err := sq.Update("job"). + stmnt := sq.Update("job"). Set("duration", sq.Expr("? - job.start_time", time.Now().Unix())). - Where("job_state = running"). - RunWith(r.stmtCache).Exec(); err != nil { + Where("job_state = running") + sql, _, err := stmnt.ToSql() + log.Infof("Duration Update query %s", sql) + + _, err = stmnt.RunWith(r.stmtCache).Exec() + if err != nil { return err } From 5e65e21f0bb9387523f46887de607319e0df831a Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 5 Sep 2024 12:38:39 +0200 Subject: [PATCH 135/443] Add quotes in duration query --- internal/repository/job.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/job.go b/internal/repository/job.go index 1cb4c62..c411ab2 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -486,7 +486,7 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) { func (r *JobRepository) UpdateDuration() error { stmnt := sq.Update("job"). Set("duration", sq.Expr("? - job.start_time", time.Now().Unix())). - Where("job_state = running") + Where("job_state = 'running'") sql, _, err := stmnt.ToSql() log.Infof("Duration Update query %s", sql) From 7c33dcf630f13c376f56ec5f72f83db9887c5274 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 5 Sep 2024 14:58:08 +0200 Subject: [PATCH 136/443] Bugfix in footprint update --- internal/repository/job.go | 4 +--- internal/taskManager/updateFootprintService.go | 7 +++++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/internal/repository/job.go b/internal/repository/job.go index c411ab2..9bad866 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -487,10 +487,8 @@ func (r *JobRepository) UpdateDuration() error { stmnt := sq.Update("job"). Set("duration", sq.Expr("? - job.start_time", time.Now().Unix())). 
Where("job_state = 'running'") - sql, _, err := stmnt.ToSql() - log.Infof("Duration Update query %s", sql) - _, err = stmnt.RunWith(r.stmtCache).Exec() + _, err := stmnt.RunWith(r.stmtCache).Exec() if err != nil { return err } diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 2fdd6b9..2434fd1 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -85,11 +85,14 @@ func RegisterFootprintWorker() { } stmt := sq.Update("job").Where("job.id = ?", job.ID) - if stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta); err != nil { + stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta) + if err != nil { log.Errorf("Update job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error()) continue } - if stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta); err != nil { + + stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta) + if err != nil { log.Errorf("Update job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error()) continue } From b04bf6a9517f3f7d2b326a2e13e4a798033bb8fb Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 5 Sep 2024 15:00:43 +0200 Subject: [PATCH 137/443] fix missing condition in migration --- internal/repository/migrations/sqlite3/08_add-footprint.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 4fb5e94..7f0d578 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -63,7 +63,7 @@ CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time); -CREATE INDEX jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); From 7ea4086807cc13b08422e303e9d962fdc5fec2ff Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 5 Sep 2024 15:06:38 +0200 Subject: [PATCH 138/443] Rework sqlite indices in v8 migration --- .../sqlite3/08_add-footprint.up.sql | 54 +++++++++++++++++-- 1 file changed, 49 insertions(+), 5 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 4895258..7f0d578 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,7 +1,11 @@ -CREATE INDEX IF NOT EXISTS jobs_cluster_orderby_starttime ON job (cluster, start_time DESC); -CREATE INDEX IF NOT EXISTS jobs_cluster_count ON job (cluster, job_state, start_time); -CREATE INDEX IF NOT EXISTS jobs_project_orderby_starttime ON job (project, start_time DESC); -CREATE INDEX IF NOT EXISTS jobs_project_count ON job (project, job_state, start_time); +DROP INDEX job_stats; +DROP INDEX job_by_user; +DROP INDEX job_by_starttime; +DROP INDEX job_by_job_id; +DROP INDEX job_list; +DROP INDEX job_list_user; +DROP INDEX job_list_users; +DROP INDEX job_list_users_start; ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; ALTER 
TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; @@ -27,4 +31,44 @@ ALTER TABLE job DROP net_data_vol_total; ALTER TABLE job DROP file_bw_avg; ALTER TABLE job DROP file_data_vol_total; -PRAGMA optimize; \ No newline at end of file +CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); +CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); + +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, partition); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, partition, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, partition, job_state, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, partition, job_state, start_time); + +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); + +CREATE INDEX IF NOT EXISTS jobs_user ON job (user); +CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (user, start_time); + +CREATE INDEX IF NOT EXISTS jobs_project ON job (project); +CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); +CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, user); + +CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); +CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, user); +CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); +CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); +CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); + +CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); + +CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); +CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); +CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); +CREATE INDEX IF NOT EXISTS jobs_numhwthreads ON job (num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_numacc ON job (num_acc); + +PRAGMA optimize; From df484dc816e66bae078f74f457c8c7cb5992b8a2 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 5 Sep 2024 16:44:03 +0200 Subject: [PATCH 139/443] rework job view header, change footprint to summary component --- web/frontend/src/Job.root.svelte | 264 ++++++++------ .../src/generic/helper/ConcurrentJobs.svelte | 94 +++-- .../src/generic/joblist/JobInfo.svelte | 2 +- web/frontend/src/generic/plots/Polar.svelte | 91 +++-- .../src/generic/plots/Roofline.svelte | 8 +- web/frontend/src/job/JobSummary.svelte | 340 ++++++++++++++++++ web/frontend/src/job/StatsTable.svelte | 4 +- 7 files changed, 626 insertions(+), 177 deletions(-) create mode 100644 
web/frontend/src/job/JobSummary.svelte diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index ab0bfee..8ea259a 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -37,10 +37,9 @@ import Metric from "./job/Metric.svelte"; import TagManagement from "./job/TagManagement.svelte"; import StatsTable from "./job/StatsTable.svelte"; - import JobFootprint from "./generic/helper/JobFootprint.svelte"; + import JobSummary from "./job/JobSummary.svelte"; import ConcurrentJobs from "./generic/helper/ConcurrentJobs.svelte"; import PlotTable from "./generic/PlotTable.svelte"; - import Polar from "./generic/plots/Polar.svelte"; import Roofline from "./generic/plots/Roofline.svelte"; import JobInfo from "./generic/joblist/JobInfo.svelte"; import MetricSelection from "./generic/select/MetricSelection.svelte"; @@ -232,62 +231,96 @@ })); - -
+ + + {#if $initq.error} {$initq.error.message} {:else if $initq.data} - + + + + + + + + {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} + + + {$initq.data.job.concurrentJobs.items.length} Concurrent Jobs + + + roles.manager)}/> + + + {/if} + {#if $initq.data?.job?.metaData?.message} + + +

This note was added by administrators:

+
+

{@html $initq.data.job.metaData.message}

+
+
+ {/if} +
+
{:else} {/if} - {#if $initq.data && showFootprint} -
- - - {/if} - {#if $initq?.data && $jobMetrics?.data?.jobMetrics} - {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} - - roles.manager)}/> - - {/if} - - - - - c.name == $initq.data.job.cluster) - .subClusters.find((sc) => sc.name == $initq.data.job.subCluster)} - data={transformDataForRoofline( - $jobMetrics.data.jobMetrics.find( - (m) => m.name == "flops_any" && m.scope == "node", - )?.metric, - $jobMetrics.data.jobMetrics.find( - (m) => m.name == "mem_bw" && m.scope == "node", - )?.metric, - )} - /> + + + {#if showFootprint} + + {#if $initq.error} + {$initq.error.message} + {:else if $initq?.data && $jobMetrics?.data} + + {:else} + + {/if} {:else} - - - + {/if} + + + + + + + {#if $initq.error || $jobMetrics.error} + +

Initq Error: {$initq.error?.message}

+

jobMetrics Error: {$jobMetrics.error?.message}

+
+ {:else if $initq?.data && $jobMetrics?.data} + + c.name == $initq.data.job.cluster) + .subClusters.find((sc) => sc.name == $initq.data.job.subCluster)} + data={transformDataForRoofline( + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "flops_any" && m.scope == "node", + )?.metric, + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "mem_bw" && m.scope == "node", + )?.metric, + )} + /> + + {:else} + + {/if} + - + +
+ +
{#if $initq.data} @@ -344,76 +377,81 @@ {/if} - + +
+ +
{#if $initq.data} - - {#if somethingMissing} - -
- - - Missing Metrics/Resources - - - {#if missingMetrics.length > 0} -

- No data at all is available for the metrics: {missingMetrics.join( - ", ", - )} -

- {/if} - {#if missingHosts.length > 0} -

Some metrics are missing for the following hosts:

-
    - {#each missingHosts as missing} -
  • - {missing.hostname}: {missing.metrics.join(", ")} -
  • - {/each} -
- {/if} -
-
+ + + {#if somethingMissing} + +
+ + + Missing Metrics/Resources + + + {#if missingMetrics.length > 0} +

+ No data at all is available for the metrics: {missingMetrics.join( + ", ", + )} +

+ {/if} + {#if missingHosts.length > 0} +

Some metrics are missing for the following hosts:

+
    + {#each missingHosts as missing} +
  • + {missing.hostname}: {missing.metrics.join(", ")} +
  • + {/each} +
+ {/if} +
+
+
+
+ {/if} + + {#if $jobMetrics?.data?.jobMetrics} + {#key $jobMetrics.data.jobMetrics} + + {/key} + {/if} + + +
+ {#if $initq.data.job.metaData?.jobScript} +
{$initq.data.job.metaData?.jobScript}
+ {:else} + No job script available + {/if}
- {/if} - - {#if $jobMetrics?.data?.jobMetrics} - {#key $jobMetrics.data.jobMetrics} - - {/key} - {/if} - - -
- {#if $initq.data.job.metaData?.jobScript} -
{$initq.data.job.metaData?.jobScript}
- {:else} - No job script available - {/if} -
-
- -
- {#if $initq.data.job.metaData?.slurmInfo} -
{$initq.data.job.metaData?.slurmInfo}
- {:else} - No additional slurm information available - {/if} -
-
-
+ +
+ {#if $initq.data.job.metaData?.slurmInfo} +
{$initq.data.job.metaData?.slurmInfo}
+ {:else} + No additional slurm information available + {/if} +
+
+ +
{/if} diff --git a/web/frontend/src/generic/helper/ConcurrentJobs.svelte b/web/frontend/src/generic/helper/ConcurrentJobs.svelte index 79e1886..c0de0b6 100644 --- a/web/frontend/src/generic/helper/ConcurrentJobs.svelte +++ b/web/frontend/src/generic/helper/ConcurrentJobs.svelte @@ -13,62 +13,86 @@ import { Card, CardHeader, - CardTitle, CardBody, Icon } from "@sveltestrap/sveltestrap"; export let cJobs; export let showLinks = false; - export let displayTitle = true; + export let renderCard = false; export let width = "auto"; - export let height = "310px"; + export let height = "400px"; - - {#if displayTitle} - - +{#if renderCard} + + {cJobs.items.length} Concurrent Jobs - - {/if} - - {#if showLinks} -
    -
  • - See All -
  • - {#each cJobs.items as cJob} + + {#if showLinks} + - {:else} - {#if displayTitle} -

    - Jobs running on the same node with overlapping runtimes using shared resources. -

    + {#each cJobs.items as cJob} +
  • + {cJob.jobId} +
  • + {/each} +
{:else} -

- {cJobs.items.length} - Jobs running on the same node with overlapping runtimes using shared resources. -

+
    + {#each cJobs.items as cJob} +
  • + {cJob.jobId} +
  • + {/each} +
{/if} - {/if} -
-
+ + +{:else} +

+ Jobs running on the same node with overlapping runtimes using shared resources. +

+
+ {#if showLinks} + + {:else} +
    + {#each cJobs.items as cJob} +
  • + {cJob.jobId} +
  • + {/each} +
+ {/if} +{/if} diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index 88777f6..606d05b 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -84,7 +84,7 @@ } -
+
@@ -146,8 +146,6 @@
-
-

Date: Thu, 5 Sep 2024 17:27:18 +0200
Subject: [PATCH 140/443] Manual merge changes not staged last time ...

---
 internal/archiver/archiver.go               |  2 +-
 internal/metricDataDispatcher/dataLoader.go | 33 ++++++++++++++++---
 .../taskManager/updateFootprintService.go   |  2 +-
 3 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go
index de84cf0..1c4a3ec 100644
--- a/internal/archiver/archiver.go
+++ b/internal/archiver/archiver.go
@@ -34,7 +34,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error)
 		scopes = append(scopes, schema.MetricScopeAccelerator)
 	}
 
-	jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx)
+	jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resolution-Value retrieves highest res (60s)
 	if err != nil {
 		log.Error("Error while loading job data for archiving")
 		return nil, err
diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go
index 2c7cfa6..121fbf4 100644
--- a/internal/metricDataDispatcher/dataLoader.go
+++ b/internal/metricDataDispatcher/dataLoader.go
@@ -14,6 +14,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
+	"github.com/ClusterCockpit/cc-backend/pkg/resampler"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
 
@@ -23,11 +24,12 @@ func cacheKey(
 	job *schema.Job,
 	metrics []string,
 	scopes []schema.MetricScope,
+	resolution int,
 ) string {
 	// Duration and StartTime do not need to be in the cache key as StartTime is less unique than
 	// job.ID and the TTL of the cache entry makes sure it does not stay there forever.
-	return fmt.Sprintf("%d(%s):[%v],[%v]",
-		job.ID, job.State, metrics, scopes)
+	return fmt.Sprintf("%d(%s):[%v],[%v]-%d",
+		job.ID, job.State, metrics, scopes, resolution)
 }
 
 // Fetches the metric data for a job.
@@ -35,8 +37,9 @@ func LoadData(job *schema.Job,
 	metrics []string,
 	scopes []schema.MetricScope,
 	ctx context.Context,
+	resolution int,
 ) (schema.JobData, error) {
-	data := cache.Get(cacheKey(job, metrics, scopes), func() (_ interface{}, ttl time.Duration, size int) {
+	data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ interface{}, ttl time.Duration, size int) {
 		var jd schema.JobData
 		var err error
 
@@ -60,7 +63,7 @@ func LoadData(job *schema.Job,
 			}
 		}
 
-		jd, err = repo.LoadData(job, metrics, scopes, ctx)
+		jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution)
 		if err != nil {
 			if len(jd) != 0 {
 				log.Warnf("partial error: %s", err.Error())
@@ -72,12 +75,31 @@
 			}
 			size = jd.Size()
 		} else {
-			jd, err = archive.GetHandle().LoadJobData(job)
+			var jd_temp schema.JobData
+			jd_temp, err = archive.GetHandle().LoadJobData(job)
 			if err != nil {
 				log.Error("Error while loading job data from archive")
 				return err, 0, 0
 			}
 
+			//Deep copy the cached archive hashmap
+			jd = metricdata.DeepCopy(jd_temp)
+
+			//Resampling for archived data.
+			//Pass the resolution from frontend here.
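(The added hunk continues below with the loop that applies the resampling.) For orientation:
LargestTriangleThreeBucket is the LTTB downsampling algorithm — for each output bucket it keeps
the sample that forms the largest triangle with the previously kept point and the average of the
following bucket, so peaks and dips survive the reduction. Because `resolution` is now part of
the cache key above, differently resampled copies of the same job no longer collide in the LRU
cache. A hedged sketch of the per-series call contract, with the concrete values assumed for
illustration only:

    // Reduce one series from its native 60 s timestep to roughly one
    // point per 240 s; the resampler returns new data and new timestep.
    data, newTimestep, err := resampler.LargestTriangleThreeBucket(series.Data, 60, 240)
    if err != nil {
        return err
    }
    series.Data = data // the parent JobMetric's Timestep becomes newTimestep
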
+			for _, v := range jd {
+				for _, v_ := range v {
+					timestep := 0
+					for i := 0; i < len(v_.Series); i += 1 {
+						v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, v_.Timestep, resolution)
+						if err != nil {
+							return err, 0, 0
+						}
+					}
+					v_.Timestep = timestep
+				}
+			}
+
 			// Avoid sending unrequested data to the client:
 			if metrics != nil || scopes != nil {
 				if metrics == nil {
@@ -117,6 +139,7 @@
 	}
 
 	// FIXME: Review: Is this really necessary or correct.
+	// Note: Lines 142-170 formerly known as prepareJobData(jobData, scopes)
 	// For /monitoring/job/ and some other places, flops_any and mem_bw need
 	// to be available at the scope 'node'. If a job has a lot of nodes,
 	// statisticsSeries should be available so that a min/median/max Graph can be
diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go
index 2434fd1..2de3bd4 100644
--- a/internal/taskManager/updateFootprintService.go
+++ b/internal/taskManager/updateFootprintService.go
@@ -47,7 +47,7 @@ func RegisterFootprintWorker() {
 			scopes = append(scopes, schema.MetricScopeAccelerator)
 
 			for _, job := range jobs {
-				jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, context.Background())
+				jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, context.Background(), 0) // 0 Resolution-Value retrieves highest res (60s)
 				if err != nil {
 					log.Error("Error while loading job data for footprint update")
 					continue

From 5482b9be2c603571bea15fa49856b0d75a3bc2de Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Fri, 6 Sep 2024 11:24:54 +0200
Subject: [PATCH 141/443] Add debug output

---
 internal/taskManager/updateFootprintService.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go
index 2434fd1..55f4cef 100644
--- a/internal/taskManager/updateFootprintService.go
+++ b/internal/taskManager/updateFootprintService.go
@@ -47,9 +47,10 @@ func RegisterFootprintWorker() {
 			scopes = append(scopes, schema.MetricScopeAccelerator)
 
 			for _, job := range jobs {
+				log.Debugf("Try job %d", job.JobID)
 				jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, context.Background())
 				if err != nil {
-					log.Error("Error while loading job data for footprint update")
+					log.Errorf("Error while loading job data for footprint update: %v", err)
 					continue
 				}
@@ -107,6 +108,7 @@
 				// 	log.Errorf("Update job (dbid: %d) failed at db execute: %s", job.ID, err.Error())
 				// 	continue
 				// }
+				log.Debugf("Finish job %d", job.JobID)
 			}
 
 			jobRepo.TransactionCommit(t)

From 8e1c5a485faa93bd48c207cd011103cb3e000320 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Fri, 6 Sep 2024 12:00:33 +0200
Subject: [PATCH 142/443] Improve grid scaling

---
 web/frontend/src/Job.root.svelte              | 50 +++++++++----------
 .../src/generic/helper/ConcurrentJobs.svelte  | 20 ++++----
 .../src/generic/plots/Roofline.svelte         |  2 -
 3 files changed, 34 insertions(+), 38 deletions(-)

diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte
index 8ea259a..2afa5a8 100644
--- a/web/frontend/src/Job.root.svelte
+++ b/web/frontend/src/Job.root.svelte
@@ -59,7 +59,8 @@
 
   let plots = {},
     jobTags,
-    statsTable
+    statsTable,
+    roofWidth
 
   let missingMetrics = [],
     missingHosts = [],
@@ -231,9 +232,9 @@
   }));
 
-
+
 
-
+
{#if $initq.error} {$initq.error.message} {:else if $initq?.data && $jobMetrics?.data} @@ -281,15 +282,10 @@ {/if} - {:else} - {/if} - - - - - + + {#if $initq.error || $jobMetrics.error}

Initq Error: {$initq.error?.message}

@@ -297,20 +293,24 @@
{:else if $initq?.data && $jobMetrics?.data} - c.name == $initq.data.job.cluster) - .subClusters.find((sc) => sc.name == $initq.data.job.subCluster)} - data={transformDataForRoofline( - $jobMetrics.data.jobMetrics.find( - (m) => m.name == "flops_any" && m.scope == "node", - )?.metric, - $jobMetrics.data.jobMetrics.find( - (m) => m.name == "mem_bw" && m.scope == "node", - )?.metric, - )} - /> +
+ c.name == $initq.data.job.cluster) + .subClusters.find((sc) => sc.name == $initq.data.job.subCluster)} + data={transformDataForRoofline( + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "flops_any" && m.scope == "node", + )?.metric, + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "mem_bw" && m.scope == "node", + )?.metric, + )} + /> +
{:else} diff --git a/web/frontend/src/generic/helper/ConcurrentJobs.svelte b/web/frontend/src/generic/helper/ConcurrentJobs.svelte index c0de0b6..85bac83 100644 --- a/web/frontend/src/generic/helper/ConcurrentJobs.svelte +++ b/web/frontend/src/generic/helper/ConcurrentJobs.svelte @@ -4,7 +4,7 @@ Properties: - `cJobs JobLinkResultList`: List of concurrent Jobs - `showLinks Bool?`: Show list as clickable links [Default: false] - - `displayTitle Bool?`: If to display cardHeader with title [Default: true] + - `renderCard Bool?`: If to render component as content only or with card wrapping [Default: true] - `width String?`: Width of the card [Default: 'auto'] - `height String?`: Height of the card [Default: '310px'] --> @@ -64,17 +64,15 @@ {:else}

- Jobs running on the same node with overlapping runtimes using shared resources. + {cJobs.items.length} Jobs running on the same node with overlapping runtimes using shared resources. + ( See All )


{#if showLinks}
@@ -246,7 +246,7 @@ {fpd.message}{fpd.message} {/if} {/each} diff --git a/web/frontend/src/job/JobSummary.svelte b/web/frontend/src/job/JobSummary.svelte index 48f6e99..3da86a5 100644 --- a/web/frontend/src/job/JobSummary.svelte +++ b/web/frontend/src/job/JobSummary.svelte @@ -265,7 +265,7 @@ {fpd.message}{fpd.message}

@@ -303,7 +303,7 @@ {fpd.message}{fpd.message} {/if} {/each} diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index 606d05b..a71f60f 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -89,7 +89,7 @@ {#each selectedMetrics as metric} diff --git a/web/frontend/src/job/TagManagement.svelte b/web/frontend/src/job/TagManagement.svelte index 408cc70..2147a8d 100644 --- a/web/frontend/src/job/TagManagement.svelte +++ b/web/frontend/src/job/TagManagement.svelte @@ -7,21 +7,28 @@ - `username String`: Empty string if auth. is disabled, otherwise the username as string - `authlevel Number`: The current users authentication level - `roles [Number]`: Enum containing available roles + - `renderModal Bool?`: If component is rendered as bootstrap modal button [Default: true] --> - (isOpen = !isOpen)}> - - Manage Tags - {#if pendingChange !== false} - - {:else} - - {/if} - - +{#if renderModal} + (isOpen = !isOpen)}> + + Manage Tags + {#if pendingChange !== false} + + {:else} + + {/if} + + + + + + Search using "type: name". If no tag matches your search, a + button for creating a new one will appear. + +
+
    + {#each allTagsFiltered as tag} + + + + + {#if pendingChange === tag.id} + + {:else if job.tags.find((t) => t.id == tag.id)} + + {:else} + + {/if} + + + {:else} + + No tags matching + + {/each} +
+
+ {#if newTagType && newTagName && isNewTag(newTagType, newTagName)} +
+ + {#if roles && authlevel >= roles.admin} + + {/if} +
+ {:else if allTagsFiltered.length == 0} + Search Term is not a valid Tag (type: name) + {/if} +
+ + + +
+ + +{:else} + + + + + + + Search using "type: name". If no tag matches your search, a + button for creating a new one will appear. + + -
- - - Search using "type: name". If no tag matches your search, a - button for creating a new one will appear. - - -
    - {#each allTagsFiltered as tag} - - + {#if usedTagsFiltered.length > 0} + + {#each usedTagsFiltered as utag} + + - {#if pendingChange === tag.id} + {#if pendingChange === utag.id} + + {:else} + + {/if} + + + {/each} + + {:else if filterTerm !== ""} + + + No used tags matching + + + {/if} + + {#if unusedTagsFiltered.length > 0} + + {#each unusedTagsFiltered as uutag} + + + + + {#if pendingChange === uutag.id} - {:else if job.tags.find((t) => t.id == tag.id)} - {:else} {/if} - {:else} - - No tags matching - {/each} -
-
- {#if newTagType && newTagName && isNewTag(newTagType, newTagName)} -
+ + {:else if filterTerm !== ""} + + + No unused tags matching + + + {/if} + + {#if newTagType && newTagName && isNewTag(newTagType, newTagName)} + + - {#if roles && authlevel >= roles.admin} - - - {/if} -
- {:else if allTagsFiltered.length == 0} - Search Term is not a valid Tag (type: name) - {/if} -
- - - -
- - + + + {/if} +
+ {:else if allTagsFiltered.length == 0} + Search Term is not a valid Tag (type: name) + {/if} +{/if} From ccbf3867e14b2b198177bad17b2d0b2a154dbcfd Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 16 Sep 2024 13:54:40 +0200 Subject: [PATCH 154/443] change global tag color from gray to magenta --- web/frontend/src/generic/helper/Tag.svelte | 4 ++-- web/templates/monitoring/taglist.tmpl | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/generic/helper/Tag.svelte b/web/frontend/src/generic/helper/Tag.svelte index 66b4312..2be9ee6 100644 --- a/web/frontend/src/generic/helper/Tag.svelte +++ b/web/frontend/src/generic/helper/Tag.svelte @@ -38,9 +38,9 @@
{#if tag} {#if tag?.scope === "global"} - {tag.type}: {tag.name} + {tag.type}: {tag.name} {:else if tag.scope === "admin"} - {tag.type}: {tag.name} + {tag.type}: {tag.name} {:else} {tag.type}: {tag.name} {/if} diff --git a/web/templates/monitoring/taglist.tmpl b/web/templates/monitoring/taglist.tmpl index ea29cd7..7831a64 100644 --- a/web/templates/monitoring/taglist.tmpl +++ b/web/templates/monitoring/taglist.tmpl @@ -8,10 +8,10 @@
{{ range $tagList }} {{if eq .scope "global"}} - + {{ .name }} {{ .count }} {{else if eq .scope "admin"}} - + {{ .name }} {{ .count }} {{else}} From 2736b5d1ef4287c10bcbc51d732555fc815bd348 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 16 Sep 2024 15:00:42 +0200 Subject: [PATCH 155/443] change background color for tag listitems --- web/frontend/src/job/TagManagement.svelte | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/job/TagManagement.svelte b/web/frontend/src/job/TagManagement.svelte index f358c71..b6d290a 100644 --- a/web/frontend/src/job/TagManagement.svelte +++ b/web/frontend/src/job/TagManagement.svelte @@ -207,7 +207,7 @@ {#if usedTagsFiltered.length > 0} {#each usedTagsFiltered as utag} - + @@ -243,7 +243,7 @@ {#if unusedTagsFiltered.length > 0} {#each unusedTagsFiltered as uutag} - + @@ -343,7 +343,7 @@ {#if usedTagsFiltered.length > 0} {#each usedTagsFiltered as utag} - + @@ -379,7 +379,7 @@ {#if unusedTagsFiltered.length > 0} {#each unusedTagsFiltered as uutag} - + From e29be2f140dca8b0281c1d24efa95c848da44c2d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 16 Sep 2024 15:03:38 +0200 Subject: [PATCH 156/443] fix missing scope field request for jobview --- web/frontend/src/Job.root.svelte | 2 +- web/frontend/src/generic/helper/Tag.svelte | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index ca11692..517671d 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -75,7 +75,7 @@ duration, numNodes, numHWThreads, numAcc, SMT, exclusive, partition, subCluster, arrayJobId, monitoringStatus, state, walltime, - tags { id, type, name }, + tags { id, type, scope, name }, resources { hostname, hwthreads, accelerators }, metaData, userData { name, email }, diff --git a/web/frontend/src/generic/helper/Tag.svelte b/web/frontend/src/generic/helper/Tag.svelte index 2be9ee6..b3e2d6b 100644 --- a/web/frontend/src/generic/helper/Tag.svelte +++ b/web/frontend/src/generic/helper/Tag.svelte @@ -39,7 +39,7 @@ {#if tag} {#if tag?.scope === "global"} {tag.type}: {tag.name} - {:else if tag.scope === "admin"} + {:else if tag?.scope === "admin"} {tag.type}: {tag.name} {:else} {tag.type}: {tag.name} From f1893c596e41aaf270f3438ac2e26b1d38238042 Mon Sep 17 00:00:00 2001 From: Aditya Ujeniya Date: Tue, 17 Sep 2024 14:36:42 +0200 Subject: [PATCH 157/443] Versioning to query endpoint --- internal/metricdata/cc-metric-store.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 1f3300e..f2853e3 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -140,6 +140,13 @@ func (ccms *CCMetricStore) doRequest( req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) } + // versioning the cc-metric-store query API. 
+ // v2 = data with resampling + // v1 = data without resampling + q := req.URL.Query() + q.Add("version", "v2") + req.URL.RawQuery = q.Encode() + res, err := ccms.client.Do(req) if err != nil { log.Error("Error while performing request") @@ -198,12 +205,17 @@ func (ccms *CCMetricStore) LoadData( jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric) } + res := row[0].Resolution + if res == 0 { + res = mc.Timestep + } + jobMetric, ok := jobData[metric][scope] if !ok { jobMetric = &schema.JobMetric{ Unit: mc.Unit, - Timestep: row[0].Resolution, + Timestep: res, Series: make([]schema.Series, 0), } jobData[metric][scope] = jobMetric @@ -623,7 +635,7 @@ func (ccms *CCMetricStore) LoadNodeData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error("Error while performing request") + log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) return nil, err } From d7a8bbf40b5126cdca4568ac84fc7fba8605eb86 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 18 Sep 2024 17:23:29 +0200 Subject: [PATCH 158/443] Rework tag and tag edit placement, add other feedback - admin message shown primarily if exists - comment demo summary tab --- web/frontend/src/Job.root.svelte | 59 ++-- web/frontend/src/generic/helper/Tag.svelte | 22 +- .../helper}/TagManagement.svelte | 17 +- .../src/generic/joblist/JobInfo.svelte | 37 ++- web/frontend/src/job/JobSummary.svelte | 281 +++++++++--------- 5 files changed, 217 insertions(+), 199 deletions(-) rename web/frontend/src/{job => generic/helper}/TagManagement.svelte (97%) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 8e508fa..cddace1 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -25,7 +25,6 @@ CardHeader, CardTitle, Button, - Icon, } from "@sveltestrap/sveltestrap"; import { getContext } from "svelte"; import { @@ -35,7 +34,6 @@ transformDataForRoofline, } from "./generic/utils.js"; import Metric from "./job/Metric.svelte"; - import TagManagement from "./job/TagManagement.svelte"; import StatsTable from "./job/StatsTable.svelte"; import JobSummary from "./job/JobSummary.svelte"; import ConcurrentJobs from "./generic/helper/ConcurrentJobs.svelte"; @@ -54,12 +52,10 @@ const ccconfig = getContext("cc-config") let isMetricsSelectionOpen = false, - showFootprint = !!ccconfig[`job_view_showFootprint`], selectedMetrics = [], selectedScopes = []; let plots = {}, - jobTags, roofWidth let missingMetrics = [], @@ -240,14 +236,22 @@ {:else if $initq.data} - + {#if $initq.data?.job?.metaData?.message} + + + +
Job {$initq.data?.job?.jobId} ({$initq.data?.job?.cluster})
+ The following note was added by administrators: +
+ + {@html $initq.data.job.metaData.message} + +
+
+ {/if} + - - - - - - + {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} @@ -260,15 +264,6 @@
{/if} - {#if $initq.data?.job?.metaData?.message} - - -

This note was added by administrators:

-
-

{@html $initq.data.job.metaData.message}

-
-
- {/if}
{:else} @@ -276,21 +271,19 @@ {/if} - - {#if showFootprint} - - {#if $initq.error} - {$initq.error.message} - {:else if $initq?.data && $jobMetrics?.data} - - {:else} - - {/if} - - {/if} + + + {#if $initq.error} + {$initq.error.message} + {:else if $initq?.data && $jobMetrics?.data} + + {:else} + + {/if} + - + {#if $initq.error || $jobMetrics.error}

Initq Error: {$initq.error?.message}

diff --git a/web/frontend/src/generic/helper/Tag.svelte b/web/frontend/src/generic/helper/Tag.svelte index b3e2d6b..7efaf63 100644 --- a/web/frontend/src/generic/helper/Tag.svelte +++ b/web/frontend/src/generic/helper/Tag.svelte @@ -23,12 +23,22 @@ if ($initialized && tag == null) tag = allTags.find(tag => tag.id == id) } + + function getScopeColor(scope) { + switch (scope) { + case "admin": + return "#19e5e6"; + case "global": + return "#c85fc8"; + default: + return "#ffc107"; + } + } From c7d0c86d52be8a3a47df18c94b406397e3f0cc22 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 27 Sep 2024 13:46:19 +0200 Subject: [PATCH 168/443] add missing template changes --- web/templates/monitoring/job.tmpl | 1 + 1 file changed, 1 insertion(+) diff --git a/web/templates/monitoring/job.tmpl b/web/templates/monitoring/job.tmpl index 92365b3..09ef7ba 100644 --- a/web/templates/monitoring/job.tmpl +++ b/web/templates/monitoring/job.tmpl @@ -15,6 +15,7 @@ const authlevel = {{ .User.GetAuthLevel }}; const roles = {{ .Roles }}; const resampleConfig = {{ .Resampling }}; + const emission = {{ .Infos.emission }} {{end}} From 183b31069619cabfa4eaa6d29c15b388be776262 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 27 Sep 2024 13:48:14 +0200 Subject: [PATCH 169/443] add base constant to tooltip --- web/frontend/src/job/EnergySummary.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/job/EnergySummary.svelte b/web/frontend/src/job/EnergySummary.svelte index f22f984..70e419f 100644 --- a/web/frontend/src/job/EnergySummary.svelte +++ b/web/frontend/src/job/EnergySummary.svelte @@ -64,7 +64,7 @@ Estimated emission based on supplier energy mix and total energy consumption. + >Estimated emission based on supplier energy mix ({carbonPerkWh} g/kWh) and total energy consumption. {/if} From dcb8308f35a3c32a18fc7463dd785d8be496df44 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 30 Sep 2024 12:27:32 +0200 Subject: [PATCH 170/443] add icons to energySummary component --- internal/graph/schema.resolvers.go | 2 +- web/frontend/src/job/EnergySummary.svelte | 34 +++++++++++++++++++---- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 8c5ee0d..bde7013 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -99,7 +99,7 @@ func (r *jobResolver) EnergyFootprint(ctx context.Context, obj *schema.Job) ([]* case matchCore.MatchString(test): hwType = "Core" default: - hwType = "Hardware" + hwType = "Other" } res = append(res, &model.EnergyFootprintValue{ diff --git a/web/frontend/src/job/EnergySummary.svelte b/web/frontend/src/job/EnergySummary.svelte index 70e419f..e614865 100644 --- a/web/frontend/src/job/EnergySummary.svelte +++ b/web/frontend/src/job/EnergySummary.svelte @@ -15,6 +15,7 @@ Tooltip, Row, Col, + Icon } from "@sveltestrap/sveltestrap"; import { round } from "mathjs"; @@ -34,7 +35,22 @@ {#each job.energyFootprint as efp} -
{efp.hardware}: {efp.value} Wh ({efp.metric})
+
+ {#if efp.hardware === 'CPU'} + + {:else if efp.hardware === 'Accelerator'} + + {:else if efp.hardware === 'Memory'} + + {:else if efp.hardware === 'Core'} + + {:else} + + {/if} + +
+
+
{efp.hardware}: {efp.value} Wh ({efp.metric})
Estimated energy consumption based on metric {efp.metric} and job runtime. {/each} - -
Total Energy: {job?.energy? job.energy : 0} Wh
+ +
+ +
+
+
Total Energy: {job?.energy? job.energy : 0} Wh
{#if carbonPerkWh} - -
Carbon Emission: {carbonMass} kg
+ +
+ +
+
+
Carbon Emission: {carbonMass} kg
{/if}
 

From c50e79375acf29c7013e45490f3eff2126f8ee0e Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Mon, 30 Sep 2024 15:27:49 +0200
Subject: [PATCH 171/443] fix ccb side of unintentionally added endpoint format change in ccms

---
 internal/metricdata/cc-metric-store.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go
index f2853e3..ce5101c 100644
--- a/internal/metricdata/cc-metric-store.go
+++ b/internal/metricdata/cc-metric-store.go
@@ -85,7 +85,7 @@ func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {
 	}
 
 	ccms.url = config.Url
-	ccms.queryEndpoint = fmt.Sprintf("%s/api/query/", config.Url)
+	ccms.queryEndpoint = fmt.Sprintf("%s/api/query", config.Url)
 	ccms.jwt = config.Token
 	ccms.client = http.Client{
 		Timeout: 10 * time.Second,

From 218e56576ad0955808f69d25fbe771a1136ab3c7 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Mon, 30 Sep 2024 16:33:28 +0200
Subject: [PATCH 172/443] round calculated updateFootprint values to two digits

---
 internal/taskManager/updateFootprintService.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go
index 2a3eaef..b21502a 100644
--- a/internal/taskManager/updateFootprintService.go
+++ b/internal/taskManager/updateFootprintService.go
@@ -74,14 +74,15 @@ func RegisterFootprintWorker() {
 					max = math.Max(max, series.Statistics.Max)
 				}
 
+				// Add values rounded to 2 digits
 				jobMeta.Statistics[metric] = schema.JobStatistics{
 					Unit: schema.Unit{
 						Prefix: archive.GetMetricConfig(job.Cluster, metric).Unit.Prefix,
 						Base:   archive.GetMetricConfig(job.Cluster, metric).Unit.Base,
 					},
-					Avg: avg / float64(job.NumNodes),
-					Min: min,
-					Max: max,
+					Avg: (math.Round((avg/float64(job.NumNodes))*100) / 100),
+					Min: (math.Round(min*100) / 100),
+					Max: (math.Round(max*100) / 100),
 				}
 			}

From a9868fd275791f49ff169b3bf0952a334daf2b77 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Mon, 30 Sep 2024 16:43:38 +0200
Subject: [PATCH 173/443] display energySummary only if energy data is present

---
 web/frontend/src/Job.root.svelte          |  4 +--
 web/frontend/src/job/EnergySummary.svelte | 30 +++++++++++++----------
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte
index 899d5af..9601e9c 100644
--- a/web/frontend/src/Job.root.svelte
+++ b/web/frontend/src/Job.root.svelte
@@ -310,10 +310,10 @@
-{#if $initq?.data} +{#if $initq?.data && $initq.data.job.energyFootprint.length != 0} - + {/if} diff --git a/web/frontend/src/job/EnergySummary.svelte b/web/frontend/src/job/EnergySummary.svelte index e614865..099cb57 100644 --- a/web/frontend/src/job/EnergySummary.svelte +++ b/web/frontend/src/job/EnergySummary.svelte @@ -1,8 +1,10 @@ - {#each job.energyFootprint as efp} - + {#each jobEnergyFootprint as efp} +
{#if efp.hardware === 'CPU'} @@ -53,20 +57,20 @@
{efp.hardware}: {efp.value} Wh ({efp.metric})
Estimated energy consumption based on metric {efp.metric} and job runtime. {/each} - +

-
Total Energy: {job?.energy? job.energy : 0} Wh
+
Total Energy: {jobEnergy? jobEnergy : 0} Wh
{#if carbonPerkWh} - +
@@ -79,14 +83,14 @@ Estimated total energy consumption of job. {#if carbonPerkWh} Estimated emission based on supplier energy mix ({carbonPerkWh} g/kWh) and total energy consumption. From 582dc8bf46c9dc9a13192af02e5e5c152afebbe4 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 30 Sep 2024 18:29:46 +0200 Subject: [PATCH 174/443] add energy column index --- internal/repository/migrations/sqlite3/08_add-footprint.up.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index e555c57..de151f2 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -67,6 +67,7 @@ CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); +CREATE INDEX IF NOT EXISTS jobs_energy ON job (energy); CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); CREATE INDEX IF NOT EXISTS jobs_numhwthreads ON job (num_hwthreads); From 3b94863521b064978e12ace0e18561e8cb0b6c8b Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 30 Sep 2024 18:30:26 +0200 Subject: [PATCH 175/443] add sorting for job energy column --- web/frontend/src/generic/select/SortSelection.svelte | 1 + 1 file changed, 1 insertion(+) diff --git a/web/frontend/src/generic/select/SortSelection.svelte b/web/frontend/src/generic/select/SortSelection.svelte index 5125445..ae416d7 100644 --- a/web/frontend/src/generic/select/SortSelection.svelte +++ b/web/frontend/src/generic/select/SortSelection.svelte @@ -36,6 +36,7 @@ { field: "numNodes", type: "col", text: "Number of Nodes", order: "DESC" }, { field: "numHwthreads", type: "col", text: "Number of HWThreads", order: "DESC" }, { field: "numAcc", type: "col", text: "Number of Accelerators", order: "DESC" }, + { field: "energy", type: "col", text: "Total Energy", order: "DESC" }, ...getSortItems() ] } From b3222f3523dfbedeb4e2704a860f232b68aaf9ff Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 30 Sep 2024 18:31:49 +0200 Subject: [PATCH 176/443] fix: archived statisticsSeries with mean data now shown again --- internal/metricDataDispatcher/dataLoader.go | 4 +++- pkg/schema/metrics.go | 1 + web/frontend/src/Job.root.svelte | 1 + web/frontend/src/generic/joblist/JobListRow.svelte | 1 + web/frontend/src/generic/plots/MetricPlot.svelte | 12 ++++++++---- web/frontend/src/job/Metric.svelte | 1 + 6 files changed, 15 insertions(+), 5 deletions(-) diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index 121fbf4..1f2e175 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -139,11 +139,13 @@ func LoadData(job *schema.Job, } // FIXME: Review: Is this really necessary or correct. - // Note: Lines 142-170 formerly known as prepareJobData(jobData, scoeps) + // Note: Lines 147-170 formerly known as prepareJobData(jobData, scopes) // For /monitoring/job/ and some other places, flops_any and mem_bw need // to be available at the scope 'node'. If a job has a lot of nodes, // statisticsSeries should be available so that a min/median/max Graph can be // used instead of a lot of single lines. 
+ // NOTE: New StatsSeries will always be calculated as 'min/median/max' + // Existing (archived) StatsSeries can be 'min/mean/max'! const maxSeriesSize int = 15 for _, scopes := range jd { for _, jm := range scopes { diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go index 08636f1..9db853d 100644 --- a/pkg/schema/metrics.go +++ b/pkg/schema/metrics.go @@ -123,6 +123,7 @@ func (jd *JobData) Size() int { for _, metric := range scopes { if metric.StatisticsSeries != nil { n += len(metric.StatisticsSeries.Max) + n += len(metric.StatisticsSeries.Mean) n += len(metric.StatisticsSeries.Median) n += len(metric.StatisticsSeries.Min) } diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 9601e9c..2827248 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -98,6 +98,7 @@ timestep statisticsSeries { min + mean median max } diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index b1e1511..f4cc71a 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -53,6 +53,7 @@ timestep statisticsSeries { min + mean median max } diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 230e090..1c09150 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -133,8 +133,8 @@ export let zoomState = null; if (useStatsSeries == null) useStatsSeries = statisticsSeries != null; - if (useStatsSeries == false && series == null) useStatsSeries = true; + const usesMeanStatsSeries = (useStatsSeries && statisticsSeries.mean.length != 0) const dispatch = createEventDispatcher(); const subClusterTopology = getContext("getHardwareTopology")(cluster, subCluster); @@ -278,7 +278,7 @@ } const longestSeries = useStatsSeries - ? statisticsSeries.median.length + ? (usesMeanStatsSeries ? statisticsSeries.mean.length : statisticsSeries.median.length) : series.reduce((n, series) => Math.max(n, series.data.length), 0); const maxX = longestSeries * timestep; let maxY = null; @@ -327,7 +327,11 @@ if (useStatsSeries) { plotData.push(statisticsSeries.min); plotData.push(statisticsSeries.max); - plotData.push(statisticsSeries.median); + if (usesMeanStatsSeries) { + plotData.push(statisticsSeries.mean); + } else { + plotData.push(statisticsSeries.median); + } /* deprecated: sparse data handled by uplot */ // if (forNode === true) { @@ -426,7 +430,7 @@ // Draw plot type label: let textl = `${scope}${plotSeries.length > 2 ? "s" : ""}${ useStatsSeries - ? ": min/median/max" + ? (usesMeanStatsSeries ? ": min/mean/max" : ": min/median/max") : metricConfig != null && scope != metricConfig.scope ? 
` (${metricConfig.aggregation})` : ""

diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte
index 89b9d30..4b4c8d0 100644
--- a/web/frontend/src/job/Metric.svelte
+++ b/web/frontend/src/job/Metric.svelte
@@ -74,6 +74,7 @@
         timestep
         statisticsSeries {
           min
+          mean
           median
           max
         }

From 6fe93ecb7eb47b6f8f8c52d4073decebeddefd5e Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Tue, 1 Oct 2024 11:42:46 +0200
Subject: [PATCH 177/443] fix adaptive legend title

---
 web/frontend/src/generic/plots/MetricPlot.svelte | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte
index 1c09150..7950b04 100644
--- a/web/frontend/src/generic/plots/MetricPlot.svelte
+++ b/web/frontend/src/generic/plots/MetricPlot.svelte
@@ -355,7 +355,7 @@
       stroke: "green",
     });
     plotSeries.push({
-      label: "median",
+      label: usesMeanStatsSeries ? "mean" : "median",
       scale: "y",
       width: lineWidth,
       stroke: "black",

From 82baf5d38438882bd933014bef82b9e562d119d9 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Tue, 1 Oct 2024 12:48:32 +0200
Subject: [PATCH 178/443] fix deepCopy of statisticsSeries for archived jobs

---
 internal/metricdata/utils.go | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go
index f480e40..dcdaaaa 100644
--- a/internal/metricdata/utils.go
+++ b/internal/metricdata/utils.go
@@ -72,13 +72,21 @@ func DeepCopy(jd_temp schema.JobData) schema.JobData {
 			jd[k][k_].Unit.Base = v_.Unit.Base
 			jd[k][k_].Unit.Prefix = v_.Unit.Prefix
 			if v_.StatisticsSeries != nil {
+				// Init Slices
 				jd[k][k_].StatisticsSeries = new(schema.StatsSeries)
+				jd[k][k_].StatisticsSeries.Max = make([]schema.Float, len(v_.StatisticsSeries.Max))
+				jd[k][k_].StatisticsSeries.Min = make([]schema.Float, len(v_.StatisticsSeries.Min))
+				jd[k][k_].StatisticsSeries.Median = make([]schema.Float, len(v_.StatisticsSeries.Median))
+				jd[k][k_].StatisticsSeries.Mean = make([]schema.Float, len(v_.StatisticsSeries.Mean))
+				// Copy Data
 				copy(jd[k][k_].StatisticsSeries.Max, v_.StatisticsSeries.Max)
 				copy(jd[k][k_].StatisticsSeries.Min, v_.StatisticsSeries.Min)
 				copy(jd[k][k_].StatisticsSeries.Median, v_.StatisticsSeries.Median)
 				copy(jd[k][k_].StatisticsSeries.Mean, v_.StatisticsSeries.Mean)
+				// Handle Percentiles
 				for k__, v__ := range v_.StatisticsSeries.Percentiles {
-					jd[k][k_].StatisticsSeries.Percentiles[k__] = v__
+					jd[k][k_].StatisticsSeries.Percentiles[k__] = make([]schema.Float, len(v__))
+					copy(jd[k][k_].StatisticsSeries.Percentiles[k__], v__)
 				}
 			} else {
 				jd[k][k_].StatisticsSeries = v_.StatisticsSeries

From 615281601c2aebf1dde10f496bca017e4cad1cc0 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Tue, 1 Oct 2024 14:58:19 +0200
Subject: [PATCH 179/443] fix wrong flag labelling, change to kWh energy calculation

---
 internal/repository/job.go | 13 +++++++------
 web/frontend/src/job/EnergySummary.svelte | 8 ++++----
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/internal/repository/job.go b/internal/repository/job.go
index 16390f2..d408341 100644
--- a/internal/repository/job.go
+++ b/internal/repository/job.go
@@ -603,12 +603,13 @@ func (r *JobRepository) UpdateEnergy(
 	for _, fp := range sc.EnergyFootprint {
 		if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
-			// FIXME: Check for unit conversions
-			// Energy: Watts * Time
 			// Power: Energy / Time -> Correct labelling here?
-			if sc.MetricConfig[i].Energy == "power" {
-				// Unit: ( W * s ) / 3600 = Wh ; Rounded to 2 nearest digits
-				energy = math.Round(((LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600)*100) / 100
-			} else if sc.MetricConfig[i].Energy == "energy" {
+			// Note: For DB data, calculate and save as kWh
+			// Energy: Power (in Watts) * Time (in Seconds)
+			if sc.MetricConfig[i].Energy == "energy" {
+				// Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to nearest 2 digits
+				energy = math.Round(((LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600/1000)*100) / 100
+				// Power: Use directly as sum (Or as: [Energy (in Ws) / Time (in s)])
+			} else if sc.MetricConfig[i].Energy == "power" {
 				// This assumes the metric is of aggregation type sum
 			}
 		} else {

diff --git a/web/frontend/src/job/EnergySummary.svelte b/web/frontend/src/job/EnergySummary.svelte
index 099cb57..6b8e318 100644
--- a/web/frontend/src/job/EnergySummary.svelte
+++ b/web/frontend/src/job/EnergySummary.svelte
@@ -29,8 +29,8 @@
   let carbonMass;
 
   $: if (carbonPerkWh) {
-    // (( Wh / 1000 )* g/kWh) / 1000 = kg || Rounded to 2 Digits via [ round(x * 100) / 100 ]
-    carbonMass = round( (((jobEnergy ? jobEnergy : 0.0) / 1000 ) * carbonPerkWh) / 10 ) / 100;
+    // ( kWh * g/kWh) / 1000 = kg || Rounded to 2 Digits via [ round(x * 100) / 100 ]
+    carbonMass = round( ((jobEnergy ? jobEnergy : 0.0) * carbonPerkWh) / 10 ) / 100;
   }

@@ -54,7 +54,7 @@
-        {efp.hardware}: {efp.value} Wh ({efp.metric})
+        {efp.hardware}: {efp.value} kWh ({efp.metric})

-    Total Energy: {jobEnergy? jobEnergy : 0} Wh
+    Total Energy: {jobEnergy? jobEnergy : 0} kWh
 {#if carbonPerkWh}
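A quick stand-alone Go sketch of the unit conversions the patch above introduces; the function names and example figures are illustrative assumptions, not part of the cc-backend code:

package main

import (
	"fmt"
	"math"
)

// energyKWh restates the backend conversion: average power draw (Watts)
// times job duration (seconds), scaled to kWh and rounded to two digits:
// ( W * s ) / 3600 / 1000 = kWh
func energyKWh(avgPowerW float64, durationS int64) float64 {
	return math.Round(((avgPowerW*float64(durationS))/3600/1000)*100) / 100
}

// carbonKg restates the frontend estimate: energy (kWh) times the supplier
// energy mix (g/kWh), scaled to kg: ( kWh * g/kWh ) / 1000 = kg
func carbonKg(kwh, gramsPerKWh float64) float64 {
	return math.Round((kwh*gramsPerKWh)/10) / 100
}

func main() {
	e := energyKWh(3500, 7200) // assumed example: 3500 W average over a 2 h job -> 7 kWh
	c := carbonKg(e, 400)      // assumed example: 400 g/kWh energy mix -> 2.8 kg CO2
	fmt.Printf("energy: %.2f kWh, carbon: %.2f kg\n", e, c)
}

Storing kWh in the database keeps the frontend carbon estimate a single multiplication by the configured g/kWh energy-mix factor.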
From 60198915913a8b7e8869080a03f8bc9c7efc0f55 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Tue, 1 Oct 2024 16:25:09 +0200
Subject: [PATCH 180/443] add energy filter in new component

---
 api/schema.graphqls | 1 +
 internal/graph/generated/generated.go | 18 ++++-
 internal/graph/model/models_gen.go | 1 +
 internal/repository/jobQuery.go | 7 ++
 web/frontend/src/generic/Filters.svelte | 24 +++++++
 .../src/generic/filters/Energy.svelte | 70 +++++++++++++++++++
 web/frontend/src/generic/filters/Stats.svelte | 2 +-
 7 files changed, 121 insertions(+), 2 deletions(-)
 create mode 100644 web/frontend/src/generic/filters/Energy.svelte

diff --git a/api/schema.graphqls b/api/schema.graphqls
index 4aba1d5..994d94d 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -265,6 +265,7 @@ input JobFilter {
   cluster: StringInput
   partition: StringInput
   duration: IntRange
+  energy: FloatRange
 
   minRunningFor: Int
 
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index b4556a8..614f9c1 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -2153,6 +2153,7 @@ input JobFilter {
   cluster: StringInput
   partition: StringInput
   duration: IntRange
+  energy: FloatRange
 
   minRunningFor: Int
 
@@ -13452,7 +13453,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
 		asMap[k] = v
 	}
 
-	fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "exclusive", "node"}
+	fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "exclusive", "node"}
 	for _, k := range fieldsInOrder {
 		v, ok := asMap[k]
 		if !ok {
@@ -13522,6 +13523,13 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
 			return it, err
 		}
 		it.Duration = data
+	case "energy":
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("energy"))
+		data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v)
+		if err != nil {
+			return it, err
+		}
+		it.Energy = data
 	case "minRunningFor":
 		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("minRunningFor"))
 		data, err := ec.unmarshalOInt2ᚖint(ctx, v)
@@ -18550,6 +18558,14 @@ func (ec *executionContext) marshalOFloat2float64(ctx context.Context, sel ast.S
 	return graphql.WrapContextMarshaler(ctx, res)
 }
 
+func (ec *executionContext) unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx context.Context, v interface{}) (*model.FloatRange, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := ec.unmarshalInputFloatRange(ctx, v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
 func (ec *executionContext) marshalOFootprintValue2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprintValue(ctx context.Context, sel ast.SelectionSet, v []*model.FootprintValue) graphql.Marshaler {
 	if v == nil {
 		return graphql.Null
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index 58389ab..99841fd 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -58,6 +58,7 @@ type JobFilter struct {
 	Cluster *StringInput `json:"cluster,omitempty"`
 	Partition
*StringInput `json:"partition,omitempty"` Duration *schema.IntRange `json:"duration,omitempty"` + Energy *FloatRange `json:"energy,omitempty"` MinRunningFor *int `json:"minRunningFor,omitempty"` NumNodes *schema.IntRange `json:"numNodes,omitempty"` NumAccelerators *schema.IntRange `json:"numAccelerators,omitempty"` diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index 843d797..8c16bb3 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -192,6 +192,9 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select if filter.Node != nil { query = buildStringCondition("job.resources", filter.Node, query) } + if filter.Energy != nil { + query = buildFloatCondition("job.energy", filter.Energy, query) + } if filter.MetricStats != nil { for _, ms := range filter.MetricStats { query = buildFloatJsonCondition(ms.MetricName, ms.Range, query) @@ -204,6 +207,10 @@ func buildIntCondition(field string, cond *schema.IntRange, query sq.SelectBuild return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To) } +func buildFloatCondition(field string, cond *model.FloatRange, query sq.SelectBuilder) sq.SelectBuilder { + return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To) +} + func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBuilder) sq.SelectBuilder { if cond.From != nil && cond.To != nil { return query.Where(field+" BETWEEN ? AND ?", cond.From.Unix(), cond.To.Unix()) diff --git a/web/frontend/src/generic/Filters.svelte b/web/frontend/src/generic/Filters.svelte index d01c16b..97c98ff 100644 --- a/web/frontend/src/generic/Filters.svelte +++ b/web/frontend/src/generic/Filters.svelte @@ -32,6 +32,7 @@ import StartTime from "./filters/StartTime.svelte"; import Tags from "./filters/Tags.svelte"; import Duration from "./filters/Duration.svelte"; + import Energy from "./filters/Energy.svelte"; import Resources from "./filters/Resources.svelte"; import Statistics from "./filters/Stats.svelte"; @@ -68,6 +69,7 @@ jobName: filterPresets.jobName || "", node: filterPresets.node || null, + energy: filterPresets.energy || { from: null, to: null }, numNodes: filterPresets.numNodes || { from: null, to: null }, numHWThreads: filterPresets.numHWThreads || { from: null, to: null }, numAccelerators: filterPresets.numAccelerators || { from: null, to: null }, @@ -80,6 +82,7 @@ isStartTimeOpen = false, isTagsOpen = false, isDurationOpen = false, + isEnergyOpen = false, isResourcesOpen = false, isStatsOpen = false, isNodesModified = false, @@ -110,6 +113,10 @@ items.push({ duration: { from: 0, to: filters.duration.lessThan } }); if (filters.duration.moreThan) items.push({ duration: { from: filters.duration.moreThan, to: 604800 } }); // 7 days to include special jobs with long runtimes + if (filters.energy.from || filters.energy.to) + items.push({ + energy: { from: filters.energy.from, to: filters.energy.to }, + }); if (filters.jobId) items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } }); if (filters.arrayJobId != null) @@ -181,6 +188,8 @@ opts.push(`duration=0-${filters.duration.lessThan}`); if (filters.duration.moreThan) opts.push(`duration=${filters.duration.moreThan}-604800`); + if (filters.energy.from && filters.energy.to) + opts.push(`energy=${filters.energy.from}-${filters.energy.to}`); if (filters.numNodes.from && filters.numNodes.to) opts.push(`numNodes=${filters.numNodes.from}-${filters.numNodes.to}`); if (filters.numAccelerators.from && filters.numAccelerators.to) @@ -239,6 +248,9 @@ 
(isResourcesOpen = true)}> Resources + (isEnergyOpen = true)}> + Energy + (isStatsOpen = true)}> (isStatsOpen = true)} /> Statistics @@ -354,6 +366,12 @@ {/if} + {#if filters.energy.from || filters.energy.to} + (isEnergyOpen = true)}> + Total Energy: {filters.energy.from} - {filters.energy.to} + + {/if} + {#if filters.stats.length > 0} (isStatsOpen = true)}> {filters.stats @@ -423,6 +441,12 @@ on:set-filter={() => updateFilters()} /> + updateFilters()} +/> + From f616c7e1c61bc6eadc9a635b59f9cc489816dc96 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 8 Oct 2024 15:26:09 +0200 Subject: [PATCH 189/443] remove width tags from slot defs --- web/frontend/src/Job.root.svelte | 2 -- web/frontend/src/Node.root.svelte | 2 -- web/frontend/src/Systems.root.svelte | 2 -- 3 files changed, 6 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index ca1ce9e..c2b8683 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -351,7 +351,6 @@ {:else if $initq?.data && $jobMetrics?.data?.jobMetrics} gm.name == item.metric)?.scope} rawData={item.data.map((x) => x.metric)} scopes={item.data.map((x) => x.scope)} - {width} isShared={$initq.data.job.exclusive != 1} /> {:else} diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index 700b1de..3fde017 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -189,7 +189,6 @@ {:else} {#if item.disabled === false && item.metric} {#if item.disabled === false && item.data} Date: Tue, 8 Oct 2024 17:31:15 +0200 Subject: [PATCH 190/443] fix plotgrid display error, use plotheight default --- web/frontend/src/Node.root.svelte | 1 - web/frontend/src/Systems.root.svelte | 2 - .../src/generic/joblist/JobListRow.svelte | 1 - .../src/generic/plots/MetricPlot.svelte | 75 ++++++++++--------- web/frontend/src/job/Metric.svelte | 7 -- 5 files changed, 39 insertions(+), 47 deletions(-) diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index 3fde017..c78139a 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -208,7 +208,6 @@ {#if item.disabled === false && item.metric} c.name == cluster)} diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index 8a5fee2..488cdad 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -50,7 +50,6 @@ const clusters = getContext("clusters"); const globalMetrics = getContext("globalMetrics"); - let plotHeight = 300; let hostnameFilter = ""; let selectedMetric = ccconfig.system_view_selectedMetric; @@ -208,7 +207,6 @@ {#if item.disabled === false && item.data} { handleZoom(detail, metric.data.name) }} - width={plotWidth} height={plotHeight} timestep={metric.data.metric.timestep} scope={metric.data.scope} diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index fc7d8d5..ba7533f 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -6,8 +6,7 @@ Properties: - `metric String`: The metric name - `scope String?`: Scope of the displayed data [Default: node] - - `width Number`: The plot width - - `height Number`: The plot height + - `height Number?`: The plot height [Default: 300] - `timestep Number`: The timestep used for X-axis rendering - `series [GraphQL.Series]`: The metric data object - `useStatsSeries Bool?`: If this plot uses the statistics 
Min/Max/Median representation; automatically set to according bool [Default: null] @@ -118,8 +117,7 @@ export let metric; export let scope = "node"; - export let width; - export let height; + export let height = 300; export let timestep; export let series; export let useStatsSeries = null; @@ -132,15 +130,17 @@ export let numaccs = 0; export let zoomState = null; + let width; + if (useStatsSeries == null) useStatsSeries = statisticsSeries != null; if (useStatsSeries == false && series == null) useStatsSeries = true; - const usesMeanStatsSeries = (useStatsSeries && statisticsSeries.mean.length != 0) + const usesMeanStatsSeries = (useStatsSeries && statisticsSeries.mean.length != 0) const dispatch = createEventDispatcher(); const subClusterTopology = getContext("getHardwareTopology")(cluster, subCluster); const metricConfig = getContext("getMetricConfig")(cluster, subCluster, metric); const clusterCockpitConfig = getContext("cc-config"); - const renderSleepTime = 100; + const renderSleepTime = 200; const normalLineColor = "#000000"; const lineWidth = clusterCockpitConfig.plot_general_lineWidth / window.devicePixelRatio; @@ -164,8 +164,6 @@ let resampleResolutions; let resampleMinimum; - let wrapperWidth = 0; - if (resampleConfig) { resampleTrigger = Number(resampleConfig.trigger) resampleResolutions = [...resampleConfig.resolutions]; @@ -498,23 +496,32 @@ // RENDER HANDLING let plotWrapper = null; let uplot = null; - // let timeoutId = null; + let timeoutId = null; - function render(func_width, func_height) { + function render(ren_width, ren_height) { if (!uplot) { // Init uPlot - opts.width = func_width; - opts.height = func_height; + opts.width = ren_width; + opts.height = ren_height; if (zoomState) { opts.scales = {...zoomState} } // console.log('Init Sizes ...', { width: opts.width, height: opts.height }) uplot = new uPlot(opts, plotData, plotWrapper); } else { // Update size - // console.log('Update uPlot ...', { width: func_width, height: func_height }) - uplot.setSize({ width: func_width, height: func_height }); + // console.log('Update uPlot ...', { width: ren_width, height: ren_height }) + uplot.setSize({ width: ren_width, height: ren_height }); } } + function onSizeChange(chg_width, chg_height) { + if (!uplot) return; + if (timeoutId != null) clearTimeout(timeoutId); + timeoutId = setTimeout(() => { + timeoutId = null; + render(chg_width, chg_height); + }, renderSleepTime); + } + onMount(() => { // Setup Wrapper if (series[0].data.length > 0) { @@ -525,35 +532,31 @@ plotWrapper.style.backgroundColor = backgroundColor(); plotWrapper.style.borderRadius = "5px"; } + // Init Plot + render(width, height); }); onDestroy(() => { - // if (timeoutId != null) clearTimeout(timeoutId); + if (timeoutId != null) clearTimeout(timeoutId); if (uplot) uplot.destroy(); }); - $: width = wrapperWidth; - - // This renders uPlot initially and updates it on all size changes - $: if (width > 0 && height > 0) { - // console.log('Triggered render() ...') - - // if (timeoutId != null) { - // clearTimeout(timeoutId); - // timeoutId = null; - // } - // timeoutId = setTimeout(render(width, height), renderSleepTime); - - setTimeout(render(width, height), renderSleepTime); + // This updates it on all size changes + // Condition for reactive triggering (eg scope change) + $: if (series[0].data.length > 0) { + onSizeChange(width, height); } + - -{#if series[0].data.length > 0} -
-{:else} - Cannot render plot: No series data returned for {metric} -{/if} + +
+ {#if series[0].data.length > 0} +
+ {:else} + Cannot render plot: No series data returned for {metric} + {/if} +
diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index ab1616a..0ff8125 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -7,7 +7,6 @@ - `metricUnit Object`: The metrics GQL unit object - `nativeScope String`: The metrics native scope - `scopes [String]`: The scopes returned for this metric - - `width Number`: Nested plot width - `rawData [Object]`: Metric data for all scopes returned for this metric - `isShared Bool?`: If this job used shared resources; will adapt threshold indicators accordingly in downstream plots [Default: false] --> @@ -38,7 +37,6 @@ export let metricUnit; export let nativeScope; export let scopes; - export let width; export let rawData; export let isShared = false; @@ -165,7 +163,6 @@ } $: data = rawData[selectedScopeIndex]; - $: series = data?.series?.filter( (series) => selectedHost == null || series.hostname == selectedHost, ); @@ -203,8 +200,6 @@ {:else if series != null && !patternMatches} { handleZoom(detail) }} - {width} - height={300} cluster={job.cluster} subCluster={job.subCluster} timestep={data.timestep} @@ -217,8 +212,6 @@ {:else if statsSeries[selectedScopeIndex] != null && patternMatches} { handleZoom(detail) }} - {width} - height={300} cluster={job.cluster} subCluster={job.subCluster} timestep={data.timestep} From 4eff87bbf735a1e184bbafd10f932b047a1d36d9 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 8 Oct 2024 17:31:47 +0200 Subject: [PATCH 191/443] update frontend dependency manager version, adds license info --- web/frontend/package-lock.json | 160 +++++++++++++++++++++++++++------ 1 file changed, 133 insertions(+), 27 deletions(-) diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index 17f6614..2e7e8aa 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -35,6 +35,7 @@ "version": "1.0.8", "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.0.8.tgz", "integrity": "sha512-8BG6woLtDMvXB9Ajb/uE+Zr/U7y4qJ3upXi0JQHZmsKUJa7HjF/gFvmL2f3/mSmfZoQGRr9VoY97LCX2uaFMzA==", + "license": "MIT", "peerDependencies": { "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0" }, @@ -48,6 +49,7 @@ "version": "2.3.0", "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "license": "Apache-2.0", "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" @@ -60,6 +62,7 @@ "version": "7.25.7", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.25.7.tgz", "integrity": "sha512-FjoyLe754PMiYsFaN5C94ttGiOmBNYTf6pLr4xXHAT5uctHb092PBszndLDR5XA/jghQvn4n7JMHl7dmTgbm9w==", + "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" }, @@ -71,6 +74,7 @@ "version": "0.3.5", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "license": "MIT", "dependencies": { "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", @@ -84,6 +88,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", "engines": { "node": ">=6.0.0" } @@ -92,6 +97,7 @@ "version": "1.2.1", "resolved": 
"https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "license": "MIT", "engines": { "node": ">=6.0.0" } @@ -101,6 +107,7 @@ "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz", "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25" @@ -109,12 +116,14 @@ "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==" + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" }, "node_modules/@jridgewell/trace-mapping": { "version": "0.3.25", "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -123,12 +132,14 @@ "node_modules/@kurkle/color": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.2.tgz", - "integrity": "sha512-fuscdXJ9G1qb7W8VdHi+IwRqij3lBkosAm4ydQtEmbY58OzHXqQhvlxqEkoz0yssNVn38bcpRWgA9PP+OGoisw==" + "integrity": "sha512-fuscdXJ9G1qb7W8VdHi+IwRqij3lBkosAm4ydQtEmbY58OzHXqQhvlxqEkoz0yssNVn38bcpRWgA9PP+OGoisw==", + "license": "MIT" }, "node_modules/@popperjs/core": { "version": "2.11.8", "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz", "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/popperjs" @@ -139,6 +150,7 @@ "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-25.0.8.tgz", "integrity": "sha512-ZEZWTK5n6Qde0to4vS9Mr5x/0UZoqCxPVR9KRUjU4kA2sO7GEUn1fop0DAwpO6z0Nw/kJON9bDmSxdWxO/TT1A==", "dev": true, + "license": "MIT", "dependencies": { "@rollup/pluginutils": "^5.0.1", "commondir": "^1.0.1", @@ -164,6 +176,7 @@ "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.3.0.tgz", "integrity": "sha512-9eO5McEICxMzJpDW9OnMYSv4Sta3hmt7VtBFz5zR9273suNOydOyq/FrGeGy+KsTRFm8w0SLVhzig2ILFT63Ag==", "dev": true, + "license": "MIT", "dependencies": { "@rollup/pluginutils": "^5.0.1", "@types/resolve": "1.20.2", @@ -187,6 +200,7 @@ "version": "5.0.7", "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.7.tgz", "integrity": "sha512-PqxSfuorkHz/SPpyngLyg5GCEkOcee9M1bkxiVDr41Pd61mqP1PLOoDPbpl44SB2mQGKwV/In74gqQmGITOhEQ==", + "license": "MIT", "dependencies": { "@rollup/pluginutils": "^5.0.1", "magic-string": "^0.30.3" @@ -208,6 +222,7 @@ "resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.4.tgz", "integrity": "sha512-XHeJC5Bgvs8LfukDwWZp7yeqin6ns8RTl2B9avbejt6tZqsqvVoWI7ZTQrcNsfKEDWBTnTxM8nMDkO2IFFbd0A==", "dev": true, + "license": "MIT", "dependencies": { "serialize-javascript": "^6.0.1", "smob": "^1.0.0", @@ -229,6 +244,7 @@ "version": "5.1.2", "resolved": 
"https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.2.tgz", "integrity": "sha512-/FIdS3PyZ39bjZlwqFnWqCOVnW7o963LtKMwQOD0NhQqw22gSr2YY1afu3FxRip4ZCZNsD5jq6Aaz6QV3D/Njw==", + "license": "MIT", "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", @@ -254,6 +270,7 @@ "arm" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "android" @@ -267,6 +284,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "android" @@ -280,6 +298,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "darwin" @@ -293,6 +312,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "darwin" @@ -306,6 +326,7 @@ "arm" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -319,6 +340,7 @@ "arm" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -332,6 +354,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -345,6 +368,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -358,6 +382,7 @@ "ppc64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -371,6 +396,7 @@ "riscv64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -384,6 +410,7 @@ "s390x" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -397,6 +424,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -410,6 +438,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "linux" @@ -423,6 +452,7 @@ "arm64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "win32" @@ -436,6 +466,7 @@ "ia32" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "win32" @@ -449,6 +480,7 @@ "x64" ], "dev": true, + "license": "MIT", "optional": true, "os": [ "win32" @@ -458,6 +490,7 @@ "version": "6.2.7", "resolved": "https://registry.npmjs.org/@sveltestrap/sveltestrap/-/sveltestrap-6.2.7.tgz", "integrity": "sha512-WwLLfAFUb42BGuRrf3Vbct30bQMzlEMMipN/MfxhjuLTmLQeW9muVJfPyvjtWS+mY+RjkSCoHvAp/ZobP1NLlQ==", + "license": "MIT", "dependencies": { "@popperjs/core": "^2.11.8" }, @@ -469,23 +502,27 @@ "version": "1.2.6", "resolved": "https://registry.npmjs.org/@timohausmann/quadtree-js/-/quadtree-js-1.2.6.tgz", "integrity": "sha512-EoAoLMFV2JfSG8+8XD9xWJQdyvfEB5xNpiQWGD7rTDSbDQQV8IVpkm0uOIxwJZ+1uC9hHKri9GmJ5wBSUO4jfg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", - "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==" + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "license": "MIT" }, "node_modules/@types/resolve": { "version": "1.20.2", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", "integrity": "sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@urql/core": { "version": "5.0.6", "resolved": "https://registry.npmjs.org/@urql/core/-/core-5.0.6.tgz", "integrity": "sha512-38rgSDqVNihFDauw1Pm9V7XLWIKuK8V9CKgrUF7/xEKinze8ENKP1ZeBhkG+dxWzJan7CHK+SLl46kAdvZwIlA==", + "license": "MIT", "dependencies": { "@0no-co/graphql.web": "^1.0.5", "wonka": "^6.3.2" @@ -495,6 +532,7 @@ "version": "4.2.1", "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.2.1.tgz", 
"integrity": "sha512-tzjt5qElu6EF4ns+AWLUFvvGFH+bDGEgLStHQTBu76puQcMCW374MrjxWM9lKA6lfA7iUyu1KXkIRhxNy09l4Q==", + "license": "MIT", "dependencies": { "@urql/core": "^5.0.0", "wonka": "^6.3.2" @@ -508,6 +546,7 @@ "version": "8.12.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", + "license": "MIT", "bin": { "acorn": "bin/acorn" }, @@ -519,6 +558,7 @@ "version": "5.3.2", "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "license": "Apache-2.0", "engines": { "node": ">= 0.4" } @@ -527,6 +567,7 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "license": "Apache-2.0", "engines": { "node": ">= 0.4" } @@ -535,13 +576,15 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/brace-expansion": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "dev": true, + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0" } @@ -550,12 +593,14 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/chart.js": { "version": "4.4.4", "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.4.tgz", "integrity": "sha512-emICKGBABnxhMjUjlYRR12PmOXhJ2eJjEHL2/dZlWjxRAZT1D8xplLFq5M0tMQK8ja+wBS/tuVEJB5C6r7VxJA==", + "license": "MIT", "dependencies": { "@kurkle/color": "^0.3.0" }, @@ -567,6 +612,7 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/code-red/-/code-red-1.0.4.tgz", "integrity": "sha512-7qJWqItLA8/VPVlKJlFXU+NBlo/qyfs39aJcuMT/2ere32ZqvF5OSxgdM5xOfJJ7O429gg2HM47y8v9P+9wrNw==", + "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.15", "@types/estree": "^1.0.1", @@ -579,6 +625,7 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", "dependencies": { "@types/estree": "^1.0.0" } @@ -587,18 +634,21 @@ "version": "2.20.3", "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/commondir": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/complex.js": { "version": "2.2.3", "resolved": 
"https://registry.npmjs.org/complex.js/-/complex.js-2.2.3.tgz", "integrity": "sha512-XnOksGXxhQTvL3LjUgwiOPqL7vF7uikCQE/jpuylNpXmG2LZ+l0z1t6qIlJ2TJVDteXPHhlYd3+mhHOGeTFfsg==", + "license": "MIT", "engines": { "node": "*" }, @@ -611,6 +661,7 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "license": "MIT", "dependencies": { "mdn-data": "2.0.30", "source-map-js": "^1.0.1" @@ -623,6 +674,7 @@ "version": "2.30.0", "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz", "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.21.0" }, @@ -637,13 +689,15 @@ "node_modules/decimal.js": { "version": "10.4.3", "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz", - "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==" + "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==", + "license": "MIT" }, "node_modules/deepmerge": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -651,17 +705,20 @@ "node_modules/escape-latex": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/escape-latex/-/escape-latex-1.2.0.tgz", - "integrity": "sha512-nV5aVWW1K0wEiUIEdZ4erkGGH8mDxGyxSeqPzRNtWP7ataw+/olFObw7hujFWlVjNsaDFw5VZ5NzVSIqRgfTiw==" + "integrity": "sha512-nV5aVWW1K0wEiUIEdZ4erkGGH8mDxGyxSeqPzRNtWP7ataw+/olFObw7hujFWlVjNsaDFw5VZ5NzVSIqRgfTiw==", + "license": "MIT" }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" }, "node_modules/fraction.js": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.4.tgz", "integrity": "sha512-pwiTgt0Q7t+GHZA4yaLjObx4vXmmdcS0iSJ19o8d/goUGgItX9UZWKWNnLHehxviD8wU2IWRsnR8cD5+yOJP2Q==", + "license": "MIT", "engines": { "node": "*" }, @@ -674,7 +731,8 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/fsevents": { "version": "2.3.3", @@ -682,6 +740,7 @@ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, "hasInstallScript": true, + "license": "MIT", "optional": true, "os": [ "darwin" @@ -695,6 +754,7 @@ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true, + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -705,6 +765,7 @@ "integrity": 
"sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, + "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -723,6 +784,7 @@ "version": "16.9.0", "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.9.0.tgz", "integrity": "sha512-GGTKBX4SD7Wdb8mqeDLni2oaRGYQWjWHGKPQ24ZMnUtKfcsVoiv4uX8+LJr1K6U5VW2Lu1BwJnj7uiori0YtRw==", + "license": "MIT", "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } @@ -732,6 +794,7 @@ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dev": true, + "license": "MIT", "dependencies": { "function-bind": "^1.1.2" }, @@ -745,6 +808,7 @@ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "dev": true, + "license": "ISC", "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -754,13 +818,15 @@ "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/is-core-module": { "version": "2.15.1", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz", "integrity": "sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==", "dev": true, + "license": "MIT", "dependencies": { "hasown": "^2.0.2" }, @@ -775,13 +841,15 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", "integrity": "sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/is-reference": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-1.2.1.tgz", "integrity": "sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==", "dev": true, + "license": "MIT", "dependencies": { "@types/estree": "*" } @@ -789,17 +857,20 @@ "node_modules/javascript-natural-sort": { "version": "0.7.1", "resolved": "https://registry.npmjs.org/javascript-natural-sort/-/javascript-natural-sort-0.7.1.tgz", - "integrity": "sha512-nO6jcEfZWQXDhOiBtG2KvKyEptz7RVbpGP4vTD2hLBdmNQSsCiicO2Ioinv6UI4y9ukqnBpy+XZ9H6uLNgJTlw==" + "integrity": "sha512-nO6jcEfZWQXDhOiBtG2KvKyEptz7RVbpGP4vTD2hLBdmNQSsCiicO2Ioinv6UI4y9ukqnBpy+XZ9H6uLNgJTlw==", + "license": "MIT" }, "node_modules/locate-character": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", - "integrity": "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==" + "integrity": "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==", + "license": "MIT" }, "node_modules/magic-string": { "version": "0.30.11", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.11.tgz", "integrity": 
"sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==", + "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0" } @@ -808,6 +879,7 @@ "version": "12.4.3", "resolved": "https://registry.npmjs.org/mathjs/-/mathjs-12.4.3.tgz", "integrity": "sha512-oHdGPDbp7gO873xxG90RLq36IuicuKvbpr/bBG5g9c8Obm/VsKVrK9uoRZZHUodohzlnmCEqfDzbR3LH6m+aAQ==", + "license": "Apache-2.0", "dependencies": { "@babel/runtime": "^7.24.4", "complex.js": "^2.1.1", @@ -829,13 +901,15 @@ "node_modules/mdn-data": { "version": "2.0.30", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", - "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", + "license": "CC0-1.0" }, "node_modules/minimatch": { "version": "5.1.6", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", "dev": true, + "license": "ISC", "dependencies": { "brace-expansion": "^2.0.1" }, @@ -848,6 +922,7 @@ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dev": true, + "license": "ISC", "dependencies": { "wrappy": "1" } @@ -856,12 +931,14 @@ "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/periscopic": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "license": "MIT", "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^3.0.0", @@ -872,6 +949,7 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", "dependencies": { "@types/estree": "^1.0.0" } @@ -880,6 +958,7 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "license": "MIT", "dependencies": { "@types/estree": "*" } @@ -888,6 +967,7 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", "engines": { "node": ">=8.6" }, @@ -900,6 +980,7 @@ "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", "dev": true, + "license": "MIT", "dependencies": { "safe-buffer": "^5.1.0" } @@ -907,13 +988,15 @@ "node_modules/regenerator-runtime": { "version": "0.14.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + "integrity": 
"sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", + "license": "MIT" }, "node_modules/resolve": { "version": "1.22.8", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", "dev": true, + "license": "MIT", "dependencies": { "is-core-module": "^2.13.0", "path-parse": "^1.0.7", @@ -931,6 +1014,7 @@ "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" } @@ -940,6 +1024,7 @@ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.24.0.tgz", "integrity": "sha512-DOmrlGSXNk1DM0ljiQA+i+o0rSLhtii1je5wgk60j49d1jHT5YYttBv1iWOnYSTG+fZZESUOSNiAl89SIet+Cg==", "devOptional": true, + "license": "MIT", "dependencies": { "@types/estree": "1.0.6" }, @@ -975,6 +1060,7 @@ "resolved": "https://registry.npmjs.org/rollup-plugin-css-only/-/rollup-plugin-css-only-4.5.2.tgz", "integrity": "sha512-7rj9+jB17Pz8LNcPgtMUb16JcgD8lxQMK9HcGfAVhMK3na/WXes3oGIo5QsrQQVqtgAU6q6KnQNXJrYunaUIQQ==", "dev": true, + "license": "MIT", "dependencies": { "@rollup/pluginutils": "5" }, @@ -990,6 +1076,7 @@ "resolved": "https://registry.npmjs.org/rollup-plugin-svelte/-/rollup-plugin-svelte-7.2.2.tgz", "integrity": "sha512-hgnIblTRewaBEVQD6N0Q43o+y6q1TmDRhBjaEzQCi50bs8TXqjc+d1zFZyE8tsfgcfNHZQzclh4RxlFUB85H8Q==", "dev": true, + "license": "MIT", "dependencies": { "@rollup/pluginutils": "^4.1.0", "resolve.exports": "^2.0.0" @@ -1007,6 +1094,7 @@ "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-4.2.1.tgz", "integrity": "sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==", "dev": true, + "license": "MIT", "dependencies": { "estree-walker": "^2.0.1", "picomatch": "^2.2.2" @@ -1033,18 +1121,21 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/seedrandom": { "version": "3.0.5", "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-3.0.5.tgz", - "integrity": "sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==" + "integrity": "sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==", + "license": "MIT" }, "node_modules/serialize-javascript": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", "dev": true, + "license": "BSD-3-Clause", "dependencies": { "randombytes": "^2.1.0" } @@ -1053,13 +1144,15 @@ "version": "1.5.0", "resolved": "https://registry.npmjs.org/smob/-/smob-1.5.0.tgz", "integrity": "sha512-g6T+p7QO8npa+/hNx9ohv1E5pVCmWrVCUzUXJyLdMmftX6ER0oiWY/w9knEonLpnOp6b6FenKnMfR8gqwWdwig==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } @@ -1068,6 +1161,7 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", 
"integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } @@ -1077,6 +1171,7 @@ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", "dev": true, + "license": "MIT", "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -1087,6 +1182,7 @@ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true, + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -1098,6 +1194,7 @@ "version": "4.2.19", "resolved": "https://registry.npmjs.org/svelte/-/svelte-4.2.19.tgz", "integrity": "sha512-IY1rnGr6izd10B0A8LqsBfmlT5OILVuZ7XsI0vdGPEvuonFV7NYEUK4dAkm9Zg2q0Um92kYjTpS1CAP3Nh/KWw==", + "license": "MIT", "dependencies": { "@ampproject/remapping": "^2.2.1", "@jridgewell/sourcemap-codec": "^1.4.15", @@ -1122,6 +1219,7 @@ "version": "3.1.5", "resolved": "https://registry.npmjs.org/svelte-chartjs/-/svelte-chartjs-3.1.5.tgz", "integrity": "sha512-ka2zh7v5FiwfAX1oMflZ0HkNkgjHjFqANgRyC+vNYXfxtx2ku68Zo+2KgbKeBH2nS1ThDqkIACPzGxy4T0UaoA==", + "license": "MIT", "peerDependencies": { "chart.js": "^3.5.0 || ^4.0.0", "svelte": "^4.0.0" @@ -1131,6 +1229,7 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", "dependencies": { "@types/estree": "^1.0.0" } @@ -1139,6 +1238,7 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "license": "MIT", "dependencies": { "@types/estree": "*" } @@ -1148,6 +1248,7 @@ "resolved": "https://registry.npmjs.org/terser/-/terser-5.34.1.tgz", "integrity": "sha512-FsJZ7iZLd/BXkz+4xrRTGJ26o/6VTjQytUk8b8OxkwcD2I+79VPJlz7qss1+zE7h8GNIScFqXcDyJ/KqBYZFVA==", "dev": true, + "license": "BSD-2-Clause", "dependencies": { "@jridgewell/source-map": "^0.3.3", "acorn": "^8.8.2", @@ -1164,12 +1265,14 @@ "node_modules/tiny-emitter": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", - "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==" + "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==", + "license": "MIT" }, "node_modules/typed-function": { "version": "4.2.1", "resolved": "https://registry.npmjs.org/typed-function/-/typed-function-4.2.1.tgz", "integrity": "sha512-EGjWssW7Tsk4DGfE+5yluuljS1OGYWiI1J6e8puZz9nTMM51Oug8CD5Zo4gWMsOhq5BI+1bF+rWTm4Vbj3ivRA==", + "license": "MIT", "engines": { "node": ">= 18" } @@ -1177,18 +1280,21 @@ "node_modules/uplot": { "version": "1.6.31", "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.31.tgz", - "integrity": "sha512-sQZqSwVCbJGnFB4IQjQYopzj5CoTZJ4Br1fG/xdONimqgHmsacvCjNesdGDypNKFbrhLGIeshYhy89FxPF+H+w==" + "integrity": "sha512-sQZqSwVCbJGnFB4IQjQYopzj5CoTZJ4Br1fG/xdONimqgHmsacvCjNesdGDypNKFbrhLGIeshYhy89FxPF+H+w==", + "license": "MIT" }, "node_modules/wonka": { "version": "6.3.4", "resolved": 
"https://registry.npmjs.org/wonka/-/wonka-6.3.4.tgz", - "integrity": "sha512-CjpbqNtBGNAeyNS/9W6q3kSkKE52+FjIj7AkFlLr11s/VWGUu6a2CdYSdGxocIhIVjaW/zchesBQUKPVU69Cqg==" + "integrity": "sha512-CjpbqNtBGNAeyNS/9W6q3kSkKE52+FjIj7AkFlLr11s/VWGUu6a2CdYSdGxocIhIVjaW/zchesBQUKPVU69Cqg==", + "license": "MIT" }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true + "dev": true, + "license": "ISC" } } } From 1adc741cc2bd0498c5646da39d5d17261c72ec25 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 8 Oct 2024 17:32:51 +0200 Subject: [PATCH 192/443] remove dev logging --- web/frontend/src/Analysis.root.svelte | 2 -- 1 file changed, 2 deletions(-) diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index f0cba9e..ee74bb8 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -70,8 +70,6 @@ ...new Set([...metricsInHistograms, ...metricsInScatterplots.flat()]), ]; - $: console.log(">>> CLUSTER", cluster) - const sortOptions = [ { key: "totalWalltime", label: "Walltime" }, { key: "totalNodeHours", label: "Node Hours" }, From 322e161064981b5ead7b52d257ab83b7c69b3d5b Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 8 Oct 2024 17:36:28 +0200 Subject: [PATCH 193/443] cleanup leftover --- web/frontend/src/Job.root.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index c2b8683..bb48479 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -246,7 +246,7 @@ {/if} - + {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} From ed991de11a94eda342f23c6f9b6f11d750a314f4 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 8 Oct 2024 17:54:12 +0200 Subject: [PATCH 194/443] fix: ad dmissing resampleConfig handling to scope select --- web/frontend/src/job/Metric.svelte | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 0ff8125..adbb44a 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -110,8 +110,9 @@ const selectedMetrics = [metricName] $: if (selectedScope || pendingResolution) { - if (!selectedResolution) { - // Skips reactive data load on init + + if (resampleConfig && !selectedResolution) { + // Skips reactive data load on init || Only if resampling is enabled selectedResolution = Number(pendingResolution) } else { @@ -119,14 +120,14 @@ selectedScopes = [...scopes, "socket", "core", "accelerator"] } - if (pendingResolution) { + if (resampleConfig && pendingResolution) { selectedResolution = Number(pendingResolution) } metricData = queryStore({ client: client, query: subQuery, - variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, + variables: { dbid, selectedMetrics, selectedScopes, selectedResolution: (resampleConfig ? selectedResolution : 0) }, // Never user network-only: causes reactive load-loop! 
}); From f4102b948ea7849487e905ef66a1736064151a76 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 8 Oct 2024 18:46:59 +0200 Subject: [PATCH 195/443] rework clientwidth binds and size defaults for histograms --- web/frontend/src/Analysis.root.svelte | 55 +++++++--------- web/frontend/src/Status.root.svelte | 62 ++++++++----------- web/frontend/src/User.root.svelte | 56 +++++++---------- web/frontend/src/generic/PlotGrid.svelte | 4 +- .../src/generic/plots/Histogram.svelte | 21 ++++--- .../src/generic/plots/MetricPlot.svelte | 3 +- web/frontend/src/generic/plots/Scatter.svelte | 4 +- 7 files changed, 86 insertions(+), 119 deletions(-) diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index ee74bb8..d287cf3 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -55,7 +55,7 @@ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the let jobFilters = []; let rooflineMaxY; - let colWidth1, colWidth2, colWidth3, colWidth4; + let colWidth1, colWidth2; let numBins = 50; let maxY = -1; @@ -465,36 +465,30 @@ {/if} -
- {#key $statsQuery.data.stats[0].histDuration} - - {/key} -
+ {#key $statsQuery.data.stats[0].histDuration} + + {/key} -
- {#key $statsQuery.data.stats[0].histNumCores} - - {/key} -
+ {#key $statsQuery.data.stats[0].histNumCores} + + {/key} {/if} @@ -525,7 +519,6 @@ ({ metric, @@ -542,8 +535,6 @@ > -
+

Top Users on {cluster.charAt(0).toUpperCase() + cluster.slice(1)}

@@ -479,7 +478,7 @@ {$topUserQuery.error.message} {:else} tu[topUserSelection.key], @@ -539,7 +538,7 @@ {$topProjectQuery.error.message} {:else} tp[topProjectSelection.key], @@ -591,25 +590,21 @@
-
- {#key $mainQuery.data.stats} - - {/key} -
+ {#key $mainQuery.data.stats} + + {/key} {#key $mainQuery.data.stats} -
- {#key $mainQuery.data.stats} - - {/key} -
+ {#key $mainQuery.data.stats} + + {/key} {#key $mainQuery.data.stats} -
- {#key $stats.data.jobsStatistics[0].histDuration} - - {/key} -
+ {#key $stats.data.jobsStatistics[0].histDuration} + + {/key} -
- {#key $stats.data.jobsStatistics[0].histNumNodes} - - {/key} -
+ {#key $stats.data.jobsStatistics[0].histNumNodes} + + {/key} {/if}
@@ -278,7 +267,6 @@ {#key $stats.data.jobsStatistics[0].histMetrics} x._is_placeholder === true + let rows = []; + const isPlaceholder = x => x._is_placeholder === true; function tile(items, itemsPerRow) { const rows = [] diff --git a/web/frontend/src/generic/plots/Histogram.svelte b/web/frontend/src/generic/plots/Histogram.svelte index a1bb79b..6c8939a 100644 --- a/web/frontend/src/generic/plots/Histogram.svelte +++ b/web/frontend/src/generic/plots/Histogram.svelte @@ -21,8 +21,8 @@ export let data; export let usesBins = false; - export let width = 500; - export let height = 300; + export let width = null; + export let height = 250; export let title = ""; export let xlabel = ""; export let xunit = ""; @@ -226,10 +226,13 @@ $: sizeChanged(width, height); -{#if data.length > 0} -
-{:else} - Cannot render histogram: No data! -{/if} + +
+ {#if data.length > 0} +
+ {:else} + Cannot render histogram: No data! + {/if} +
diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index ba7533f..09f313c 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -117,6 +117,7 @@ export let metric; export let scope = "node"; + export let width = null; export let height = 300; export let timestep; export let series; @@ -130,8 +131,6 @@ export let numaccs = 0; export let zoomState = null; - let width; - if (useStatsSeries == null) useStatsSeries = statisticsSeries != null; if (useStatsSeries == false && series == null) useStatsSeries = true; diff --git a/web/frontend/src/generic/plots/Scatter.svelte b/web/frontend/src/generic/plots/Scatter.svelte index 08fa7bd..514223b 100644 --- a/web/frontend/src/generic/plots/Scatter.svelte +++ b/web/frontend/src/generic/plots/Scatter.svelte @@ -146,8 +146,8 @@ export let Y; export let S = null; export let color = '#0066cc'; - export let width; - export let height; + export let width = 250; + export let height = 300; export let xLabel; export let yLabel; From bc434ee8cb443e19f620de5e7ffc2fac04dc2f09 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 9 Oct 2024 11:08:14 +0200 Subject: [PATCH 196/443] add managed projects, update navbar layout, fix small issues --- web/frontend/src/Header.svelte | 158 +++++++++++++-------- web/frontend/src/header/NavbarLinks.svelte | 6 +- 2 files changed, 103 insertions(+), 61 deletions(-) diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte index de5159a..9b12403 100644 --- a/web/frontend/src/Header.svelte +++ b/web/frontend/src/Header.svelte @@ -40,13 +40,17 @@ usersTitle.set(3, "Managed Users"); usersTitle.set(4, "Users"); usersTitle.set(5, "Users"); + const projectsTitle = new Map(); + projectsTitle.set(3, "Managed Projects"); + projectsTitle.set(4, "Projects"); + projectsTitle.set(5, "Projects"); const views = [ { title: "My Jobs", requiredRole: roles.user, href: `/monitoring/user/${username}`, - icon: "bar-chart-line-fill", + icon: "bar-chart-line", perCluster: false, listOptions: false, menu: "none", @@ -58,25 +62,7 @@ icon: "card-list", perCluster: false, listOptions: false, - menu: "none", - }, - { - title: usersTitle.get(authlevel), - requiredRole: roles.manager, - href: "/monitoring/users/", - icon: "people-fill", - perCluster: true, - listOptions: true, - menu: "Groups", - }, - { - title: "Projects", - requiredRole: roles.support, - href: "/monitoring/projects/", - icon: "folder", - perCluster: true, - listOptions: true, - menu: "Groups", + menu: "Jobs", }, { title: "Tags", @@ -85,8 +71,44 @@ icon: "tags", perCluster: false, listOptions: false, + menu: "Jobs", + }, + { + title: usersTitle.get(authlevel), + requiredRole: roles.manager, + href: "/monitoring/users/", + icon: "people", + perCluster: true, + listOptions: true, menu: "Groups", }, + { + title: projectsTitle.get(authlevel), + requiredRole: roles.manager, + href: "/monitoring/projects/", + icon: "journals", + perCluster: true, + listOptions: true, + menu: "Groups", + }, + { + title: "Nodes", + requiredRole: roles.admin, + href: "/monitoring/systems/", + icon: "hdd-rack", + perCluster: true, + listOptions: false, + menu: "Info", + }, + { + title: "Status", + requiredRole: roles.admin, + href: "/monitoring/status/", + icon: "clipboard-data", + perCluster: true, + listOptions: false, + menu: "Info", + }, { title: "Analysis", requiredRole: roles.support, @@ -94,25 +116,7 @@ icon: "graph-up", perCluster: true, listOptions: false, - 
menu: "Stats", - }, - { - title: "Nodes", - requiredRole: roles.admin, - href: "/monitoring/systems/", - icon: "cpu", - perCluster: true, - listOptions: false, - menu: "Groups", - }, - { - title: "Status", - requiredRole: roles.admin, - href: "/monitoring/status/", - icon: "cpu", - perCluster: true, - listOptions: false, - menu: "Stats", + menu: "Info", }, ]; @@ -140,24 +144,27 @@ item.requiredRole <= authlevel && item.menu != "Stats", + (item) => item.requiredRole <= authlevel && item.menu != "Info", )} /> - - - - Stats - - - - item.requiredRole <= authlevel && item.menu == "Stats", - )} - /> - - + {#if authlevel >= 4} + + + + Info + + + + item.requiredRole <= authlevel && item.menu == "Info", + )} + /> + + + {/if} {:else} item.requiredRole <= authlevel && item.menu == "none", )} /> - {#each Array("Groups", "Stats") as menu} + {#if authlevel >= 2} - {menu} + Jobs item.requiredRole <= authlevel && item.menu == menu, + (item) => item.requiredRole <= authlevel && item.menu == 'Jobs', )} /> - {/each} + {/if} + {#if authlevel >= 3} + + + Groups + + + item.requiredRole <= authlevel && item.menu == 'Groups', + )} + /> + + + {/if} + {#if authlevel >= 4} + + + Info + + + item.requiredRole <= authlevel && item.menu == 'Info', + )} + /> + + + {/if} {/if} diff --git a/web/frontend/src/header/NavbarLinks.svelte b/web/frontend/src/header/NavbarLinks.svelte index ef9e49c..c99f35d 100644 --- a/web/frontend/src/header/NavbarLinks.svelte +++ b/web/frontend/src/header/NavbarLinks.svelte @@ -4,6 +4,7 @@ Properties: - `clusters [String]`: List of cluster names - `links [Object]`: Pre-filtered link objects based on user auth + - `direction String?`: The direcion of the drop-down menue [default: down] --> {#each links as item} {#if item.listOptions} - + {item.title} @@ -60,7 +62,7 @@ > {item.title} {:else} - + {item.title} From e3104c61cb5c5d4b876aad590f049f21b5de2c49 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 9 Oct 2024 13:23:06 +0200 Subject: [PATCH 197/443] filter taglist scope visibility by role, add global tag handling to support role --- internal/repository/tags.go | 2 +- internal/routerConfig/routes.go | 36 +++++++++++++------ .../src/generic/helper/TagManagement.svelte | 34 ++++++++++-------- 3 files changed, 46 insertions(+), 26 deletions(-) diff --git a/internal/repository/tags.go b/internal/repository/tags.go index dcdbd29..48ea9ec 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -309,7 +309,7 @@ func (r *JobRepository) checkScopeAuth(ctx context.Context, operation string, sc } return false, nil case operation == "write" && scope == "global": - if user.HasRole(schema.RoleAdmin) || (len(user.Roles) == 1 && user.HasRole(schema.RoleApi)) { + if user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) || (len(user.Roles) == 1 && user.HasRole(schema.RoleApi)) { return true, nil } return false, nil diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index ae3da8f..ba63ad3 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -128,7 +128,6 @@ func setupAnalysisRoute(i InfoType, r *http.Request) InfoType { func setupTaglistRoute(i InfoType, r *http.Request) InfoType { jobRepo := repository.GetJobRepository() - tags, counts, err := jobRepo.CountTags(r.Context()) tagMap := make(map[string][]map[string]interface{}) if err != nil { @@ -136,17 +135,34 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType { i["tagmap"] = tagMap return i } - + // Reduces displayed tags for unauth'd users + 
userAuthlevel := repository.GetUserFromContext(r.Context()).GetAuthLevel() // Uses tag.ID as second Map-Key component to differentiate tags with identical names - for _, tag := range tags { - tagItem := map[string]interface{}{ - "id": tag.ID, - "name": tag.Name, - "scope": tag.Scope, - "count": counts[fmt.Sprint(tag.Name, tag.ID)], + if userAuthlevel >= 4 { // Support+ : Show tags for all scopes, regardless of count + for _, tag := range tags { + tagItem := map[string]interface{}{ + "id": tag.ID, + "name": tag.Name, + "scope": tag.Scope, + "count": counts[fmt.Sprint(tag.Name, tag.ID)], + } + tagMap[tag.Type] = append(tagMap[tag.Type], tagItem) } - tagMap[tag.Type] = append(tagMap[tag.Type], tagItem) - } + } else if userAuthlevel < 4 && userAuthlevel >= 2 { // User+ : Show global and admin scope only if at least 1 tag used, private scope regardless of count + for _, tag := range tags { + tagCount := counts[fmt.Sprint(tag.Name, tag.ID)] + if ((tag.Scope == "global" || tag.Scope == "admin") && tagCount >= 1) || (tag.Scope != "global" && tag.Scope != "admin") { + tagItem := map[string]interface{}{ + "id": tag.ID, + "name": tag.Name, + "scope": tag.Scope, + "count": tagCount, + } + tagMap[tag.Type] = append(tagMap[tag.Type], tagItem) + } + } + } // auth < 2 return nothing for this route + i["tagmap"] = tagMap return i } diff --git a/web/frontend/src/generic/helper/TagManagement.svelte b/web/frontend/src/generic/helper/TagManagement.svelte index 0996f03..6fc4f48 100644 --- a/web/frontend/src/generic/helper/TagManagement.svelte +++ b/web/frontend/src/generic/helper/TagManagement.svelte @@ -48,7 +48,8 @@ let filterTerm = ""; let pendingChange = false; let isOpen = false; - const isAdmin = (roles && authlevel >= roles.admin); + const isAdmin = (roles && authlevel == roles.admin); + const isSupport = (roles && authlevel == roles.support); const client = getContextClient(); @@ -104,8 +105,8 @@ }; $: allTagsFiltered = ($initialized, fuzzySearchTags(filterTerm, allTags)); - $: usedTagsFiltered = matchJobTags(jobTags, allTagsFiltered, 'used', isAdmin); - $: unusedTagsFiltered = matchJobTags(jobTags, allTagsFiltered, 'unused', isAdmin); + $: usedTagsFiltered = matchJobTags(jobTags, allTagsFiltered, 'used', isAdmin, isSupport); + $: unusedTagsFiltered = matchJobTags(jobTags, allTagsFiltered, 'unused', isAdmin, isSupport); $: { newTagType = ""; @@ -117,16 +118,17 @@ } } - function matchJobTags(tags, availableTags, type, isAdmin) { + function matchJobTags(tags, availableTags, type, isAdmin, isSupport) { const jobTagIds = tags.map((t) => t.id) - if (type == 'used') { + if (isAdmin || type == 'used') { // Always show used tags, admin also show all unused return availableTags.filter((at) => jobTagIds.includes(at.id)) - } else if (type == 'unused' && isAdmin) { - return availableTags.filter((at) => !jobTagIds.includes(at.id)) - } else if (type == 'unused' && !isAdmin) { // Normal Users should not see unused global tags here - return availableTags.filter((at) => !jobTagIds.includes(at.id) && at.scope !== "global") + } else { // ... for unused + if (isSupport) { // ... show global tags for support + return availableTags.filter((at) => !jobTagIds.includes(at.id) && at.scope !== "admin") + } else { // ... 
show only private tags for user, manager + return availableTags.filter((at) => !jobTagIds.includes(at.id) && at.scope !== "admin" && at.scope !== "global") + } } - return [] } function isNewTag(type, name) { @@ -223,7 +225,7 @@ Private Tag {/if} - {#if isAdmin || (utag.scope !== 'global' && utag.scope !== 'admin')} + {#if isAdmin || (isSupport && utag.scope == 'global') || (utag.scope !== 'global' && utag.scope !== 'admin')} - {#if isAdmin} + {#if isSupport || isAdmin} - + {#if isAdmin} + + {/if} {/if} From 37f4ed7770fa141e77922340eb01119168f3ecf6 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 9 Oct 2024 17:52:46 +0200 Subject: [PATCH 198/443] add additional indices for sorting performance --- .../sqlite3/08_add-footprint.up.sql | 69 +++++++++++++++++-- 1 file changed, 65 insertions(+), 4 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index de151f2..9c9e53e 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -32,45 +32,106 @@ ALTER TABLE job DROP net_data_vol_total; ALTER TABLE job DROP file_bw_avg; ALTER TABLE job DROP file_data_vol_total; +-- Indices for: Single filters, combined filters, sorting, sorting with filters +-- Cluster Filter CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); -CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time); CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, user); CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); +-- Cluster Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_duration ON job (cluster, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_numnodes ON job (cluster, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_numhwthreads ON job (cluster, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_numacc ON job (cluster, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_energy ON job (cluster, energy); +-- Cluster+Partition Filter CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, partition); +-- Cluster+Partition Filter Sorting CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, partition, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, partition, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (cluster, partition, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (cluster, partition, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (cluster, partition, energy); + +-- Cluster+Partition+Jobstate Filter CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, partition, job_state); CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, partition, job_state, user); CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, partition, job_state, project); +-- Cluster+Partition+Jobstate Filter Sorting CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS 
jobs_cluster_partition_jobstate_duration ON job (cluster, partition, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, partition, job_state, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numhwthreads ON job (cluster, partition, job_state, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (cluster, partition, job_state, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (cluster, partition, job_state, energy); +-- Cluster+JobState Filter CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); -CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); +-- Cluster+JobState Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_duration ON job (cluster, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numnodes ON job (cluster, job_state, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numhwthreads ON job (cluster, job_state, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numacc ON job (cluster, job_state, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_energy ON job (cluster, job_state, energy); +-- User Filter CREATE INDEX IF NOT EXISTS jobs_user ON job (user); +-- User Filter Sorting CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (user, start_time); +CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (user, duration); +CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (user, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_user_numhwthreads ON job (user, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_user_numacc ON job (user, num_acc); +CREATE INDEX IF NOT EXISTS jobs_user_energy ON job (user, energy); +-- Project Filter CREATE INDEX IF NOT EXISTS jobs_project ON job (project); -CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, user); +-- Project Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); +CREATE INDEX IF NOT EXISTS jobs_project_duration ON job (project, duration); +CREATE INDEX IF NOT EXISTS jobs_project_numnodes ON job (project, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_project_numhwthreads ON job (project, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_project_numacc ON job (project, num_acc); +CREATE INDEX IF NOT EXISTS jobs_project_energy ON job (project, energy); +-- JobState Filter CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, user); CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); +-- JobState Filter Sorting CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_jobstate_duration ON job (job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_jobstate_numnodes ON job (job_state, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_jobstate_numhwthreads ON job (job_state, num_hwthreads); +CREATE INDEX IF NOT EXISTS 
jobs_jobstate_numacc ON job (job_state, num_acc); +CREATE INDEX IF NOT EXISTS jobs_jobstate_energy ON job (job_state, energy); +-- ArrayJob Filter CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time); CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); +-- Sorting without active filters CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); -CREATE INDEX IF NOT EXISTS jobs_energy ON job (energy); CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); CREATE INDEX IF NOT EXISTS jobs_numhwthreads ON job (num_hwthreads); CREATE INDEX IF NOT EXISTS jobs_numacc ON job (num_acc); +CREATE INDEX IF NOT EXISTS jobs_energy ON job (energy); +-- Single filters with default starttime sorting +CREATE INDEX IF NOT EXISTS jobs_duration_starttime ON job (duration, start_time); +CREATE INDEX IF NOT EXISTS jobs_numnodes_starttime ON job (num_nodes, start_time); +CREATE INDEX IF NOT EXISTS jobs_numhwthreads_starttime ON job (num_hwthreads, start_time); +CREATE INDEX IF NOT EXISTS jobs_numacc_starttime ON job (num_acc, start_time); +CREATE INDEX IF NOT EXISTS jobs_energy_starttime ON job (energy, start_time); + +-- Optimize DB index usage PRAGMA optimize; From 2f0460d6ecccb3e07b941b818864120e3d249340 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 10 Oct 2024 18:35:53 +0200 Subject: [PATCH 199/443] feat: make quick select starttimes url copyable --- api/schema.graphqls | 6 +- internal/graph/generated/generated.go | 70 +++++++++++++++++-- internal/graph/model/models_gen.go | 5 +- internal/graph/schema.resolvers.go | 1 - internal/repository/jobQuery.go | 17 +++++ internal/routerConfig/routes.go | 6 +- pkg/schema/config.go | 5 +- web/frontend/src/List.root.svelte | 8 +-- web/frontend/src/generic/Filters.svelte | 39 ++++++----- .../src/generic/filters/StartTime.svelte | 11 +++ 10 files changed, 128 insertions(+), 40 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 994d94d..e62fb0a 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -253,7 +253,7 @@ type Mutation { } type IntRangeOutput { from: Int!, to: Int! } -type TimeRangeOutput { from: Time!, to: Time! } +type TimeRangeOutput { range: String, from: Time!, to: Time! } input JobFilter { tags: [ID!] @@ -300,8 +300,8 @@ input StringInput { in: [String!] } -input IntRange { from: Int!, to: Int! } -input TimeRange { from: Time, to: Time } +input IntRange { from: Int!, to: Int! } +input TimeRange { range: String, from: Time, to: Time } input FloatRange { from: Float! 
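The new optional "range" key carries a symbolic start-time window ("last6h", "last24h", "last7d", "last30d") instead of two fixed timestamps, so a copied URL stays relative to the moment it is opened. A minimal Go sketch of the intended mapping, assuming the same four names the repository layer resolves further down in this patch (the helper itself is illustrative and not part of the code base):

    package main

    import (
    	"fmt"
    	"time"
    )

    // resolveNamedRange maps a named range onto a concrete [from, to] epoch
    // window. Unknown names report ok=false, mirroring the fall-through that
    // leaves the query unfiltered.
    func resolveNamedRange(name string, now time.Time) (from, to int64, ok bool) {
    	windows := map[string]time.Duration{
    		"last6h":  6 * time.Hour,
    		"last24h": 24 * time.Hour,
    		"last7d":  7 * 24 * time.Hour,
    		"last30d": 30 * 24 * time.Hour,
    	}
    	d, ok := windows[name]
    	if !ok {
    		return 0, 0, false
    	}
    	return now.Add(-d).Unix(), now.Unix(), true
    }

    func main() {
    	from, to, _ := resolveNamedRange("last30d", time.Now())
    	fmt.Printf("start_time BETWEEN %d AND %d\n", from, to)
    }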
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 614f9c1..00609ac 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -318,8 +318,9 @@ type ComplexityRoot struct { } TimeRangeOutput struct { - From func(childComplexity int) int - To func(childComplexity int) int + From func(childComplexity int) int + Range func(childComplexity int) int + To func(childComplexity int) int } TimeWeights struct { @@ -1668,6 +1669,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.TimeRangeOutput.From(childComplexity), true + case "TimeRangeOutput.range": + if e.complexity.TimeRangeOutput.Range == nil { + break + } + + return e.complexity.TimeRangeOutput.Range(childComplexity), true + case "TimeRangeOutput.to": if e.complexity.TimeRangeOutput.To == nil { break @@ -2141,7 +2149,7 @@ type Mutation { } type IntRangeOutput { from: Int!, to: Int! } -type TimeRangeOutput { from: Time!, to: Time! } +type TimeRangeOutput { range: String, from: Time!, to: Time! } input JobFilter { tags: [ID!] @@ -2188,8 +2196,8 @@ input StringInput { in: [String!] } -input IntRange { from: Int!, to: Int! } -input TimeRange { from: Time, to: Time } +input IntRange { from: Int!, to: Int! } +input TimeRange { range: String, from: Time, to: Time } input FloatRange { from: Float! @@ -10914,6 +10922,47 @@ func (ec *executionContext) fieldContext_Tag_scope(_ context.Context, field grap return fc, nil } +func (ec *executionContext) _TimeRangeOutput_range(ctx context.Context, field graphql.CollectedField, obj *model.TimeRangeOutput) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_TimeRangeOutput_range(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Range, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_TimeRangeOutput_range(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "TimeRangeOutput", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _TimeRangeOutput_from(ctx context.Context, field graphql.CollectedField, obj *model.TimeRangeOutput) (ret graphql.Marshaler) { fc, err := ec.fieldContext_TimeRangeOutput_from(ctx, field) if err != nil { @@ -13781,13 +13830,20 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int asMap[k] = v } - fieldsInOrder := [...]string{"from", "to"} + fieldsInOrder := [...]string{"range", "from", "to"} for _, k := range fieldsInOrder { v, ok := asMap[k] if !ok { continue } switch k { + case "range": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("range")) + data, err := ec.unmarshalOString2string(ctx, v) + if err != nil { + return it, err + } + it.Range = 
data case "from": ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) @@ -16166,6 +16222,8 @@ func (ec *executionContext) _TimeRangeOutput(ctx context.Context, sel ast.Select switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("TimeRangeOutput") + case "range": + out.Values[i] = ec._TimeRangeOutput_range(ctx, field, obj) case "from": out.Values[i] = ec._TimeRangeOutput_from(ctx, field, obj) if out.Values[i] == graphql.Null { diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 99841fd..7f0db5f 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -172,8 +172,9 @@ type StringInput struct { } type TimeRangeOutput struct { - From time.Time `json:"from"` - To time.Time `json:"to"` + Range *string `json:"range,omitempty"` + From time.Time `json:"from"` + To time.Time `json:"to"` } type TimeWeights struct { diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 14f71e1..58d664b 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -268,7 +268,6 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) // JobMetrics is the resolver for the jobMetrics field. func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) { - if resolution == nil { // Load from Config if config.Keys.EnableResampling != nil { defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions) diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index 8c16bb3..5458043 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -218,6 +218,23 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui return query.Where("? <= "+field, cond.From.Unix()) } else if cond.To != nil { return query.Where(field+" <= ?", cond.To.Unix()) + } else if cond.Range != "" { + now := time.Now().Unix() + var then int64 + switch cond.Range { + case "last6h": + then = now - (60 * 60 * 6) + case "last24h": + then = now - (60 * 60 * 24) + case "last7d": + then = now - (60 * 60 * 24 * 7) + case "last30d": + then = now - (60 * 60 * 24 * 30) + default: + log.Debugf("No known named timeRange: startTime.range = %s", cond.Range) + return query + } + return query.Where(field+" BETWEEN ? 
AND ?", then, now) } else { return query } diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index ba63ad3..05b316d 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -255,7 +255,7 @@ func buildFilterPresets(query url.Values) map[string]interface{} { } if query.Get("startTime") != "" { parts := strings.Split(query.Get("startTime"), "-") - if len(parts) == 2 { + if len(parts) == 2 { // Time in seconds, from - to a, e1 := strconv.ParseInt(parts[0], 10, 64) b, e2 := strconv.ParseInt(parts[1], 10, 64) if e1 == nil && e2 == nil { @@ -264,6 +264,10 @@ func buildFilterPresets(query url.Values) map[string]interface{} { "to": time.Unix(b, 0).Format(time.RFC3339), } } + } else { // named range + filterPresets["startTime"] = map[string]string{ + "range": query.Get("startTime"), + } } } diff --git a/pkg/schema/config.go b/pkg/schema/config.go index 10fb728..ccd848b 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -53,8 +53,9 @@ type IntRange struct { } type TimeRange struct { - From *time.Time `json:"from"` - To *time.Time `json:"to"` + Range string `json:"range,omitempty"` // Optional, e.g. 'last6h' + From *time.Time `json:"from"` + To *time.Time `json:"to"` } type FilterRanges struct { diff --git a/web/frontend/src/List.root.svelte b/web/frontend/src/List.root.svelte index 9425db5..ef57e27 100644 --- a/web/frontend/src/List.root.svelte +++ b/web/frontend/src/List.root.svelte @@ -40,15 +40,9 @@ if (filterPresets?.startTime == null) { if (filterPresets == null) filterPresets = {}; - const lastMonth = new Date( - Date.now() - 30 * 24 * 60 * 60 * 1000, - ).toISOString(); - const now = new Date(Date.now()).toISOString(); filterPresets.startTime = { - from: lastMonth, - to: now, + range: "last30d", text: "Last 30 Days", - url: "last30d", }; } diff --git a/web/frontend/src/generic/Filters.svelte b/web/frontend/src/generic/Filters.svelte index 5b829ba..9580f17 100644 --- a/web/frontend/src/generic/Filters.svelte +++ b/web/frontend/src/generic/Filters.svelte @@ -106,6 +106,10 @@ items.push({ startTime: { from: filters.startTime.from, to: filters.startTime.to }, }); + if (filters.startTime.range) + items.push({ + startTime: { range: filters.startTime.range }, + }); if (filters.tags.length != 0) items.push({ tags: filters.tags }); if (filters.duration.from || filters.duration.to) items.push({ @@ -167,13 +171,12 @@ if (filters.states.length != allJobStates.length) for (let state of filters.states) opts.push(`state=${state}`); if (filters.startTime.from && filters.startTime.to) - // if (filters.startTime.url) { - // opts.push(`startTime=${filters.startTime.url}`) - // } else { opts.push( `startTime=${dateToUnixEpoch(filters.startTime.from)}-${dateToUnixEpoch(filters.startTime.to)}`, ); - // } + if (filters.startTime.range) { + opts.push(`startTime=${filters.startTime.range}`) + } if (filters.jobId.length != 0) if (filters.jobIdMatch != "in") { opts.push(`jobId=${filters.jobId}`); @@ -259,14 +262,11 @@ {#if startTimeQuickSelect} Start Time Quick Selection - {#each [{ text: "Last 6hrs", url: "last6h", seconds: 6 * 60 * 60 }, { text: "Last 24hrs", url: "last24h", seconds: 24 * 60 * 60 }, { text: "Last 7 days", url: "last7d", seconds: 7 * 24 * 60 * 60 }, { text: "Last 30 days", url: "last30d", seconds: 30 * 24 * 60 * 60 }] as { text, url, seconds }} + {#each [{ text: "Last 6hrs", range: "last6h" }, { text: "Last 24hrs", range: "last24h" }, { text: "Last 7 days", range: "last7d" }, { text: "Last 30 days", range: "last30d" }] as { text, 
range }} { - filters.startTime.from = new Date( - Date.now() - seconds * 1000, - ).toISOString(); - filters.startTime.to = new Date(Date.now()).toISOString(); - (filters.startTime.text = text), (filters.startTime.url = url); + filters.startTime.range = range; + filters.startTime.text = text; updateFilters(); }} > @@ -302,13 +302,15 @@ {#if filters.startTime.from || filters.startTime.to} (isStartTimeOpen = true)}> - {#if filters.startTime.text} - {filters.startTime.text} - {:else} - {new Date(filters.startTime.from).toLocaleString()} - {new Date( - filters.startTime.to, - ).toLocaleString()} - {/if} + {new Date(filters.startTime.from).toLocaleString()} - {new Date( + filters.startTime.to, + ).toLocaleString()} + +{/if} + +{#if filters.startTime.range} + (isStartTimeOpen = true)}> + {filters?.startTime?.text ? filters.startTime.text : filters.startTime.range } {/if} @@ -406,9 +408,10 @@ bind:isOpen={isStartTimeOpen} bind:from={filters.startTime.from} bind:to={filters.startTime.to} + bind:range={filters.startTime.range} on:set-filter={() => { delete filters.startTime["text"]; - delete filters.startTime["url"]; + delete filters.startTime["range"]; updateFilters(); }} /> diff --git a/web/frontend/src/generic/filters/StartTime.svelte b/web/frontend/src/generic/filters/StartTime.svelte index c781077..bc842f5 100644 --- a/web/frontend/src/generic/filters/StartTime.svelte +++ b/web/frontend/src/generic/filters/StartTime.svelte @@ -6,6 +6,7 @@ - `isOpen Bool?`: Is this filter component opened [Default: false] - `from Object?`: The currently selected from startime [Default: null] - `to Object?`: The currently selected to starttime (i.e. subCluster) [Default: null] + - `range String?`: The currently selected starttime range as string [Default: ""] Events: - `set-filter, {String?, String?}`: Set 'from, to' filter in upstream component @@ -16,6 +17,7 @@ import { parse, format, sub } from "date-fns"; import { Row, + Col, Button, Input, Modal, @@ -31,6 +33,7 @@ export let isOpen = false; export let from = null; export let to = null; + export let range = ""; let pendingFrom, pendingTo; @@ -86,6 +89,14 @@ (isOpen = !isOpen)}> Select Start Time + {#if range !== ""} +

Current Range

+ + + + + + {/if}

From

From 2cbe8e9517b4deb42484a22d10291966bdcbdf4e Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 11 Oct 2024 12:30:55 +0200 Subject: [PATCH 200/443] Split systems view into node-overview and node-list --- internal/routerConfig/routes.go | 35 +- web/frontend/src/Header.svelte | 2 +- web/frontend/src/Systems.root.svelte | 230 ++----------- web/frontend/src/header/NavbarLinks.svelte | 96 ++++-- web/frontend/src/systems.entrypoint.js | 1 + web/frontend/src/systems/NodeList.svelte | 322 ++++++++++++++++++ web/frontend/src/systems/NodeOverview.svelte | 232 +++++++++++++ .../src/systems/nodelist/NodeInfo.svelte | 152 +++++++++ .../src/systems/nodelist/NodeListRow.svelte | 207 +++++++++++ web/templates/monitoring/systems.tmpl | 1 + 10 files changed, 1032 insertions(+), 246 deletions(-) create mode 100644 web/frontend/src/systems/NodeList.svelte create mode 100644 web/frontend/src/systems/NodeOverview.svelte create mode 100644 web/frontend/src/systems/nodelist/NodeInfo.svelte create mode 100644 web/frontend/src/systems/nodelist/NodeListRow.svelte diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 05b316d..e6cb376 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -42,10 +42,11 @@ var routes []Route = []Route{ {"/monitoring/projects/", "monitoring/list.tmpl", "Projects - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "PROJECT"; return i }}, {"/monitoring/tags/", "monitoring/taglist.tmpl", "Tags - ClusterCockpit", false, setupTaglistRoute}, {"/monitoring/user/{id}", "monitoring/user.tmpl", "User - ClusterCockpit", true, setupUserRoute}, - {"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster - ClusterCockpit", false, setupClusterRoute}, + {"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster Overview - ClusterCockpit", false, setupClusterOverviewRoute}, + {"/monitoring/systems/list/{cluster}", "monitoring/systems.tmpl", "Cluster List - ClusterCockpit", false, setupClusterListRoute}, {"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node - ClusterCockpit", false, setupNodeRoute}, {"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute}, - {"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of - ClusterCockpit", false, setupClusterRoute}, + {"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of - ClusterCockpit", false, setupClusterStatusRoute}, } func setupHomeRoute(i InfoType, r *http.Request) InfoType { @@ -96,7 +97,7 @@ func setupUserRoute(i InfoType, r *http.Request) InfoType { return i } -func setupClusterRoute(i InfoType, r *http.Request) InfoType { +func setupClusterStatusRoute(i InfoType, r *http.Request) InfoType { vars := mux.Vars(r) i["id"] = vars["cluster"] i["cluster"] = vars["cluster"] @@ -108,6 +109,34 @@ func setupClusterRoute(i InfoType, r *http.Request) InfoType { return i } +func setupClusterOverviewRoute(i InfoType, r *http.Request) InfoType { + vars := mux.Vars(r) + i["id"] = vars["cluster"] + i["cluster"] = vars["cluster"] + i["displayType"] = "OVERVIEW" + + from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to") + if from != "" || to != "" { + i["from"] = from + i["to"] = to + } + return i +} + +func setupClusterListRoute(i InfoType, r *http.Request) InfoType { + vars := mux.Vars(r) + i["id"] = vars["cluster"] + i["cluster"] = vars["cluster"] + i["displayType"] = "LIST" + + from, to := r.URL.Query().Get("from"), 
r.URL.Query().Get("to") + if from != "" || to != "" { + i["from"] = from + i["to"] = to + } + return i +} + func setupNodeRoute(i InfoType, r *http.Request) InfoType { vars := mux.Vars(r) i["cluster"] = vars["cluster"] diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte index 9b12403..9a7ae79 100644 --- a/web/frontend/src/Header.svelte +++ b/web/frontend/src/Header.svelte @@ -97,7 +97,7 @@ href: "/monitoring/systems/", icon: "hdd-rack", perCluster: true, - listOptions: false, + listOptions: true, menu: "Info", }, { diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index 488cdad..baef2d0 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -1,232 +1,44 @@ - - {#if $initq.error} - {$initq.error.message} - {:else if $initq.fetching} - - {:else} - - - - - Find Node - - - - - - - - - - - - Metric - - - - - - { - const diff = Date.now() - to; - from = new Date(from.getTime() + diff); - to = new Date(to.getTime() + diff); - }} - /> - - {/if} - -
-{#if $nodesQuery.error} - - - {$nodesQuery.error.message} - - -{:else if $nodesQuery.fetching || $initq.fetching} - - - - - +{#if displayType === 'OVERVIEW'} + +{:else if displayType === 'LIST'} + {:else} - - h.host.includes(hostnameFilter) && - h.metrics.some( - (m) => m.name == selectedMetric && m.scope == "node", - ), - ) - .map((h) => ({ - host: h.host, - subCluster: h.subCluster, - data: h.metrics.find( - (m) => m.name == selectedMetric && m.scope == "node", - ), - disabled: checkMetricDisabled( - selectedMetric, - cluster, - h.subCluster, - ), - })) - .sort((a, b) => a.host.localeCompare(b.host))} - > -

- {item.host} ({item.subCluster}) -

- {#if item.disabled === false && item.data} - c.name == cluster)} - subCluster={item.subCluster} - forNode={true} - /> - {:else if item.disabled === true && item.data} - Metric disabled for subcluster {selectedMetric}:{item.subCluster} - {:else} - No dataset returned for {selectedMetric} - {/if} - + + + + Unknown displayList type! + + + {/if} diff --git a/web/frontend/src/header/NavbarLinks.svelte b/web/frontend/src/header/NavbarLinks.svelte index c99f35d..2772eb7 100644 --- a/web/frontend/src/header/NavbarLinks.svelte +++ b/web/frontend/src/header/NavbarLinks.svelte @@ -24,39 +24,69 @@ {#each links as item} {#if item.listOptions} - - - - {item.title} - - - - All Clusters - - - {#each clusters as cluster} - - - {cluster.name} - - - - Running Jobs - - - - {/each} - - + {#if item.title === 'Nodes'} + + + + {item.title} + + + {#each clusters as cluster} + + + {cluster.name} + + + + Node Overview + + + Node List + + + + {/each} + + + {:else} + + + + {item.title} + + + + All Clusters + + + {#each clusters as cluster} + + + {cluster.name} + + + + Running Jobs + + + + {/each} + + + {/if} {:else if !item.perCluster} {item.title} + + + + +
+ + + + + {#if showFootprint} + + {/if} + {#each metrics as metric (metric)} + + {/each} + + + + {#if $jobsStore.error} + + + + {:else} + {#each jobs as job (job)} + + {:else} + + + + {/each} + {/if} + {#if $jobsStore.fetching || !$jobsStore.data} + + + + {/if} + +
+ Job Info + + Job Footprint + + {metric} + {#if $initialized} + ({getUnit(metric)}) + {/if} +
+

{$jobsStore.error.message}

+
No jobs found
+
+ +
+
+
+
+ +{#if usePaging} + { + if (detail.itemsPerPage != itemsPerPage) { + updateConfiguration(detail.itemsPerPage.toString(), detail.page); + } else { + jobs = [] + paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }; + } + }} + /> +{/if} + + diff --git a/web/frontend/src/systems/NodeOverview.svelte b/web/frontend/src/systems/NodeOverview.svelte new file mode 100644 index 0000000..b5d1d0b --- /dev/null +++ b/web/frontend/src/systems/NodeOverview.svelte @@ -0,0 +1,232 @@ + + + + + + {#if $initq.error} + {$initq.error.message} + {:else if $initq.fetching} + + {:else} + + + + + Find Node + + + + + + + + + + + + Metric + + + + + + { + const diff = Date.now() - to; + from = new Date(from.getTime() + diff); + to = new Date(to.getTime() + diff); + }} + /> + + {/if} + +
+{#if $nodesQuery.error} + + + {$nodesQuery.error.message} + + +{:else if $nodesQuery.fetching || $initq.fetching} + + + + + +{:else} + + h.host.includes(hostnameFilter) && + h.metrics.some( + (m) => m.name == selectedMetric && m.scope == "node", + ), + ) + .map((h) => ({ + host: h.host, + subCluster: h.subCluster, + data: h.metrics.find( + (m) => m.name == selectedMetric && m.scope == "node", + ), + disabled: checkMetricDisabled( + selectedMetric, + cluster, + h.subCluster, + ), + })) + .sort((a, b) => a.host.localeCompare(b.host))} + > +

+ {item.host} ({item.subCluster}) +

+ {#if item.disabled === false && item.data} + c.name == cluster)} + subCluster={item.subCluster} + forNode={true} + /> + {:else if item.disabled === true && item.data} + Metric disabled for subcluster {selectedMetric}:{item.subCluster} + {:else} + No dataset returned for {selectedMetric} + {/if} +
+{/if} diff --git a/web/frontend/src/systems/nodelist/NodeInfo.svelte b/web/frontend/src/systems/nodelist/NodeInfo.svelte new file mode 100644 index 0000000..0aee223 --- /dev/null +++ b/web/frontend/src/systems/nodelist/NodeInfo.svelte @@ -0,0 +1,152 @@ + + + + +
+

+ {job.jobId} + ({job.cluster}) + {#if job.metaData?.jobName} +
+ {#if job.metaData?.jobName.length <= 25} +

{job.metaData.jobName}
+ {:else} +
+ {job.metaData.jobName} +
+ {/if} + {/if} + {#if job.arrayJobId} + Array Job: #{job.arrayJobId} + {/if} +

+ +

+ + + {scrambleNames ? scramble(job.user) : job.user} + + {#if job.userData && job.userData.name} + ({scrambleNames ? scramble(job.userData.name) : job.userData.name}) + {/if} + {#if job.project && job.project != "no project"} +
+ + + {scrambleNames ? scramble(job.project) : job.project} + + {/if} +

+ +

+ {#if job.numNodes == 1} + {job.resources[0].hostname} + {:else} + {job.numNodes} + {/if} + + {#if job.exclusive != 1} + (shared) + {/if} + {#if job.numAcc > 0} + , {job.numAcc} + {/if} + {#if job.numHWThreads > 0} + , {job.numHWThreads} + {/if} +
+ {job.subCluster} +

+ +

+ Start: {new Date(job.startTime).toLocaleString()} +
+ Duration: {formatDuration(job.duration)} + {job.state} + {#if job.walltime} +
+ Walltime: {formatDuration(job.walltime)} + {/if} +

+ + {#if showTagedit} +
+

+ : + {#if jobTags?.length > 0} + {#each jobTags as tag} + + {/each} + {:else} + No Tags + {/if} +

+ {:else} +

+ {#each jobTags as tag} + + {/each} +

+ {/if} +
+ + diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte new file mode 100644 index 0000000..1832a94 --- /dev/null +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -0,0 +1,207 @@ + + + + + + + + + {#if job.monitoringStatus == 0 || job.monitoringStatus == 2} + + Not monitored or archiving failed + + {:else if $metricsQuery.fetching} + + + + {:else if $metricsQuery.error} + + + {$metricsQuery.error.message.length > 500 + ? $metricsQuery.error.message.substring(0, 499) + "..." + : $metricsQuery.error.message} + + + {:else} + {#if showFootprint} + + + + {/if} + {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} + + + {#if metric.disabled == false && metric.data} + { handleZoom(detail, metric.data.name) }} + height={plotHeight} + timestep={metric.data.metric.timestep} + scope={metric.data.scope} + series={metric.data.metric.series} + statisticsSeries={metric.data.metric.statisticsSeries} + metric={metric.data.name} + {cluster} + subCluster={job.subCluster} + isShared={job.exclusive != 1} + numhwthreads={job.numHWThreads} + numaccs={job.numAcc} + zoomState={zoomStates[metric.data.name] || null} + /> + {:else if metric.disabled == true && metric.data} + Metric disabled for subcluster {metric.data.name}:{job.subCluster} + {:else} + No dataset returned + {/if} + + {/each} + {/if} + diff --git a/web/templates/monitoring/systems.tmpl b/web/templates/monitoring/systems.tmpl index 27bbf64..635bf46 100644 --- a/web/templates/monitoring/systems.tmpl +++ b/web/templates/monitoring/systems.tmpl @@ -7,6 +7,7 @@ {{end}} {{define "javascript"}} From 2f6e5a7648c61bf24f2e3b5c92e5684fc8e24513 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 14 Oct 2024 11:55:59 +0200 Subject: [PATCH 201/443] Move common logic into systems view again - adds backend log if subcluster for node not configured --- internal/graph/schema.resolvers.go | 7 +- web/frontend/src/Systems.root.svelte | 245 +++++++++++++++- web/frontend/src/generic/PlotGrid.svelte | 4 +- .../src/generic/plots/MetricPlot.svelte | 2 +- web/frontend/src/generic/utils.js | 15 +- web/frontend/src/systems/NodeList.svelte | 271 ++++-------------- web/frontend/src/systems/NodeOverview.svelte | 243 +++------------- .../src/systems/nodelist/NodeInfo.svelte | 2 +- .../src/systems/nodelist/NodeListRow.svelte | 6 +- 9 files changed, 350 insertions(+), 445 deletions(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 58d664b..73090a8 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -438,7 +438,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ data, err := metricDataDispatcher.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) if err != nil { - log.Warn("Error while loading node data") + log.Warn("error while loading node data") return nil, err } @@ -448,7 +448,10 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ Host: hostname, Metrics: make([]*model.JobMetricWithName, 0, len(metrics)*len(scopes)), } - host.SubCluster, _ = archive.GetSubClusterByNode(cluster, hostname) + host.SubCluster, err = archive.GetSubClusterByNode(cluster, hostname) + if err != nil { + log.Warnf("error in nodeMetrics resolver: %s", err) + } for metric, scopedMetrics := range metrics { for _, scopedMetric := range scopedMetrics { diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte 
index baef2d0..8ff25c8 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -9,36 +9,259 @@ --> -{#if displayType === 'OVERVIEW'} - -{:else if displayType === 'LIST'} - -{:else} - - - - Unknown displayList type! - - + + + {#if $initq.data} + + {#if !displayNodeOverview} + + + + Metrics + + + + {/if} + + + + + Find Node(s) + + + + + + + + + {#if displayNodeOverview} + + + + Metric + + {#each systemMetrics as metric} + + {/each} + + + + {/if} + + + { + const diff = Date.now() - to; + from = new Date(from.getTime() + diff); + to = new Date(to.getTime() + diff); + }} + /> + + {/if} + + +{#if displayType !== "OVERVIEW" && displayType !== "LIST"} + + + Unknown displayList type! + + +{:else if $nodesQuery.error} + + + {$nodesQuery.error.message} + + +{:else if $nodesQuery.fetching } + + + + + +{:else if $initialized && $nodesQuery?.data} + {#if displayNodeOverview} + + + + {:else} + + + + + + + + + {/if} {/if} diff --git a/web/frontend/src/generic/PlotGrid.svelte b/web/frontend/src/generic/PlotGrid.svelte index 3bbee55..a56ffef 100644 --- a/web/frontend/src/generic/PlotGrid.svelte +++ b/web/frontend/src/generic/PlotGrid.svelte @@ -22,10 +22,10 @@ function tile(items, itemsPerRow) { const rows = [] - for (let ri = 0; ri < items.length; ri += itemsPerRow) { + for (let ri = 0; ri < items?.length; ri += itemsPerRow) { const row = [] for (let ci = 0; ci < itemsPerRow; ci += 1) { - if (ri + ci < items.length) + if (ri + ci < items?.length) row.push(items[ri + ci]) else row.push({ _is_placeholder: true, ri, ci }) diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 09f313c..bf3dd45 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -227,7 +227,7 @@ function update(u) { const { left, top } = u.cursor; - const width = u.over.querySelector(".u-legend").offsetWidth; + const width = u?.over?.querySelector(".u-legend")?.offsetWidth ? u.over.querySelector(".u-legend").offsetWidth : 0; legendEl.style.transform = "translate(" + (left - width - 15) + "px, " + (top + 15) + "px)"; } diff --git a/web/frontend/src/generic/utils.js b/web/frontend/src/generic/utils.js index 57248fc..0e06c0f 100644 --- a/web/frontend/src/generic/utils.js +++ b/web/frontend/src/generic/utils.js @@ -303,8 +303,19 @@ export function stickyHeader(datatableHeaderSelector, updatePading) { export function checkMetricDisabled(m, c, s) { // [m]etric, [c]luster, [s]ubcluster const metrics = getContext("globalMetrics"); - const result = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s) - return !result + const available = metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s) + // Return inverse logic + return !available +} + +export function checkMetricsDisabled(ma, c, s) { // [m]etric[a]rray, [c]luster, [s]ubcluster + let result = {}; + const metrics = getContext("globalMetrics"); + ma.forEach((m) => { + // Return named inverse logic: !available + result[m] = !(metrics?.find((gm) => gm.name === m)?.availability?.find((av) => av.cluster === c)?.subClusters?.includes(s)) + }); + return result } export function getStatsItems() { diff --git a/web/frontend/src/systems/NodeList.svelte b/web/frontend/src/systems/NodeList.svelte index a5538e9..935a068 100644 --- a/web/frontend/src/systems/NodeList.svelte +++ b/web/frontend/src/systems/NodeList.svelte @@ -1,5 +1,10 @@ -
+
- {#if showFootprint} - - {/if} - {#each metrics as metric (metric)} + + {#each selectedMetrics as metric (metric)} {/each} - {#if $jobsStore.error} - - - + {#each nodes as node (node)} + {node} + {:else} - {#each jobs as job (job)} - - {:else} - - - - {/each} - {/if} - {#if $jobsStore.fetching || !$jobsStore.data} - + - {/if} + {/each}
- Job Info + Node Info - Job Footprint - - {metric} - {#if $initialized} - ({getUnit(metric)}) - {/if} + {metric} ({systemUnits[metric]})
-

{$jobsStore.error.message}

-
No jobs found
-
- -
-
No nodes found
-{#if usePaging} - { - if (detail.itemsPerPage != itemsPerPage) { - updateConfiguration(detail.itemsPerPage.toString(), detail.page); - } else { - jobs = [] - paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }; - } - }} - /> -{/if} - diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte index 3feac5c..5e9a31b 100644 --- a/web/frontend/src/systems/nodelist/NodeListRow.svelte +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -5,203 +5,52 @@ - `job Object`: The job object (GraphQL.Job) - `metrics [String]`: Currently selected metrics - `plotWidth Number`: Width of the sub-components - - `plotHeight Number?`: Height of the sub-components [Default: 275] - - `showFootprint Bool`: Display of footprint component for job - - `triggerMetricRefresh Bool?`: If changed to true from upstream, will trigger metric query --> - + - {#if job.monitoringStatus == 0 || job.monitoringStatus == 2} - - Not monitored or archiving failed - - {:else if $metricsQuery.fetching} - - - - {:else if $metricsQuery.error} - - - {$metricsQuery.error.message.length > 500 - ? $metricsQuery.error.message.substring(0, 499) + "..." - : $metricsQuery.error.message} - - - {:else} - {#if showFootprint} - - - - {/if} - {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} - - - {#if metric.disabled == false && metric.data} - { handleZoom(detail, metric.data.name) }} - height={plotHeight} - timestep={metric.data.metric.timestep} - scope={metric.data.scope} - series={metric.data.metric.series} - statisticsSeries={metric.data.metric.statisticsSeries} - metric={metric.data.name} - {cluster} - subCluster={job.subCluster} - isShared={job.exclusive != 1} - numhwthreads={job.numHWThreads} - numaccs={job.numAcc} - zoomState={zoomStates[metric.data.name] || null} - /> - {:else if metric.disabled == true && metric.data} - + {#if metricData} + {#if nodeData?.disabled[metricData.name]} + Metric disabled for subcluster {metric.data.name}:{job.subCluster}{metricData.name}:{nodeData.subCluster} {:else} - No dataset returned + {/if} - - {/each} - {/if} + {:else} + No dataset returned for {metricData.name} + {/if} + + {/each} From 3dfeabcec65cd1fa20a1ac5c1fe12c29e0dd8903 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 16 Oct 2024 12:41:15 +0200 Subject: [PATCH 203/443] simplify plotGrid, add cancel to metricSelect, improve metricPlot render logic --- web/frontend/src/Analysis.root.svelte | 2 - web/frontend/src/Job.root.svelte | 1 - web/frontend/src/Node.root.svelte | 1 - web/frontend/src/Status.root.svelte | 1 - web/frontend/src/Systems.root.svelte | 9 ++- web/frontend/src/User.root.svelte | 1 - web/frontend/src/generic/PlotGrid.svelte | 45 ++--------- .../src/generic/plots/MetricPlot.svelte | 44 +++++------ .../src/generic/select/MetricSelection.svelte | 1 + web/frontend/src/systems/NodeList.svelte | 4 +- web/frontend/src/systems/NodeOverview.svelte | 75 ++++++++----------- .../src/systems/nodelist/NodeInfo.svelte | 20 ++--- .../src/systems/nodelist/NodeListRow.svelte | 40 ++++------ 13 files changed, 90 insertions(+), 154 deletions(-) diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index d287cf3..c6dc424 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -519,7 +519,6 @@ ({ metric, ...binsFromFootprint( @@ -563,7 +562,6 @@ ({ m1, f1: $footprintsQuery.data.footprints.metrics.find( diff --git a/web/frontend/src/Job.root.svelte 
b/web/frontend/src/Job.root.svelte index bb48479..04e9cf9 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -351,7 +351,6 @@ {:else if $initq?.data && $jobMetrics?.data?.jobMetrics} ({ diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index e49d11a..65d3091 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -645,7 +645,6 @@ {#key $mainQuery.data.stats[0].histMetrics} diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index f2ef4ea..e1da89f 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -45,7 +45,7 @@ if (from == null || to == null) { to = new Date(Date.now()); from = new Date(to.getTime()); - from.setHours(from.getHours() - 4); + from.setHours(from.getHours() - 2); } const initialized = getContext("initialized"); @@ -61,6 +61,7 @@ // Todo: Add Idle State Filter (== No allocated Jobs) // Todo: NodeList: Mindestens Accelerator Scope ... "Show Detail" Switch? // Todo: Review performance // observed high client-side load frequency + // Is Svelte {#each} -> -> onMount() related : Cannot be skipped ... const client = getContextClient(); const nodeQuery = gql` @@ -245,13 +246,13 @@ -{:else if $initialized && $nodesQuery?.data} +{:else if filteredData?.length > 0} {#if displayNodeOverview} - + {:else} - + {/if} {/if} diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 57f2f28..1d911ba 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -267,7 +267,6 @@ {#key $stats.data.jobsStatistics[0].histMetrics} diff --git a/web/frontend/src/generic/PlotGrid.svelte b/web/frontend/src/generic/PlotGrid.svelte index a56ffef..5152e0d 100644 --- a/web/frontend/src/generic/PlotGrid.svelte +++ b/web/frontend/src/generic/PlotGrid.svelte @@ -4,7 +4,6 @@ Properties: - `itemsPerRow Number`: Elements to render per row - `items [Any]`: List of plot components to render - - `renderFor String`: If 'job', filter disabled metrics --> -{#each rows as row} - - {#each row as item (item)} - - {#if !isPlaceholder(item)} - - {/if} - - {/each} - -{/each} + + {#each items as item} + + + + {/each} + diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index bf3dd45..d3af6f9 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -11,7 +11,7 @@ - `series [GraphQL.Series]`: The metric data object - `useStatsSeries Bool?`: If this plot uses the statistics Min/Max/Median representation; automatically set to according bool [Default: null] - `statisticsSeries [GraphQL.StatisticsSeries]?`: Min/Max/Median representation of metric data [Default: null] - - `cluster GraphQL.Cluster`: Cluster Object of the parent job + - `cluster String`: Cluster name of the parent job / data - `subCluster String`: Name of the subCluster of the parent job - `isShared Bool?`: If this job used shared resources; will adapt threshold indicators accordingly [Default: false] - `forNode Bool?`: If this plot is used for node data display; will ren[data, err := metricdata.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)](https://github.com/ClusterCockpit/cc-backend/blob/9fe7cdca9215220a19930779a60c8afc910276a3/internal/graph/schema.resolvers.go#L391-L392)der x-axis as negative time with $now as maximum [Default: false] @@ -117,13 +117,13 @@ export let metric; 
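// Assumption: width and height are bound in from the enclosing wrapper, so the
// new 0 default below simply defers rendering until plotWrapper is mounted.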
export let scope = "node"; - export let width = null; + export let width = 0; export let height = 300; export let timestep; export let series; export let useStatsSeries = null; export let statisticsSeries = null; - export let cluster; + export let cluster = ""; export let subCluster; export let isShared = false; export let forNode = false; @@ -522,17 +522,9 @@ } onMount(() => { - // Setup Wrapper - if (series[0].data.length > 0) { - if (forNode) { - plotWrapper.style.paddingTop = "0.5rem" - plotWrapper.style.paddingBottom = "0.5rem" - } - plotWrapper.style.backgroundColor = backgroundColor(); - plotWrapper.style.borderRadius = "5px"; + if (plotWrapper) { + render(width, height); } - // Init Plot - render(width, height); }); onDestroy(() => { @@ -540,22 +532,20 @@ if (uplot) uplot.destroy(); }); - // This updates it on all size changes - // Condition for reactive triggering (eg scope change) - $: if (series[0].data.length > 0) { + // This updates plot on all size changes if wrapper (== data) exists + $: if (plotWrapper) { onSizeChange(width, height); } - -
- {#if series[0].data.length > 0} -
- {:else} - Cannot render plot: No series data returned for {metric} - {/if} -
- + +{#if series[0].data.length > 0} +
+{:else} + Cannot render plot: No series data returned for {metric} +{/if} diff --git a/web/frontend/src/generic/select/MetricSelection.svelte b/web/frontend/src/generic/select/MetricSelection.svelte index b65b407..71b42b8 100644 --- a/web/frontend/src/generic/select/MetricSelection.svelte +++ b/web/frontend/src/generic/select/MetricSelection.svelte @@ -178,6 +178,7 @@ + diff --git a/web/frontend/src/systems/NodeList.svelte b/web/frontend/src/systems/NodeList.svelte index 54f7530..b8eacdf 100644 --- a/web/frontend/src/systems/NodeList.svelte +++ b/web/frontend/src/systems/NodeList.svelte @@ -36,7 +36,7 @@ {cluster} Node Info @@ -53,7 +53,7 @@ - {#each data as nodeData (nodeData)} + {#each data as nodeData (nodeData.host)} {:else} diff --git a/web/frontend/src/systems/NodeOverview.svelte b/web/frontend/src/systems/NodeOverview.svelte index 7ad711d..9e02f10 100644 --- a/web/frontend/src/systems/NodeOverview.svelte +++ b/web/frontend/src/systems/NodeOverview.svelte @@ -9,9 +9,7 @@ --> - -

- {item.host} ({item.subCluster}) -

- {#if item?.data[0]} - {#if item?.disabled[selectedMetric]} - Metric disabled for subcluster {selectedMetric}:{item.subCluster} - {:else} - c.name == cluster)} - subCluster={item.subCluster} - forNode={true} - /> - {/if} - {:else} - No dataset returned for {selectedMetric} - {/if} -
+ + + {#each data as item (item.host)} + +

+ {item.host} ({item.subCluster}) +

+ {#if item?.disabled[selectedMetric]} + Metric disabled for subcluster {selectedMetric}:{item.subCluster} + {:else} + + + {/if} + + {/each} +
\ No newline at end of file diff --git a/web/frontend/src/systems/nodelist/NodeInfo.svelte b/web/frontend/src/systems/nodelist/NodeInfo.svelte index 2a40c7a..d0ea2b9 100644 --- a/web/frontend/src/systems/nodelist/NodeInfo.svelte +++ b/web/frontend/src/systems/nodelist/NodeInfo.svelte @@ -56,7 +56,7 @@ - +
@@ -106,7 +106,7 @@

{#if $nodeJobsData.data.jobs.count > 0} - + @@ -116,11 +116,11 @@ - Show List + List {:else} - + @@ -132,30 +132,30 @@ {/if}

- + - Users on Node + Show Users on Node - Show List + List

- + - Projects on Node + Show Projects on Node - Show List + List

diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte index 5e9a31b..a510b0e 100644 --- a/web/frontend/src/systems/nodelist/NodeListRow.svelte +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -24,32 +24,24 @@ - {#each sortOrder(nodeData?.data) as metricData} + {#each sortOrder(nodeData?.data) as metricData (metricData.name)} - {#if metricData} - {#if nodeData?.disabled[metricData.name]} - Metric disabled for subcluster {metricData.name}:{nodeData.subCluster} - {:else} - - {/if} - {:else} - No dataset returned for {metricData.name}Metric disabled for subcluster {metricData.name}:{nodeData.subCluster} + {:else} + + {/if} {/each} From 85a77e05afc2b8b9de6449e4259c2ea8dce189e0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 16 Oct 2024 12:51:10 +0200 Subject: [PATCH 204/443] edit nodeInfo string --- web/frontend/src/systems/nodelist/NodeInfo.svelte | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/frontend/src/systems/nodelist/NodeInfo.svelte b/web/frontend/src/systems/nodelist/NodeInfo.svelte index d0ea2b9..15effe2 100644 --- a/web/frontend/src/systems/nodelist/NodeInfo.svelte +++ b/web/frontend/src/systems/nodelist/NodeInfo.svelte @@ -137,9 +137,9 @@ - Show Users on Node + Show Users - + List @@ -151,7 +151,7 @@ - Show Projects on Node + Show Projects From 33d219d2acbcf0a8ea513e694781479c5a8d04f7 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 16 Oct 2024 13:05:03 +0200 Subject: [PATCH 205/443] Add subCluster to node view info field --- web/frontend/src/Node.root.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index bbbcb0f..60ab404 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -141,7 +141,7 @@ Selected Node - + From 60d7984d6639a5cff1b1a0b1707cdc7ca11001de Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 16 Oct 2024 14:16:31 +0200 Subject: [PATCH 206/443] add notes --- web/frontend/src/Systems.root.svelte | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index e1da89f..39d17aa 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -58,8 +58,9 @@ let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric]; let isMetricsSelectionOpen = false; - // Todo: Add Idle State Filter (== No allocated Jobs) + // Todo: Add Idle State Filter (== No allocated Jobs) [Frontend?] // Todo: NodeList: Mindestens Accelerator Scope ... "Show Detail" Switch? + // Todo: Rework GQL Query: Add Paging (Scrollable), Add Nodes Filter (see jobs-onthefly-userfilter), add scopes // Todo: Review performance // observed high client-side load frequency // Is Svelte {#each} -> -> onMount() related : Cannot be skipped ... 
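The render-performance note just above ties into the keyed {#each} blocks introduced in PATCH 203: when a list is keyed by the data object itself, every query refresh produces new object identities, so Svelte destroys and re-mounts each child component and re-runs its onMount() (and with it the uPlot initialization) on every poll; keying by the stable hostname lets existing instances be updated in place. A minimal sketch of the keyed pattern, with an illustrative stand-in component and data shape rather than the actual NodeOverview markup:

<script>
  // Hypothetical stand-in for the node entries delivered by the nodes query
  export let data = [
    { host: "node01", subCluster: "main" },
    { host: "node02", subCluster: "main" },
  ];
</script>

{#each data as item (item.host)}
  <!-- Keyed by the stable hostname: on a data refresh Svelte patches this
       subtree in place instead of destroying and re-mounting it -->
  <p>{item.host} ({item.subCluster})</p>
{/each}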
From 39b22267d6dcdf30d5b3641ecc582ce94d67e8d5 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 16 Oct 2024 16:03:31 +0200 Subject: [PATCH 207/443] Update component descriptions --- web/frontend/src/systems/NodeList.svelte | 7 ++++--- web/frontend/src/systems/nodelist/NodeInfo.svelte | 7 ++++--- web/frontend/src/systems/nodelist/NodeListRow.svelte | 6 +++--- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/web/frontend/src/systems/NodeList.svelte b/web/frontend/src/systems/NodeList.svelte index b8eacdf..5d106b6 100644 --- a/web/frontend/src/systems/NodeList.svelte +++ b/web/frontend/src/systems/NodeList.svelte @@ -2,9 +2,10 @@ @component Cluster Per Node List component; renders current state of SELECTABLE metrics for ALL nodes Properties: - - `cluster String`: The cluster to show status information for - - `from Date?`: Custom Time Range selection 'from' [Default: null] - - `to Date?`: Custom Time Range selection 'to' [Default: null] + - `cluster String`: The nodes' cluster + - `data [Object]`: The node data array for all nodes + - `selectedMetrics [String]`: The array of selected metrics + - `selectedMetrics Object`: The object of metric units -->

- {job.jobId} - ({job.cluster}) + + + {job.jobId} + ({job.cluster}) + + + {#if job.metaData?.jobName} -
{#if job.metaData?.jobName.length <= 25}

{job.metaData.jobName}
{:else} From 63b9e619a4d80e1921aa9cfa8410cb2326bb8990 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 22 Oct 2024 14:37:22 +0200 Subject: [PATCH 210/443] fix: fixed and changed to footprint update by transactions --- internal/repository/job.go | 10 ++-- internal/repository/transaction.go | 10 ++-- .../taskManager/updateFootprintService.go | 47 ++++++++++++------- 3 files changed, 40 insertions(+), 27 deletions(-) diff --git a/internal/repository/job.go b/internal/repository/job.go index d408341..05d0acc 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -621,13 +621,12 @@ func (r *JobRepository) UpdateEnergy( } var rawFootprint []byte - if rawFootprint, err = json.Marshal(energyFootprint); err != nil { - log.Warnf("Error while marshaling energy footprint for job, DB ID '%v'", jobMeta.ID) + log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) return stmt, err } - return stmt.Set("energy_footprint", rawFootprint).Set("energy", (math.Round(totalEnergy*100) / 100)), nil + return stmt.Set("energy_footprint", string(rawFootprint)).Set("energy", (math.Round(totalEnergy*100) / 100)), nil } func (r *JobRepository) UpdateFootprint( @@ -654,11 +653,10 @@ func (r *JobRepository) UpdateFootprint( } var rawFootprint []byte - if rawFootprint, err = json.Marshal(footprint); err != nil { - log.Warnf("Error while marshaling footprint for job, DB ID '%v'", jobMeta.ID) + log.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) return stmt, err } - return stmt.Set("footprint", rawFootprint), nil + return stmt.Set("footprint", string(rawFootprint)), nil } diff --git a/internal/repository/transaction.go b/internal/repository/transaction.go index 8c5d357..603d505 100644 --- a/internal/repository/transaction.go +++ b/internal/repository/transaction.go @@ -49,7 +49,6 @@ func (r *JobRepository) TransactionEnd(t *Transaction) error { log.Warn("Error while committing SQL transactions") return err } - return nil } @@ -74,11 +73,16 @@ func (r *JobRepository) TransactionAddNamed( } func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) { - res := t.tx.MustExec(query, args) + + res, err := t.tx.Exec(query, args...) 
+ if err != nil { + log.Errorf("TransactionAdd(), Exec() Error: %v", err) + return 0, err + } id, err := res.LastInsertId() if err != nil { - log.Errorf("repository initDB(): %v", err) + log.Errorf("TransactionAdd(), LastInsertId() Error: %v", err) return 0, err } diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index b21502a..caa4f63 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -24,12 +24,15 @@ func RegisterFootprintWorker() { gocron.NewTask( func() { s := time.Now() - log.Printf("Update Footprints started at %s using direct query execution", s.Format(time.RFC3339)) + c := 0 + ce := 0 + cl := 0 + log.Printf("Update Footprints started at %s", s.Format(time.RFC3339)) - // t, err := jobRepo.TransactionInit() - // if err != nil { - // log.Errorf("Failed TransactionInit %v", err) - // } + t, err := jobRepo.TransactionInit() + if err != nil { + log.Errorf("Failed TransactionInit %v", err) + } for _, cluster := range archive.Clusters { jobs, err := jobRepo.FindRunningJobs(cluster.Name) @@ -47,10 +50,12 @@ func RegisterFootprintWorker() { scopes = append(scopes, schema.MetricScopeAccelerator) for _, job := range jobs { - // log.Debugf("Try job %d", job.JobID) + log.Debugf("Try job %d", job.JobID) + cl++ jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, context.Background(), 0) // 0 Resolution-Value retrieves highest res if err != nil { log.Errorf("Error wile loading job data for footprint update: %v", err) + ce++ continue } @@ -65,6 +70,7 @@ func RegisterFootprintWorker() { nodeData, ok := data["node"] if !ok { // This should never happen ? + ce++ continue } @@ -92,33 +98,38 @@ func RegisterFootprintWorker() { stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta) if err != nil { log.Errorf("Update job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error()) + ce++ continue } stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta) if err != nil { log.Errorf("Update job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error()) + ce++ continue } // Add WHERE Filter stmt = stmt.Where("job.id = ?", job.ID) - // query, args, err := stmt.ToSql() - // if err != nil { - // log.Errorf("Failed in ToSQL conversion: %v", err) - // continue - // } - - // jobRepo.TransactionAdd(t, query, args) - if err := jobRepo.Execute(stmt); err != nil { - log.Errorf("Update job footprint (dbid: %d) failed at db execute: %s", job.ID, err.Error()) + query, args, err := stmt.ToSql() + if err != nil { + log.Errorf("Failed in ToSQL conversion: %v", err) + ce++ continue } + + // Args: JSON, JSON, ENERGY, JOBID + jobRepo.TransactionAdd(t, query, args...) 
+ // if err := jobRepo.Execute(stmt); err != nil { + // log.Errorf("Update job footprint (dbid: %d) failed at db execute: %s", job.ID, err.Error()) + // continue + // } + c++ log.Debugf("Finish Job %d", job.JobID) } + jobRepo.TransactionCommit(t) log.Debugf("Finish Cluster %s", cluster.Name) - // jobRepo.TransactionCommit(t) } - // jobRepo.TransactionEnd(t) - log.Printf("Update Footprints is done and took %s", time.Since(s)) + jobRepo.TransactionEnd(t) + log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s)) })) } From 6f74c8cb7705118510d0fb4bd523ea6a63282ab3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 23 Oct 2024 16:15:44 +0200 Subject: [PATCH 211/443] feat: make cron worker frequency configurable --- internal/taskManager/updateDurationService.go | 11 +++++++++-- internal/taskManager/updateFootprintService.go | 12 ++++++++++-- pkg/schema/config.go | 10 ++++++++++ 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/internal/taskManager/updateDurationService.go b/internal/taskManager/updateDurationService.go index 6023547..fc7f446 100644 --- a/internal/taskManager/updateDurationService.go +++ b/internal/taskManager/updateDurationService.go @@ -7,14 +7,21 @@ package taskManager import ( "time" + "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/go-co-op/gocron/v2" ) func RegisterUpdateDurationWorker() { - log.Info("Register duration update service") + var frequency string + if config.Keys.CronFrequency.DurationWorker != "" { + frequency = config.Keys.CronFrequency.DurationWorker + } else { + frequency = "5m" + } + d, _ := time.ParseDuration(frequency) + log.Infof("Register Duration Update service with %s interval", frequency) - d, _ := time.ParseDuration("5m") s.NewJob(gocron.DurationJob(d), gocron.NewTask( func() { diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index caa4f63..a0eccc8 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -9,6 +9,7 @@ import ( "math" "time" + "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" @@ -18,8 +19,15 @@ import ( ) func RegisterFootprintWorker() { - log.Info("Register Footprint Update service") - d, _ := time.ParseDuration("10m") + var frequency string + if config.Keys.CronFrequency.FootprintWorker != "" { + frequency = config.Keys.CronFrequency.FootprintWorker + } else { + frequency = "10m" + } + d, _ := time.ParseDuration(frequency) + log.Infof("Register Footprint Update service with %s interval", frequency) + s.NewJob(gocron.DurationJob(d), gocron.NewTask( func() { diff --git a/pkg/schema/config.go b/pkg/schema/config.go index ccd848b..04e3f10 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -84,6 +84,13 @@ type ResampleConfig struct { Resolutions []int `json:"resolutions"` } +type CronFrequency struct { + // Duration Update Worker [Defaults to '5m'] + DurationWorker string `json:"duration-worker"` + // Metric- and Energy Footprint Update Worker [Defaults to '10m'] + FootprintWorker string `json:"footprint-worker"` +} + // Format of the configuration (file). See below for the defaults. type ProgramConfig struct { // Address where the http (or https) server will listen on (for example: 'localhost:80'). 
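Together with the cron-frequency field added to ProgramConfig in the hunk below, this makes both worker intervals tunable from the central config file. A sketch of the matching config.json snippet, using the defaults named in the code; the key names are taken from the struct tags above, and the values must be accepted by Go's time.ParseDuration:

"cron-frequency": {
  "duration-worker": "5m",
  "footprint-worker": "10m"
}

An empty field falls back to the hard-coded 5m/10m defaults. Note that leaving out the cron-frequency block entirely is only handled safely once the nil check from PATCH 217 ("fix incorrect config conditions") lands, since this patch still dereferences the pointer unconditionally.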
@@ -159,4 +166,7 @@ type ProgramConfig struct { // Energy Mix CO2 Emission Constant [g/kWh] // If entered, displays estimated CO2 emission for job based on jobs totalEnergy EmissionConstant int `json:"emission-constant"` + + // Frequency of cron job workers + CronFrequency *CronFrequency `json:"cron-frequency"` } From 934d1a6114c72e53f37b9c994878dd22f683452d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 23 Oct 2024 16:16:28 +0200 Subject: [PATCH 212/443] fix: use configured footprint statType for update --- internal/repository/job.go | 12 +++++++++++- web/frontend/src/generic/joblist/JobInfo.svelte | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/internal/repository/job.go b/internal/repository/job.go index 05d0acc..d1c54a1 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -642,7 +642,17 @@ func (r *JobRepository) UpdateFootprint( footprint := make(map[string]float64) for _, fp := range sc.Footprint { - statType := "avg" + var statType string + for _, gm := range archive.GlobalMetricList { + if gm.Name == fp { + statType = gm.Footprint + } + } + + if statType != "avg" && statType != "min" && statType != "max" { + log.Warnf("unknown statType for footprint update: %s", statType) + return stmt, fmt.Errorf("unknown statType for footprint update: %s", statType) + } if i, err := archive.MetricIndex(sc.MetricConfig, fp); err != nil { statType = sc.MetricConfig[i].Footprint diff --git a/web/frontend/src/generic/joblist/JobInfo.svelte b/web/frontend/src/generic/joblist/JobInfo.svelte index 2e71462..adacd4f 100644 --- a/web/frontend/src/generic/joblist/JobInfo.svelte +++ b/web/frontend/src/generic/joblist/JobInfo.svelte @@ -72,7 +72,7 @@ {:else}
{job.metaData.jobName} From 01102cb9b0aa79a571f3edb0234a52f4f51c5f11 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 23 Oct 2024 16:17:47 +0200 Subject: [PATCH 213/443] feat: add updateUserOnLogin config option for oidc, jwt --- internal/auth/auth.go | 35 +++++++++++++++++++++++-------- internal/auth/jwtCookieSession.go | 4 ++-- internal/auth/jwtSession.go | 4 ++-- internal/auth/oidc.go | 4 ++-- pkg/schema/config.go | 8 +++++-- 5 files changed, 38 insertions(+), 17 deletions(-) diff --git a/internal/auth/auth.go b/internal/auth/auth.go index b6e4cbe..270989f 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -143,19 +143,36 @@ func GetAuthInstance() *Authentication { return authInstance } -func persistUser(user *schema.User) { +func handleTokenUser(tokenUser *schema.User) { r := repository.GetUserRepository() - dbUser, err := r.GetUser(user.Username) + dbUser, err := r.GetUser(tokenUser.Username) if err != nil && err != sql.ErrNoRows { - log.Errorf("Error while loading user '%s': %v", user.Username, err) - } else if err == sql.ErrNoRows { // Adds New User - if err := r.AddUser(user); err != nil { - log.Errorf("Error while adding user '%s' to DB: %v", user.Username, err) + log.Errorf("Error while loading user '%s': %v", tokenUser.Username, err) + } else if err == sql.ErrNoRows && config.Keys.JwtConfig.SyncUserOnLogin { // Adds New User + if err := r.AddUser(tokenUser); err != nil { + log.Errorf("Error while adding user '%s' to DB: %v", tokenUser.Username, err) } - } else { // Update Existing - if err := r.UpdateUser(dbUser, user); err != nil { - log.Errorf("Error while updating user '%s' to DB: %v", user.Username, err) + } else if err == nil && config.Keys.JwtConfig.UpdateUserOnLogin { // Update Existing User + if err := r.UpdateUser(dbUser, tokenUser); err != nil { + log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err) + } + } +} + +func handleOIDCUser(OIDCUser *schema.User) { + r := repository.GetUserRepository() + dbUser, err := r.GetUser(OIDCUser.Username) + + if err != nil && err != sql.ErrNoRows { + log.Errorf("Error while loading user '%s': %v", OIDCUser.Username, err) + } else if err == sql.ErrNoRows && config.Keys.OpenIDConfig.SyncUserOnLogin { // Adds New User + if err := r.AddUser(OIDCUser); err != nil { + log.Errorf("Error while adding user '%s' to DB: %v", OIDCUser.Username, err) + } + } else if err == nil && config.Keys.OpenIDConfig.UpdateUserOnLogin { // Update Existing User + if err := r.UpdateUser(dbUser, OIDCUser); err != nil { + log.Errorf("Error while updating user '%s' to DB: %v", dbUser.Username, err) } } } diff --git a/internal/auth/jwtCookieSession.go b/internal/auth/jwtCookieSession.go index 926f7ba..7e0e045 100644 --- a/internal/auth/jwtCookieSession.go +++ b/internal/auth/jwtCookieSession.go @@ -198,8 +198,8 @@ func (ja *JWTCookieSessionAuthenticator) Login( AuthSource: schema.AuthViaToken, } - if jc.SyncUserOnLogin { - persistUser(user) + if jc.SyncUserOnLogin || jc.UpdateUserOnLogin { + handleTokenUser(user) } } diff --git a/internal/auth/jwtSession.go b/internal/auth/jwtSession.go index 765a9fd..67457ee 100644 --- a/internal/auth/jwtSession.go +++ b/internal/auth/jwtSession.go @@ -138,8 +138,8 @@ func (ja *JWTSessionAuthenticator) Login( AuthSource: schema.AuthViaToken, } - if config.Keys.JwtConfig.SyncUserOnLogin { - persistUser(user) + if config.Keys.JwtConfig.SyncUserOnLogin || config.Keys.JwtConfig.UpdateUserOnLogin { + handleTokenUser(user) } } diff --git a/internal/auth/oidc.go b/internal/auth/oidc.go index 
5cfb563..ba1c9da 100644 --- a/internal/auth/oidc.go +++ b/internal/auth/oidc.go @@ -168,8 +168,8 @@ func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) { AuthSource: schema.AuthViaOIDC, } - if config.Keys.OpenIDConfig.SyncUserOnLogin { - persistUser(user) + if config.Keys.OpenIDConfig.SyncUserOnLogin || config.Keys.OpenIDConfig.UpdateUserOnLogin { + handleOIDCUser(user) } oa.authentication.SaveSession(rw, r, user)
diff --git a/pkg/schema/config.go b/pkg/schema/config.go index 04e3f10..b87841c 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -24,8 +24,9 @@ type LdapConfig struct { } type OpenIDConfig struct { - Provider string `json:"provider"` - SyncUserOnLogin bool `json:"syncUserOnLogin"` + Provider string `json:"provider"` + SyncUserOnLogin bool `json:"syncUserOnLogin"` + UpdateUserOnLogin bool `json:"updateUserOnLogin"` } type JWTAuthConfig struct { @@ -45,6 +46,9 @@ type JWTAuthConfig struct { // Should an non-existent user be added to the DB based on the information in the token SyncUserOnLogin bool `json:"syncUserOnLogin"` + + // Should an existent user be updated in the DB based on the information in the token + UpdateUserOnLogin bool `json:"updateUserOnLogin"` } type IntRange struct {
From 9688bad622936ef9a4b1418df4ed285e479c848d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 24 Oct 2024 18:14:31 +0200 Subject: [PATCH 214/443] note decision of implementation Q4 --- web/frontend/src/Systems.root.svelte | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index e1da89f..39d17aa 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -60,9 +60,10 @@ // Todo: Add Idle State Filter (== No allocated Jobs) [Frontend?] // Todo: NodeList: Mindestens Accelerator Scope ... "Show Detail" Switch? - // Todo: Rework GQL Query: Add Paging (Scrollable), Add Nodes Filter (see jobs-onthefly-userfilter), add scopes + // Todo: Rework GQL Query: Add Paging (Scrollable / Paging Configbar), Add Nodes Filter (see jobs-onthefly-userfilter: ccms inkompatibel!), add scopes // Todo: Review performance // observed high client-side load frequency // Is Svelte {#each} -> -> onMount() related : Cannot be skipped ... + // ==> Skip for Q4/24 Release, build from ccms upgrade (paging/filter) up const client = getContextClient(); const nodeQuery = gql`
From 35012b18c5e13b73977229dc8709e890cdefea78 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 25 Oct 2024 14:47:09 +0200 Subject: [PATCH 215/443] one more note --- web/frontend/src/Systems.root.svelte | 1 + 1 file changed, 1 insertion(+)
diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index 0be0bfe..0ffff3f 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -61,6 +61,7 @@ // Todo: Add Idle State Filter (== No allocated Jobs) [Frontend?] // Todo: NodeList: Mindestens Accelerator Scope ... "Show Detail" Switch? // Todo: Rework GQL Query: Add Paging (Scrollable / Paging Configbar), Add Nodes Filter (see jobs-onthefly-userfilter: ccms inkompatibel!), add scopes + // All three issues need either new features in ccms (paging, filter) or new implementation of ccms node queries with scopes (currently very job-specific) // Todo: Review performance // observed high client-side load frequency // Is Svelte {#each} -> -> onMount() related : Cannot be skipped ...
// ==> Skip for Q4/24 Release, build from ccms upgrade (paging/filter) up From ae327f545e75e422685e2eba1ef2c8573d1492e8 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 25 Oct 2024 15:23:49 +0200 Subject: [PATCH 216/443] add logging to rest stopJobHandler --- internal/api/rest.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index e43cf51..f143a7f 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -473,11 +473,11 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) job, err = api.JobRepository.FindById(id) } else { - handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw) + handleError(fmt.Errorf("the parameter 'id' is required"), http.StatusBadRequest, rw) return } if err != nil { - handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw) return } @@ -506,7 +506,7 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) if r.URL.Query().Get("all-metrics") == "true" { data, err = metricdata.LoadData(job, nil, scopes, r.Context()) if err != nil { - log.Warn("Error while loading job data") + log.Warnf("REST: error while loading all-metrics job data for JobID %d on %s", job.JobID, job.Cluster) return } } @@ -571,7 +571,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { return } if err != nil { - handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("finding job with db id %s failed: %w", id, err), http.StatusUnprocessableEntity, rw) return } @@ -603,7 +603,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { data, err := metricdata.LoadData(job, metrics, scopes, r.Context()) if err != nil { - log.Warn("Error while loading job data") + log.Warnf("REST: error while loading job data for JobID %d on %s", job.JobID, job.Cluster) return } @@ -1096,12 +1096,12 @@ func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Job, req StopJobApiRequest) { // Sanity checks if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning { - handleError(errors.New("stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw) + handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger than startTime %d and only running jobs can be stopped (state is: %s)", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix(), job.State), http.StatusBadRequest, rw) return } if req.State != "" && !req.State.Valid() { - handleError(fmt.Errorf("invalid job state: %#v", req.State), http.StatusBadRequest, rw) + handleError(fmt.Errorf("jobId %d (id %d) on %s : invalid requested job state: %#v", job.JobID, job.ID, job.Cluster, req.State), http.StatusBadRequest, rw) return } else if req.State == "" { req.State = schema.JobStateCompleted @@ -1111,11 +1111,11 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo job.Duration = int32(req.StopTime - job.StartTime.Unix()) job.State = req.State if err := api.JobRepository.Stop(job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil { - handleError(fmt.Errorf("marking job as stopped failed: %w", err), 
http.StatusInternalServerError, rw) + handleError(fmt.Errorf("jobId %d (id %d) on %s : marking job as '%s' (duration: %d) in DB failed: %w", job.JobID, job.ID, job.Cluster, job.State, job.Duration, err), http.StatusInternalServerError, rw) return } - log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime) + log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s, duration=%d, state=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime, job.Duration, job.State) // Send a response (with status OK). This means that erros that happen from here on forward // can *NOT* be communicated to the client. If reading from a MetricDataRepository or From 06f24e988fa654218fb5cb9b38f598eb4011271a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 28 Oct 2024 11:56:34 +0100 Subject: [PATCH 217/443] fix incorrect config conditions --- internal/taskManager/updateDurationService.go | 2 +- internal/taskManager/updateFootprintService.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/taskManager/updateDurationService.go b/internal/taskManager/updateDurationService.go index fc7f446..81d799e 100644 --- a/internal/taskManager/updateDurationService.go +++ b/internal/taskManager/updateDurationService.go @@ -14,7 +14,7 @@ import ( func RegisterUpdateDurationWorker() { var frequency string - if config.Keys.CronFrequency.DurationWorker != "" { + if config.Keys.CronFrequency != nil && config.Keys.CronFrequency.DurationWorker != "" { frequency = config.Keys.CronFrequency.DurationWorker } else { frequency = "5m" diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index a0eccc8..efca6d1 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -20,7 +20,7 @@ import ( func RegisterFootprintWorker() { var frequency string - if config.Keys.CronFrequency.FootprintWorker != "" { + if config.Keys.CronFrequency != nil && config.Keys.CronFrequency.FootprintWorker != "" { frequency = config.Keys.CronFrequency.FootprintWorker } else { frequency = "10m" From bf1bff9ace25614bcfcfd753521d1336b25ed0f4 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 28 Oct 2024 16:42:19 +0100 Subject: [PATCH 218/443] fix tagManagement condition --- web/frontend/src/generic/helper/TagManagement.svelte | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/generic/helper/TagManagement.svelte b/web/frontend/src/generic/helper/TagManagement.svelte index 6fc4f48..eb2a8ee 100644 --- a/web/frontend/src/generic/helper/TagManagement.svelte +++ b/web/frontend/src/generic/helper/TagManagement.svelte @@ -120,10 +120,13 @@ function matchJobTags(tags, availableTags, type, isAdmin, isSupport) { const jobTagIds = tags.map((t) => t.id) - if (isAdmin || type == 'used') { // Always show used tags, admin also show all unused + + if (type == 'used') { // Always show used tags return availableTags.filter((at) => jobTagIds.includes(at.id)) } else { // ... for unused - if (isSupport) { // ... show global tags for support + if (isAdmin) { // ... show all tags for admin + return availableTags.filter((at) => !jobTagIds.includes(at.id)) + } else if (isSupport) { // ... show global tags for support return availableTags.filter((at) => !jobTagIds.includes(at.id) && at.scope !== "admin") } else { // ... 
show only private tags for user, manager return availableTags.filter((at) => !jobTagIds.includes(at.id) && at.scope !== "admin" && at.scope !== "global") From f5cc5d07fd1e60b915eddaa898e639634a6f1086 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 29 Oct 2024 17:01:05 +0100 Subject: [PATCH 219/443] add more logging to rest api stopJobByRequest --- internal/api/rest.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/api/rest.go b/internal/api/rest.go index f143a7f..d38a94d 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -929,6 +929,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { return } + log.Printf("loading db job from request for stopJobByRequest... : jobId=%d, cluster=%s, startTime=%d", *req.JobId, *req.Cluster, *req.StartTime) job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) if err != nil { From c4a901504d59d3be147ea06b9173f67e2143e8e0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 29 Oct 2024 18:25:41 +0100 Subject: [PATCH 220/443] change debug format key --- internal/api/rest.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index d38a94d..25f78b5 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -929,7 +929,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { return } - log.Printf("loading db job from request for stopJobByRequest... : jobId=%d, cluster=%s, startTime=%d", *req.JobId, *req.Cluster, *req.StartTime) + log.Printf("loading db job from request for stopJobByRequest... : jobId=%d, cluster=%v, startTime=%d", req.JobId, req.Cluster, req.StartTime) job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) if err != nil { From 597ee1dad761155666b35cb29660a5d3b692d392 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 29 Oct 2024 18:39:23 +0100 Subject: [PATCH 221/443] change log to request and sql prints --- internal/api/rest.go | 2 +- internal/repository/job.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index 25f78b5..eed30b9 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -929,7 +929,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { return } - log.Printf("loading db job from request for stopJobByRequest... : jobId=%d, cluster=%v, startTime=%d", req.JobId, req.Cluster, req.StartTime) + log.Printf("loading db job for stopJobByRequest... 
: stopJobApiRequest=%#v", req) job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) if err != nil { diff --git a/internal/repository/job.go b/internal/repository/job.go index b42598d..54ec6d0 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -235,6 +235,9 @@ func (r *JobRepository) Find( q = q.Where("job.start_time = ?", *startTime) } + s, _, _ := q.ToSql() + log.Printf("trying to find db job with query: %s", s) + log.Debugf("Timer Find %s", time.Since(start)) return scanJob(q.RunWith(r.stmtCache).QueryRow()) } From c120d6517fb32493bb14455917873167757fdd01 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 30 Oct 2024 16:24:58 +0100 Subject: [PATCH 222/443] change logging key, add args, add orderby id job.Find() --- internal/api/rest.go | 2 +- internal/repository/job.go | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index eed30b9..78b28af 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -929,7 +929,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { return } - log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%#v", req) + log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req) job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) if err != nil { diff --git a/internal/repository/job.go b/internal/repository/job.go index 54ec6d0..644d427 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -235,8 +235,10 @@ func (r *JobRepository) Find( q = q.Where("job.start_time = ?", *startTime) } - s, _, _ := q.ToSql() - log.Printf("trying to find db job with query: %s", s) + q = q.OrderBy("job.id DESC") // always use newest matching job by db id + + s, args, _ := q.ToSql() + log.Printf("trying to find db job with query: %s | %v", s, args) log.Debugf("Timer Find %s", time.Since(start)) return scanJob(q.RunWith(r.stmtCache).QueryRow()) From eabc6212eaea3e229882947da3bb1273f58e8647 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 31 Oct 2024 13:36:27 +0100 Subject: [PATCH 223/443] add debug logging for user context and web render --- internal/repository/user.go | 3 ++- internal/routerConfig/routes.go | 11 +++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/internal/repository/user.go b/internal/repository/user.go index 3b7d945..cd7efe9 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -321,9 +321,10 @@ const ContextUserKey ContextKey = "user" func GetUserFromContext(ctx context.Context) *schema.User { x := ctx.Value(ContextUserKey) if x == nil { + log.Warnf("no user retrieved from context") return nil } - + log.Infof("user retrieved from context: %v", x.(*schema.User)) return x.(*schema.User) } diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 1dd6dee..9ed5638 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -254,6 +254,9 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { for _, route := range routes { route := route router.HandleFunc(route.Route, func(rw http.ResponseWriter, r *http.Request) { + + log.Info(">>> HELLO ROUTE HANDLER ...") + conf, err := userCfgRepo.GetUIConfig(repository.GetUserFromContext(r.Context())) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) @@ -261,15 +264,21 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { } title := route.Title + 
log.Infof(">>> >>> ROUTE TITLE : %s ", title) + infos := route.Setup(map[string]interface{}{}, r) if id, ok := infos["id"]; ok { title = strings.Replace(route.Title, "", id.(string), 1) } + log.Infof(">>> >>> ROUTE INFOS : %v ", infos) // Get User -> What if NIL? user := repository.GetUserFromContext(r.Context()) + log.Infof(">>> >>> ROUTE USER : %v ", *user) + // Get Roles availableRoles, _ := schema.GetValidRolesMap(user) + log.Infof(">>> >>> ROUTE AVAILABLE ROLES : %v ", availableRoles) page := web.Page{ Title: title, @@ -279,10 +288,12 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { Config: conf, Infos: infos, } + log.Infof(">>> >>> ROUTE PAGE : %v ", page) if route.Filter { page.FilterPresets = buildFilterPresets(r.URL.Query()) } + log.Infof(">>> >>> ROUTE FILTER : %v ", page.FilterPresets) web.RenderTemplate(rw, route.Template, &page) }) From 2c8b73e2e2ed0c4b62d07faa9a2e9154c8e6df64 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 31 Oct 2024 14:34:32 +0100 Subject: [PATCH 224/443] add logged timing to homeroute calls --- internal/repository/user.go | 2 +- internal/routerConfig/routes.go | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/internal/repository/user.go b/internal/repository/user.go index cd7efe9..19364dc 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -324,7 +324,7 @@ func GetUserFromContext(ctx context.Context) *schema.User { log.Warnf("no user retrieved from context") return nil } - log.Infof("user retrieved from context: %v", x.(*schema.User)) + // log.Infof("user retrieved from context: %v", x.(*schema.User)) return x.(*schema.User) } diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 9ed5638..9540cd3 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -51,15 +51,21 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { jobRepo := repository.GetJobRepository() groupBy := model.AggregateCluster + log.Infof(">>> HELLO HOME ROUTE") + + startJobCount := time.Now() stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy) if err != nil { log.Warnf("failed to count jobs: %s", err.Error()) } + log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount)) + startRunningJobCount := time.Now() stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running") if err != nil { log.Warnf("failed to count running jobs: %s", err.Error()) } + log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount)) i["clusters"] = stats @@ -72,6 +78,8 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { } } + log.Infof("... BYE HOME ROUTE") + return i } @@ -254,7 +262,6 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { for _, route := range routes { route := route router.HandleFunc(route.Route, func(rw http.ResponseWriter, r *http.Request) { - log.Info(">>> HELLO ROUTE HANDLER ...") conf, err := userCfgRepo.GetUIConfig(repository.GetUserFromContext(r.Context())) @@ -264,21 +271,16 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { } title := route.Title - log.Infof(">>> >>> ROUTE TITLE : %s ", title) - infos := route.Setup(map[string]interface{}{}, r) if id, ok := infos["id"]; ok { title = strings.Replace(route.Title, "", id.(string), 1) } - log.Infof(">>> >>> ROUTE INFOS : %v ", infos) // Get User -> What if NIL? 
user := repository.GetUserFromContext(r.Context()) - log.Infof(">>> >>> ROUTE USER : %v ", *user) // Get Roles availableRoles, _ := schema.GetValidRolesMap(user) - log.Infof(">>> >>> ROUTE AVAILABLE ROLES : %v ", availableRoles) page := web.Page{ Title: title, @@ -288,12 +290,11 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { Config: conf, Infos: infos, } - log.Infof(">>> >>> ROUTE PAGE : %v ", page) if route.Filter { page.FilterPresets = buildFilterPresets(r.URL.Query()) } - log.Infof(">>> >>> ROUTE FILTER : %v ", page.FilterPresets) + log.Infof("... ROUTE HANDLED: %s for %v", page.Title, page.User) web.RenderTemplate(rw, route.Template, &page) }) From 939dd2320a9c4b5c48e0afdf22546c9c76b14f5d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 31 Oct 2024 15:47:45 +0100 Subject: [PATCH 225/443] Cleanup debug logging, keep orderBy param for repo.Find --- internal/api/rest.go | 2 +- internal/repository/job.go | 6 +++--- internal/routerConfig/routes.go | 15 ++++----------- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index 78b28af..5f8e15c 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -929,7 +929,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { return } - log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req) + // log.Printf("loading db job for stopJobByRequest... : stopJobApiRequest=%v", req) job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) if err != nil { diff --git a/internal/repository/job.go b/internal/repository/job.go index 644d427..84b13a0 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -235,10 +235,10 @@ func (r *JobRepository) Find( q = q.Where("job.start_time = ?", *startTime) } - q = q.OrderBy("job.id DESC") // always use newest matching job by db id + q = q.OrderBy("job.id DESC") // always use newest matching job by db id if more than one match - s, args, _ := q.ToSql() - log.Printf("trying to find db job with query: %s | %v", s, args) + // s, args, _ := q.ToSql() + // log.Printf("trying to find db job with query: %s | %v", s, args) log.Debugf("Timer Find %s", time.Since(start)) return scanJob(q.RunWith(r.stmtCache).QueryRow()) diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 9540cd3..e7dc924 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -51,21 +51,19 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { jobRepo := repository.GetJobRepository() groupBy := model.AggregateCluster - log.Infof(">>> HELLO HOME ROUTE") - - startJobCount := time.Now() + // startJobCount := time.Now() stats, err := jobRepo.JobCountGrouped(r.Context(), nil, &groupBy) if err != nil { log.Warnf("failed to count jobs: %s", err.Error()) } - log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount)) + // log.Infof("Timer HOME ROUTE startJobCount: %s", time.Since(startJobCount)) - startRunningJobCount := time.Now() + // startRunningJobCount := time.Now() stats, err = jobRepo.AddJobCountGrouped(r.Context(), nil, &groupBy, stats, "running") if err != nil { log.Warnf("failed to count running jobs: %s", err.Error()) } - log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount)) + // log.Infof("Timer HOME ROUTE startRunningJobCount: %s", time.Since(startRunningJobCount)) i["clusters"] = stats @@ -78,8 +76,6 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { } } - 
log.Infof("... BYE HOME ROUTE") - return i } @@ -262,8 +258,6 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { for _, route := range routes { route := route router.HandleFunc(route.Route, func(rw http.ResponseWriter, r *http.Request) { - log.Info(">>> HELLO ROUTE HANDLER ...") - conf, err := userCfgRepo.GetUIConfig(repository.GetUserFromContext(r.Context())) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) @@ -294,7 +288,6 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { if route.Filter { page.FilterPresets = buildFilterPresets(r.URL.Query()) } - log.Infof("... ROUTE HANDLED: %s for %v", page.Title, page.User) web.RenderTemplate(rw, route.Template, &page) }) From ec1ead89ab257a74182e80706996f747caf3569d Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 8 Nov 2024 06:27:27 +0100 Subject: [PATCH 226/443] Switch back to previous meaning of energy metric attribute --- internal/repository/job.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/internal/repository/job.go b/internal/repository/job.go index d1c54a1..ab65426 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -605,12 +605,10 @@ func (r *JobRepository) UpdateEnergy( if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh // Energy: Power (in Watts) * Time (in Seconds) - if sc.MetricConfig[i].Energy == "energy" { + if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) + } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) // Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits energy = math.Round(((LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600/1000)*100) / 100 - // Power: Use directly as sum (Or as: [Energy (in Ws) / Time (in s)] - } else if sc.MetricConfig[i].Energy == "power" { - // This assumes the metric is of aggregation type sum } } else { log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) From f0257a278444c98d34669c231e41adede81c3bd0 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 8 Nov 2024 19:16:56 +0100 Subject: [PATCH 227/443] Drop privileges after server start --- cmd/cc-backend/main.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index 9f7e673..bb72f69 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -204,13 +204,6 @@ func main() { taskManager.Start() serverInit() - // Because this program will want to bind to a privileged port (like 80), the listener must - // be established first, then the user can be changed, and after that, - // the actual http server can be started. - if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil { - log.Fatalf("error while preparing server start: %s", err.Error()) - } - var wg sync.WaitGroup wg.Add(1) @@ -219,6 +212,13 @@ func main() { serverStart() }() + // Because this program will want to bind to a privileged port (like 80), the listener must + // be established first, then the user can be changed, and after that, + // the actual http server can be started. 
+ if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil { + log.Fatalf("error while preparing server start: %s", err.Error()) + } + wg.Add(1) sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) From 492e56a0984cd75150719353567233afb3c1a409 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 8 Nov 2024 19:23:54 +0100 Subject: [PATCH 228/443] Put privilege drop to previous location --- cmd/cc-backend/main.go | 7 ------- cmd/cc-backend/server.go | 8 ++++++++ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index bb72f69..1fc6ae4 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -212,13 +212,6 @@ func main() { serverStart() }() - // Because this program will want to bind to a privileged port (like 80), the listener must - // be established first, then the user can be changed, and after that, - // the actual http server can be started. - if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil { - log.Fatalf("error while preparing server start: %s", err.Error()) - } - wg.Add(1) sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index bc20fcf..3c6fa55 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -27,6 +27,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/generated" "github.com/ClusterCockpit/cc-backend/internal/routerConfig" "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv" "github.com/ClusterCockpit/cc-backend/web" "github.com/gorilla/handlers" "github.com/gorilla/mux" @@ -298,6 +299,13 @@ func serverStart() { } else { fmt.Printf("HTTP server listening at %s...", config.Keys.Addr) } + // + // Because this program will want to bind to a privileged port (like 80), the listener must + // be established first, then the user can be changed, and after that, + // the actual http server can be started. 
+ if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil { + log.Fatalf("error while preparing server start: %s", err.Error()) + } if err = server.Serve(listener); err != nil && err != http.ErrServerClosed { log.Fatalf("starting server failed: %v", err) From 4327c4b1f7fe256293c805816bc990b05c19d547 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 8 Nov 2024 19:44:11 +0100 Subject: [PATCH 229/443] Start archive worker --- cmd/cc-backend/main.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index 1fc6ae4..436379d 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -13,6 +13,7 @@ import ( "sync" "syscall" + "github.com/ClusterCockpit/cc-backend/internal/archiver" "github.com/ClusterCockpit/cc-backend/internal/auth" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/importer" @@ -201,6 +202,7 @@ func main() { return } + archiver.Start(repository.GetJobRepository()) taskManager.Start() serverInit() From 1a87ed8210afc6e2bc7a58767424519b6de07df8 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sat, 9 Nov 2024 09:24:51 +0100 Subject: [PATCH 230/443] Disable UpdateFootprint service for debugging --- internal/taskManager/taskManager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/taskManager/taskManager.go b/internal/taskManager/taskManager.go index 101fc4a..4dbe4ad 100644 --- a/internal/taskManager/taskManager.go +++ b/internal/taskManager/taskManager.go @@ -79,7 +79,7 @@ func Start() { RegisterLdapSyncService(lc.SyncInterval) } - RegisterFootprintWorker() + // RegisterFootprintWorker() RegisterUpdateDurationWorker() s.Start() From 6056341525da8fa42857e05336b7e815bec7864d Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 14 Nov 2024 19:09:56 +0100 Subject: [PATCH 231/443] Remove obsolete Archive Migration Tool --- tools/archive-migration/cluster.go | 65 ---- tools/archive-migration/clusterConfig.go | 166 ---------- tools/archive-migration/float.go | 109 ------- tools/archive-migration/fsBackend.go | 142 --------- tools/archive-migration/job.go | 162 ---------- tools/archive-migration/json.go | 66 ---- tools/archive-migration/main.go | 371 ----------------------- tools/archive-migration/metrics.go | 65 ---- 8 files changed, 1146 deletions(-) delete mode 100644 tools/archive-migration/cluster.go delete mode 100644 tools/archive-migration/clusterConfig.go delete mode 100644 tools/archive-migration/float.go delete mode 100644 tools/archive-migration/fsBackend.go delete mode 100644 tools/archive-migration/job.go delete mode 100644 tools/archive-migration/json.go delete mode 100644 tools/archive-migration/main.go delete mode 100644 tools/archive-migration/metrics.go diff --git a/tools/archive-migration/cluster.go b/tools/archive-migration/cluster.go deleted file mode 100644 index f9a45ad..0000000 --- a/tools/archive-migration/cluster.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
-package main - -import ( - "github.com/ClusterCockpit/cc-backend/pkg/schema" -) - -// type Accelerator struct { -// ID string `json:"id"` -// Type string `json:"type"` -// Model string `json:"model"` -// } - -// type Topology struct { -// Node []int `json:"node"` -// Socket [][]int `json:"socket"` -// MemoryDomain [][]int `json:"memoryDomain"` -// Die [][]int `json:"die"` -// Core [][]int `json:"core"` -// Accelerators []*Accelerator `json:"accelerators"` -// } - -type SubCluster struct { - Name string `json:"name"` - Nodes string `json:"nodes"` - NumberOfNodes int `json:"numberOfNodes"` - ProcessorType string `json:"processorType"` - SocketsPerNode int `json:"socketsPerNode"` - CoresPerSocket int `json:"coresPerSocket"` - ThreadsPerCore int `json:"threadsPerCore"` - FlopRateScalar int `json:"flopRateScalar"` - FlopRateSimd int `json:"flopRateSimd"` - MemoryBandwidth int `json:"memoryBandwidth"` - Topology *schema.Topology `json:"topology"` -} - -// type SubClusterConfig struct { -// Name string `json:"name"` -// Peak float64 `json:"peak"` -// Normal float64 `json:"normal"` -// Caution float64 `json:"caution"` -// Alert float64 `json:"alert"` -// } - -type MetricConfig struct { - Name string `json:"name"` - Unit string `json:"unit"` - Scope schema.MetricScope `json:"scope"` - Aggregation string `json:"aggregation"` - Timestep int `json:"timestep"` - Peak float64 `json:"peak"` - Normal float64 `json:"normal"` - Caution float64 `json:"caution"` - Alert float64 `json:"alert"` - SubClusters []*schema.SubClusterConfig `json:"subClusters"` -} - -type Cluster struct { - Name string `json:"name"` - MetricConfig []*MetricConfig `json:"metricConfig"` - SubClusters []*SubCluster `json:"subClusters"` -} diff --git a/tools/archive-migration/clusterConfig.go b/tools/archive-migration/clusterConfig.go deleted file mode 100644 index 0f9f426..0000000 --- a/tools/archive-migration/clusterConfig.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package main - -import ( - "errors" - "fmt" - - "github.com/ClusterCockpit/cc-backend/pkg/archive" - "github.com/ClusterCockpit/cc-backend/pkg/schema" -) - -var Clusters []*Cluster -var nodeLists map[string]map[string]archive.NodeList - -func initClusterConfig() error { - - Clusters = []*Cluster{} - nodeLists = map[string]map[string]archive.NodeList{} - - for _, c := range ar.GetClusters() { - - cluster, err := ar.LoadClusterCfg(c) - if err != nil { - return err - } - - if len(cluster.Name) == 0 || - len(cluster.MetricConfig) == 0 || - len(cluster.SubClusters) == 0 { - return errors.New("cluster.name, cluster.metricConfig and cluster.SubClusters should not be empty") - } - - for _, mc := range cluster.MetricConfig { - if len(mc.Name) == 0 { - return errors.New("cluster.metricConfig.name should not be empty") - } - if mc.Timestep < 1 { - return errors.New("cluster.metricConfig.timestep should not be smaller than one") - } - - // For backwards compability... 
- if mc.Scope == "" { - mc.Scope = schema.MetricScopeNode - } - if !mc.Scope.Valid() { - return errors.New("cluster.metricConfig.scope must be a valid scope ('node', 'scocket', ...)") - } - } - - Clusters = append(Clusters, cluster) - - nodeLists[cluster.Name] = make(map[string]archive.NodeList) - for _, sc := range cluster.SubClusters { - if sc.Nodes == "" { - continue - } - - nl, err := archive.ParseNodeList(sc.Nodes) - if err != nil { - return fmt.Errorf("in %s/cluster.json: %w", cluster.Name, err) - } - nodeLists[cluster.Name][sc.Name] = nl - } - } - - return nil -} - -func GetCluster(cluster string) *Cluster { - - for _, c := range Clusters { - if c.Name == cluster { - return c - } - } - return nil -} - -func GetSubCluster(cluster, subcluster string) *SubCluster { - - for _, c := range Clusters { - if c.Name == cluster { - for _, p := range c.SubClusters { - if p.Name == subcluster { - return p - } - } - } - } - return nil -} - -func GetMetricConfig(cluster, metric string) *MetricConfig { - - for _, c := range Clusters { - if c.Name == cluster { - for _, m := range c.MetricConfig { - if m.Name == metric { - return m - } - } - } - } - return nil -} - -// AssignSubCluster sets the `job.subcluster` property of the job based -// on its cluster and resources. -func AssignSubCluster(job *BaseJob) error { - - cluster := GetCluster(job.Cluster) - if cluster == nil { - return fmt.Errorf("unkown cluster: %#v", job.Cluster) - } - - if job.SubCluster != "" { - for _, sc := range cluster.SubClusters { - if sc.Name == job.SubCluster { - return nil - } - } - return fmt.Errorf("already assigned subcluster %#v unkown (cluster: %#v)", job.SubCluster, job.Cluster) - } - - if len(job.Resources) == 0 { - return fmt.Errorf("job without any resources/hosts") - } - - host0 := job.Resources[0].Hostname - for sc, nl := range nodeLists[job.Cluster] { - if nl != nil && nl.Contains(host0) { - job.SubCluster = sc - return nil - } - } - - if cluster.SubClusters[0].Nodes == "" { - job.SubCluster = cluster.SubClusters[0].Name - return nil - } - - return fmt.Errorf("no subcluster found for cluster %#v and host %#v", job.Cluster, host0) -} - -func GetSubClusterByNode(cluster, hostname string) (string, error) { - - for sc, nl := range nodeLists[cluster] { - if nl != nil && nl.Contains(hostname) { - return sc, nil - } - } - - c := GetCluster(cluster) - if c == nil { - return "", fmt.Errorf("unkown cluster: %#v", cluster) - } - - if c.SubClusters[0].Nodes == "" { - return c.SubClusters[0].Name, nil - } - - return "", fmt.Errorf("no subcluster found for cluster %#v and host %#v", cluster, hostname) -} diff --git a/tools/archive-migration/float.go b/tools/archive-migration/float.go deleted file mode 100644 index 3fbccf8..0000000 --- a/tools/archive-migration/float.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package main - -import ( - "errors" - "io" - "math" - "strconv" -) - -// A custom float type is used so that (Un)MarshalJSON and -// (Un)MarshalGQL can be overloaded and NaN/null can be used. -// The default behaviour of putting every nullable value behind -// a pointer has a bigger overhead. -type Float float64 - -var NaN Float = Float(math.NaN()) -var nullAsBytes []byte = []byte("null") - -func (f Float) IsNaN() bool { - return math.IsNaN(float64(f)) -} - -// NaN will be serialized to `null`. 
-func (f Float) MarshalJSON() ([]byte, error) { - if f.IsNaN() { - return nullAsBytes, nil - } - - return strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 2, 64), nil -} - -// `null` will be unserialized to NaN. -func (f *Float) UnmarshalJSON(input []byte) error { - s := string(input) - if s == "null" { - *f = NaN - return nil - } - - val, err := strconv.ParseFloat(s, 64) - if err != nil { - return err - } - *f = Float(val) - return nil -} - -// UnmarshalGQL implements the graphql.Unmarshaler interface. -func (f *Float) UnmarshalGQL(v interface{}) error { - f64, ok := v.(float64) - if !ok { - return errors.New("invalid Float scalar") - } - - *f = Float(f64) - return nil -} - -// MarshalGQL implements the graphql.Marshaler interface. -// NaN will be serialized to `null`. -func (f Float) MarshalGQL(w io.Writer) { - if f.IsNaN() { - w.Write(nullAsBytes) - } else { - w.Write(strconv.AppendFloat(make([]byte, 0, 10), float64(f), 'f', 2, 64)) - } -} - -// Only used via REST-API, not via GraphQL. -// This uses a lot less allocations per series, -// but it turns out that the performance increase -// from using this is not that big. -func (s *Series) MarshalJSON() ([]byte, error) { - buf := make([]byte, 0, 512+len(s.Data)*8) - buf = append(buf, `{"hostname":"`...) - buf = append(buf, s.Hostname...) - buf = append(buf, '"') - if s.Id != nil { - buf = append(buf, `,"id":`...) - buf = strconv.AppendInt(buf, int64(*s.Id), 10) - } - if s.Statistics != nil { - buf = append(buf, `,"statistics":{"min":`...) - buf = strconv.AppendFloat(buf, s.Statistics.Min, 'f', 2, 64) - buf = append(buf, `,"avg":`...) - buf = strconv.AppendFloat(buf, s.Statistics.Avg, 'f', 2, 64) - buf = append(buf, `,"max":`...) - buf = strconv.AppendFloat(buf, s.Statistics.Max, 'f', 2, 64) - buf = append(buf, '}') - } - buf = append(buf, `,"data":[`...) - for i := 0; i < len(s.Data); i++ { - if i != 0 { - buf = append(buf, ',') - } - - if s.Data[i].IsNaN() { - buf = append(buf, `null`...) - } else { - buf = strconv.AppendFloat(buf, float64(s.Data[i]), 'f', 2, 32) - } - } - buf = append(buf, ']', '}') - return buf, nil -} diff --git a/tools/archive-migration/fsBackend.go b/tools/archive-migration/fsBackend.go deleted file mode 100644 index 81cf57e..0000000 --- a/tools/archive-migration/fsBackend.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
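The hand-rolled Series.MarshalJSON above trades readability for fewer
allocations by appending into a single pre-sized buffer. A minimal standalone
sketch of the same append-style encoding pattern (illustrative, not part of
the tool):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Grow one buffer instead of allocating intermediate strings.
	buf := make([]byte, 0, 64)
	buf = append(buf, `{"min":`...)
	buf = strconv.AppendFloat(buf, 1.0, 'f', 2, 64)
	buf = append(buf, `,"avg":`...)
	buf = strconv.AppendFloat(buf, 2.5, 'f', 2, 64)
	buf = append(buf, '}')
	fmt.Println(string(buf)) // {"min":1.00,"avg":2.50}
}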
-package main - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strconv" - - "github.com/ClusterCockpit/cc-backend/pkg/log" -) - -type FsArchiveConfig struct { - Path string `json:"path"` -} - -type FsArchive struct { - path string - clusters []string -} - -func getPath( - job *JobMeta, - rootPath string, - file string) string { - - lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000) - return filepath.Join( - rootPath, - job.Cluster, - lvl1, lvl2, - strconv.FormatInt(job.StartTime, 10), file) -} - -func loadJobMeta(filename string) (*JobMeta, error) { - - f, err := os.Open(filename) - if err != nil { - log.Errorf("fsBackend loadJobMeta()- %v", err) - return &JobMeta{}, err - } - defer f.Close() - - return DecodeJobMeta(bufio.NewReader(f)) -} - -func (fsa *FsArchive) Init(rawConfig json.RawMessage) error { - - var config FsArchiveConfig - if err := json.Unmarshal(rawConfig, &config); err != nil { - log.Errorf("fsBackend Init()- %v", err) - return err - } - if config.Path == "" { - err := fmt.Errorf("fsBackend Init()- empty path") - log.Errorf("fsBackend Init()- %v", err) - return err - } - fsa.path = config.Path - - entries, err := os.ReadDir(fsa.path) - if err != nil { - log.Errorf("fsBackend Init()- %v", err) - return err - } - - for _, de := range entries { - fsa.clusters = append(fsa.clusters, de.Name()) - } - - return nil -} - -func (fsa *FsArchive) Iter() <-chan *JobMeta { - - ch := make(chan *JobMeta) - go func() { - clustersDir, err := os.ReadDir(fsa.path) - if err != nil { - log.Fatalf("Reading clusters failed: %s", err.Error()) - } - - for _, clusterDir := range clustersDir { - lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name())) - if err != nil { - log.Fatalf("Reading jobs failed: %s", err.Error()) - } - - for _, lvl1Dir := range lvl1Dirs { - if !lvl1Dir.IsDir() { - // Could be the cluster.json file - continue - } - - lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name())) - if err != nil { - log.Fatalf("Reading jobs failed: %s", err.Error()) - } - - for _, lvl2Dir := range lvl2Dirs { - dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name()) - startTimeDirs, err := os.ReadDir(dirpath) - if err != nil { - log.Fatalf("Reading jobs failed: %s", err.Error()) - } - - for _, startTimeDir := range startTimeDirs { - if startTimeDir.IsDir() { - job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json")) - if err != nil { - log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) - } else { - ch <- job - } - } - } - } - } - } - close(ch) - }() - return ch -} - -func (fsa *FsArchive) LoadClusterCfg(name string) (*Cluster, error) { - b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json")) - if err != nil { - log.Errorf("fsBackend LoadClusterCfg()- %v", err) - return &Cluster{}, err - } - return DecodeCluster(bytes.NewReader(b)) -} - -func (fsa *FsArchive) GetClusters() []string { - return fsa.clusters -} diff --git a/tools/archive-migration/job.go b/tools/archive-migration/job.go deleted file mode 100644 index 8705ce9..0000000 --- a/tools/archive-migration/job.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
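The getPath helper above determines where a job lands in the file archive. A
standalone sketch of the resulting directory layout (all values made up):

package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

// jobPath mirrors getPath: two directory levels derived from the job ID keep
// any single directory from accumulating an unbounded number of entries.
func jobPath(root, cluster string, jobID, startTime int64, file string) string {
	lvl1 := fmt.Sprintf("%d", jobID/1000)
	lvl2 := fmt.Sprintf("%03d", jobID%1000)
	return filepath.Join(root, cluster, lvl1, lvl2, strconv.FormatInt(startTime, 10), file)
}

func main() {
	fmt.Println(jobPath("var/job-archive", "fritz", 123456, 1649723812, "meta.json"))
	// Output: var/job-archive/fritz/123/456/1649723812/meta.json
}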
-package main - -import ( - "errors" - "fmt" - "io" - "time" - - "github.com/ClusterCockpit/cc-backend/pkg/schema" -) - -// Non-Swaggered Comment: BaseJob -// Non-Swaggered Comment: Common subset of Job and JobMeta. Use one of those, not this type directly. - -type BaseJob struct { - // The unique identifier of a job - JobID int64 `json:"jobId" db:"job_id" example:"123000"` - User string `json:"user" db:"user" example:"abcd100h"` // The unique identifier of a user - Project string `json:"project" db:"project" example:"abcd200"` // The unique identifier of a project - Cluster string `json:"cluster" db:"cluster" example:"fritz"` // The unique identifier of a cluster - SubCluster string `json:"subCluster" db:"subcluster" example:"main"` // The unique identifier of a sub cluster - Partition string `json:"partition" db:"partition" example:"main"` // The Slurm partition to which the job was submitted - ArrayJobId int64 `json:"arrayJobId" db:"array_job_id" example:"123000"` // The unique identifier of an array job - NumNodes int32 `json:"numNodes" db:"num_nodes" example:"2" minimum:"1"` // Number of nodes used (Min > 0) - NumHWThreads int32 `json:"numHwthreads" db:"num_hwthreads" example:"20" minimum:"1"` // Number of HWThreads used (Min > 0) - NumAcc int32 `json:"numAcc" db:"num_acc" example:"2" minimum:"1"` // Number of accelerators used (Min > 0) - Exclusive int32 `json:"exclusive" db:"exclusive" example:"1" minimum:"0" maximum:"2"` // Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user - MonitoringStatus int32 `json:"monitoringStatus" db:"monitoring_status" example:"1" minimum:"0" maximum:"3"` // State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull - SMT int32 `json:"smt" db:"smt" example:"4"` // SMT threads used by job - State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` // Final state of job - Duration int32 `json:"duration" db:"duration" example:"43200" minimum:"1"` // Duration of job in seconds (Min > 0) - Walltime int64 `json:"walltime" db:"walltime" example:"86400" minimum:"1"` // Requested walltime of job in seconds (Min > 0) - Tags []*schema.Tag `json:"tags"` // List of tags - RawResources []byte `json:"-" db:"resources"` // Resources used by job [As Bytes] - Resources []*Resource `json:"resources"` // Resources used by job - RawMetaData []byte `json:"-" db:"meta_data"` // Additional information about the job [As Bytes] - MetaData map[string]string `json:"metaData"` // Additional information about the job -} - -// Non-Swaggered Comment: Job -// Non-Swaggered Comment: This type is used as the GraphQL interface and using sqlx as a table row. - -// Job model -// @Description Information of a HPC job. 
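-// The Raw* byte fields above hold the JSON-encoded database columns as
-// scanned by sqlx; the typed Resources and MetaData fields are filled by a
-// separate decode step, e.g. (illustrative):
-//   json.Unmarshal(job.RawResources, &job.Resources)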
-type Job struct { - // The unique identifier of a job in the database - ID int64 `json:"id" db:"id"` - BaseJob - StartTimeUnix int64 `json:"-" db:"start_time" example:"1649723812"` // Start epoch time stamp in seconds - StartTime time.Time `json:"startTime"` // Start time as 'time.Time' data type - MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64 - FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64 - MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64 - LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64 - NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64 - NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64 - FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64 - FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64 -} - -// Non-Swaggered Comment: JobMeta -// Non-Swaggered Comment: When reading from the database or sending data via GraphQL, the start time can be in the much more -// Non-Swaggered Comment: convenient time.Time type. In the `meta.json` files, the start time is encoded as a unix epoch timestamp. -// Non-Swaggered Comment: This is why there is this struct, which contains all fields from the regular job struct, but "overwrites" -// Non-Swaggered Comment: the StartTime field with one of type int64. -// Non-Swaggered Comment: ID *int64 `json:"id,omitempty"` >> never used in the job-archive, only available via REST-API - -// JobMeta model -// @Description Meta data information of a HPC job. -type JobMeta struct { - // The unique identifier of a job in the database - ID *int64 `json:"id,omitempty"` - BaseJob - StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0) - Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job -} - -const ( - MonitoringStatusDisabled int32 = 0 - MonitoringStatusRunningOrArchiving int32 = 1 - MonitoringStatusArchivingFailed int32 = 2 - MonitoringStatusArchivingSuccessful int32 = 3 -) - -var JobDefaults BaseJob = BaseJob{ - Exclusive: 1, - MonitoringStatus: MonitoringStatusRunningOrArchiving, -} - -// JobStatistics model -// @Description Specification for job metric statistics. -type JobStatistics struct { - // Metric unit (see schema/unit.schema.json) - Unit string `json:"unit" example:"GHz"` - Avg float64 `json:"avg" example:"2500" minimum:"0"` // Job metric average - Min float64 `json:"min" example:"2000" minimum:"0"` // Job metric minimum - Max float64 `json:"max" example:"3000" minimum:"0"` // Job metric maximum -} - -// Tag model -// @Description Defines a tag using name and type. 
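-// Converting between the two start-time representations (illustrative):
-//   meta.StartTime = job.StartTime.Unix()          // time.Time -> epoch seconds
-//   job.StartTime  = time.Unix(meta.StartTime, 0)  // epoch seconds -> time.Time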
-type Tag struct { - // The unique DB identifier of a tag - ID int64 `json:"id" db:"id"` - Type string `json:"type" db:"tag_type" example:"Debug"` // Tag Type - Name string `json:"name" db:"tag_name" example:"Testjob"` // Tag Name -} - -// Resource model -// @Description A resource used by a job -type Resource struct { - Hostname string `json:"hostname"` // Name of the host (= node) - HWThreads []int `json:"hwthreads,omitempty"` // List of OS processor ids - Accelerators []string `json:"accelerators,omitempty"` // List of of accelerator device ids - Configuration string `json:"configuration,omitempty"` // The configuration options of the node -} - -type JobState string - -const ( - JobStateRunning JobState = "running" - JobStateCompleted JobState = "completed" - JobStateFailed JobState = "failed" - JobStateCancelled JobState = "cancelled" - JobStateStopped JobState = "stopped" - JobStateTimeout JobState = "timeout" - JobStatePreempted JobState = "preempted" - JobStateOutOfMemory JobState = "out_of_memory" -) - -func (e *JobState) UnmarshalGQL(v interface{}) error { - str, ok := v.(string) - if !ok { - return fmt.Errorf("enums must be strings") - } - - *e = JobState(str) - if !e.Valid() { - return errors.New("invalid job state") - } - - return nil -} - -func (e JobState) MarshalGQL(w io.Writer) { - fmt.Fprintf(w, "\"%s\"", e) -} - -func (e JobState) Valid() bool { - return e == JobStateRunning || - e == JobStateCompleted || - e == JobStateFailed || - e == JobStateCancelled || - e == JobStateStopped || - e == JobStateTimeout || - e == JobStatePreempted || - e == JobStateOutOfMemory -} diff --git a/tools/archive-migration/json.go b/tools/archive-migration/json.go deleted file mode 100644 index b2c281c..0000000 --- a/tools/archive-migration/json.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package main - -import ( - "encoding/json" - "io" - - "github.com/ClusterCockpit/cc-backend/pkg/schema" -) - -func DecodeJobData(r io.Reader) (*JobData, error) { - var d JobData - if err := json.NewDecoder(r).Decode(&d); err != nil { - return nil, err - } - - return &d, nil -} - -func DecodeJobMeta(r io.Reader) (*JobMeta, error) { - var d JobMeta - if err := json.NewDecoder(r).Decode(&d); err != nil { - return nil, err - } - - return &d, nil -} - -func DecodeCluster(r io.Reader) (*Cluster, error) { - var c Cluster - if err := json.NewDecoder(r).Decode(&c); err != nil { - return nil, err - } - - return &c, nil -} - -func EncodeJobData(w io.Writer, d *schema.JobData) error { - // Sanitize parameters - if err := json.NewEncoder(w).Encode(d); err != nil { - return err - } - - return nil -} - -func EncodeJobMeta(w io.Writer, d *schema.JobMeta) error { - // Sanitize parameters - if err := json.NewEncoder(w).Encode(d); err != nil { - return err - } - - return nil -} - -func EncodeCluster(w io.Writer, c *schema.Cluster) error { - // Sanitize parameters - if err := json.NewEncoder(w).Encode(c); err != nil { - return err - } - - return nil -} diff --git a/tools/archive-migration/main.go b/tools/archive-migration/main.go deleted file mode 100644 index b78e94e..0000000 --- a/tools/archive-migration/main.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
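The Decode*/Encode* helpers above all follow one streaming pattern: decode
directly from an io.Reader and encode directly to an io.Writer, with no
intermediate byte slice. A minimal standalone version of that pattern
(illustrative type and values):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

type cluster struct {
	Name string `json:"name"`
}

func main() {
	var c cluster
	// Decode straight from the reader, as DecodeCluster does.
	if err := json.NewDecoder(strings.NewReader(`{"name":"fritz"}`)).Decode(&c); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Encode straight to the writer, as EncodeCluster does.
	_ = json.NewEncoder(os.Stdout).Encode(c) // prints {"name":"fritz"}
}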
-package main - -import ( - "bufio" - "encoding/json" - "errors" - "flag" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" - ccunits "github.com/ClusterCockpit/cc-units" -) - -const Version = 1 - -var ar FsArchive -var srcPath string -var dstPath string - -func loadJobData(filename string) (*JobData, error) { - - f, err := os.Open(filename) - if err != nil { - return &JobData{}, fmt.Errorf("fsBackend loadJobData()- %v", err) - } - defer f.Close() - - return DecodeJobData(bufio.NewReader(f)) -} - -func ConvertUnitString(us string) schema.Unit { - var nu schema.Unit - - if us == "CPI" || - us == "IPC" || - us == "load" || - us == "" { - nu.Base = us - return nu - } - u := ccunits.NewUnit(us) - p := u.GetPrefix() - if p.Prefix() != "" { - prefix := p.Prefix() - nu.Prefix = prefix - } - m := u.GetMeasure() - d := u.GetUnitDenominator() - if d.Short() != "inval" { - nu.Base = fmt.Sprintf("%s/%s", m.Short(), d.Short()) - } else { - nu.Base = m.Short() - } - - return nu -} - -func deepCopyJobMeta(j *JobMeta) schema.JobMeta { - var jn schema.JobMeta - - //required properties - jn.JobID = j.JobID - jn.User = j.User - jn.Project = j.Project - jn.Cluster = j.Cluster - jn.SubCluster = j.SubCluster - jn.NumNodes = j.NumNodes - jn.Exclusive = j.Exclusive - jn.StartTime = j.StartTime - jn.State = schema.JobState(j.State) - jn.Duration = j.Duration - - for _, ro := range j.Resources { - var rn schema.Resource - rn.Hostname = ro.Hostname - rn.Configuration = ro.Configuration - hwt := make([]int, len(ro.HWThreads)) - if ro.HWThreads != nil { - copy(hwt, ro.HWThreads) - } - rn.HWThreads = hwt - acc := make([]string, len(ro.Accelerators)) - if ro.Accelerators != nil { - copy(acc, ro.Accelerators) - } - rn.Accelerators = acc - jn.Resources = append(jn.Resources, &rn) - } - jn.MetaData = make(map[string]string) - - for k, v := range j.MetaData { - jn.MetaData[k] = v - } - - jn.Statistics = make(map[string]schema.JobStatistics) - for k, v := range j.Statistics { - var sn schema.JobStatistics - sn.Avg = v.Avg - sn.Max = v.Max - sn.Min = v.Min - tmpUnit := ConvertUnitString(v.Unit) - if tmpUnit.Base == "inval" { - sn.Unit = schema.Unit{Base: ""} - } else { - sn.Unit = tmpUnit - } - jn.Statistics[k] = sn - } - - //optional properties - jn.Partition = j.Partition - jn.ArrayJobId = j.ArrayJobId - jn.NumHWThreads = j.NumHWThreads - jn.NumAcc = j.NumAcc - jn.MonitoringStatus = j.MonitoringStatus - jn.SMT = j.SMT - jn.Walltime = j.Walltime - - for _, t := range j.Tags { - jn.Tags = append(jn.Tags, t) - } - - return jn -} - -func deepCopyJobData(d *JobData, cluster string, subCluster string) *schema.JobData { - var dn = make(schema.JobData) - - for k, v := range *d { - // fmt.Printf("Metric %s\n", k) - dn[k] = make(map[schema.MetricScope]*schema.JobMetric) - - for mk, mv := range v { - // fmt.Printf("Scope %s\n", mk) - var mn schema.JobMetric - tmpUnit := ConvertUnitString(mv.Unit) - if tmpUnit.Base == "inval" { - mn.Unit = schema.Unit{Base: ""} - } else { - mn.Unit = tmpUnit - } - - mn.Timestep = mv.Timestep - - for _, v := range mv.Series { - var sn schema.Series - sn.Hostname = v.Hostname - if v.Id != nil { - var id = new(string) - - if mk == schema.MetricScopeAccelerator { - s := GetSubCluster(cluster, subCluster) - var err error - - *id, err = s.Topology.GetAcceleratorID(*v.Id) - if err != nil { - log.Fatal(err) - } - - } else { - *id = fmt.Sprint(*v.Id) - } - sn.Id 
= id - } - if v.Statistics != nil { - sn.Statistics = schema.MetricStatistics{ - Avg: v.Statistics.Avg, - Min: v.Statistics.Min, - Max: v.Statistics.Max} - } - - sn.Data = make([]schema.Float, len(v.Data)) - copy(sn.Data, v.Data) - mn.Series = append(mn.Series, sn) - } - - dn[k][mk] = &mn - } - // fmt.Printf("FINISH %s\n", k) - } - - return &dn -} - -func deepCopyClusterConfig(co *Cluster) schema.Cluster { - var cn schema.Cluster - - cn.Name = co.Name - for _, sco := range co.SubClusters { - var scn schema.SubCluster - scn.Name = sco.Name - scn.Nodes = sco.Nodes - scn.ProcessorType = sco.ProcessorType - scn.SocketsPerNode = sco.SocketsPerNode - scn.CoresPerSocket = sco.CoresPerSocket - scn.ThreadsPerCore = sco.ThreadsPerCore - scn.FlopRateScalar = schema.MetricValue{ - Unit: schema.Unit{Base: "F/s", Prefix: "G"}, - Value: float64(sco.FlopRateScalar)} - scn.FlopRateSimd = schema.MetricValue{ - Unit: schema.Unit{Base: "F/s", Prefix: "G"}, - Value: float64(sco.FlopRateSimd)} - scn.MemoryBandwidth = schema.MetricValue{ - Unit: schema.Unit{Base: "B/s", Prefix: "G"}, - Value: float64(sco.MemoryBandwidth)} - scn.Topology = *sco.Topology - cn.SubClusters = append(cn.SubClusters, &scn) - } - - for _, mco := range co.MetricConfig { - var mcn schema.MetricConfig - mcn.Name = mco.Name - mcn.Scope = mco.Scope - if mco.Aggregation == "" { - fmt.Println("cluster.json - Property aggregation missing! Please review file!") - mcn.Aggregation = "sum" - } else { - mcn.Aggregation = mco.Aggregation - } - mcn.Timestep = mco.Timestep - tmpUnit := ConvertUnitString(mco.Unit) - if tmpUnit.Base == "inval" { - mcn.Unit = schema.Unit{Base: ""} - } else { - mcn.Unit = tmpUnit - } - mcn.Peak = mco.Peak - mcn.Normal = mco.Normal - mcn.Caution = mco.Caution - mcn.Alert = mco.Alert - mcn.SubClusters = mco.SubClusters - - cn.MetricConfig = append(cn.MetricConfig, &mcn) - } - - return cn -} - -func convertJob(job *JobMeta) { - // check if source data is available, otherwise skip job - src_data_path := getPath(job, srcPath, "data.json") - info, err := os.Stat(src_data_path) - if err != nil { - log.Fatal(err) - } - if info.Size() == 0 { - fmt.Printf("Skip path %s, filesize is 0 Bytes.", src_data_path) - return - } - - path := getPath(job, dstPath, "meta.json") - err = os.MkdirAll(filepath.Dir(path), 0750) - if err != nil { - log.Fatal(err) - } - f, err := os.Create(path) - if err != nil { - log.Fatal(err) - } - - jmn := deepCopyJobMeta(job) - if err = EncodeJobMeta(f, &jmn); err != nil { - log.Fatal(err) - } - if err = f.Close(); err != nil { - log.Fatal(err) - } - - f, err = os.Create(getPath(job, dstPath, "data.json")) - if err != nil { - log.Fatal(err) - } - - var jd *JobData - jd, err = loadJobData(src_data_path) - if err != nil { - log.Fatal(err) - } - jdn := deepCopyJobData(jd, job.Cluster, job.SubCluster) - if err := EncodeJobData(f, jdn); err != nil { - log.Fatal(err) - } - if err := f.Close(); err != nil { - log.Fatal(err) - } -} - -func main() { - var flagLogLevel, flagConfigFile string - var flagLogDateTime, debug bool - - flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages") - flag.BoolVar(&debug, "debug", false, "Set this flag to force sequential execution for debugging") - flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`") - flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`") - flag.StringVar(&srcPath, "src", "./var/job-archive", "Specify the 
source job archive path") - flag.StringVar(&dstPath, "dst", "./var/job-archive-new", "Specify the destination job archive path") - flag.Parse() - - if _, err := os.Stat(filepath.Join(srcPath, "version.txt")); !errors.Is(err, os.ErrNotExist) { - log.Fatal("Archive version exists!") - } - - log.Init(flagLogLevel, flagLogDateTime) - config.Init(flagConfigFile) - srcConfig := fmt.Sprintf("{\"path\": \"%s\"}", srcPath) - err := ar.Init(json.RawMessage(srcConfig)) - if err != nil { - log.Fatal(err) - } - - err = initClusterConfig() - if err != nil { - log.Fatal(err) - } - // setup new job archive - err = os.Mkdir(dstPath, 0750) - if err != nil { - log.Fatal(err) - } - - for _, c := range Clusters { - path := fmt.Sprintf("%s/%s", dstPath, c.Name) - fmt.Println(path) - err = os.Mkdir(path, 0750) - if err != nil { - log.Fatal(err) - } - cn := deepCopyClusterConfig(c) - - f, err := os.Create(fmt.Sprintf("%s/%s/cluster.json", dstPath, c.Name)) - if err != nil { - log.Fatal(err) - } - if err := EncodeCluster(f, &cn); err != nil { - log.Fatal(err) - } - if err := f.Close(); err != nil { - log.Fatal(err) - } - } - - var wg sync.WaitGroup - - for job := range ar.Iter() { - if debug { - fmt.Printf("Job %d\n", job.JobID) - convertJob(job) - } else { - job := job - wg.Add(1) - - go func() { - defer wg.Done() - convertJob(job) - }() - } - } - - wg.Wait() - os.WriteFile(filepath.Join(dstPath, "version.txt"), []byte(fmt.Sprintf("%d", Version)), 0644) -} diff --git a/tools/archive-migration/metrics.go b/tools/archive-migration/metrics.go deleted file mode 100644 index ec5de6f..0000000 --- a/tools/archive-migration/metrics.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
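The conversion loop in main() above starts one goroutine per job; the
`job := job` copy is required before Go 1.22, where closures in a loop would
otherwise all capture the same loop variable. A standalone sketch of a bounded
variant (the jobs channel and the print stand in for ar.Iter() and
convertJob):

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int)
	go func() {
		for i := 0; i < 20; i++ {
			jobs <- i
		}
		close(jobs)
	}()

	sem := make(chan struct{}, 4) // at most 4 conversions in flight
	var wg sync.WaitGroup
	for job := range jobs {
		job := job // per-iteration copy, see note above
		wg.Add(1)
		sem <- struct{}{} // acquire a slot
		go func() {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fmt.Println("convert job", job)
		}()
	}
	wg.Wait()
}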
-package main - -import ( - "github.com/ClusterCockpit/cc-backend/pkg/schema" -) - -type JobData map[string]map[schema.MetricScope]*JobMetric - -type JobMetric struct { - Unit string `json:"unit"` - Scope schema.MetricScope `json:"scope"` - Timestep int `json:"timestep"` - Series []Series `json:"series"` - StatisticsSeries *StatsSeries `json:"statisticsSeries"` -} - -type Series struct { - Hostname string `json:"hostname"` - Id *int `json:"id,omitempty"` - Statistics *MetricStatistics `json:"statistics"` - Data []schema.Float `json:"data"` -} - -type MetricStatistics struct { - Avg float64 `json:"avg"` - Min float64 `json:"min"` - Max float64 `json:"max"` -} - -type StatsSeries struct { - Mean []Float `json:"mean"` - Min []Float `json:"min"` - Max []Float `json:"max"` - Percentiles map[int][]Float `json:"percentiles,omitempty"` -} - -// type MetricScope string - -// const ( -// MetricScopeInvalid MetricScope = "invalid_scope" - -// MetricScopeNode MetricScope = "node" -// MetricScopeSocket MetricScope = "socket" -// MetricScopeMemoryDomain MetricScope = "memoryDomain" -// MetricScopeCore MetricScope = "core" -// MetricScopeHWThread MetricScope = "hwthread" - -// MetricScopeAccelerator MetricScope = "accelerator" -// ) - -// var metricScopeGranularity map[MetricScope]int = map[MetricScope]int{ -// MetricScopeNode: 10, -// MetricScopeSocket: 5, -// MetricScopeMemoryDomain: 3, -// MetricScopeCore: 2, -// MetricScopeHWThread: 1, - -// MetricScopeAccelerator: 5, // Special/Randomly choosen - -// MetricScopeInvalid: -1, -// } From ff37f71fdb109913e2dc6df955ee2d221bbfb7ce Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 14 Nov 2024 19:10:37 +0100 Subject: [PATCH 232/443] Increase job archive required version --- pkg/archive/archive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 52a760f..c6c04e4 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -14,7 +14,7 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/schema" ) -const Version uint64 = 1 +const Version uint64 = 2 type ArchiveBackend interface { Init(rawConfig json.RawMessage) (uint64, error) From 92ec64d80f38019d5f491dcd36bb687dbf18f875 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 14 Nov 2024 19:10:55 +0100 Subject: [PATCH 233/443] Update demo config file --- configs/config-demo.json | 117 +++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 53 deletions(-) diff --git a/configs/config-demo.json b/configs/config-demo.json index 8423758..e8d4570 100644 --- a/configs/config-demo.json +++ b/configs/config-demo.json @@ -1,56 +1,67 @@ { - "addr": "127.0.0.1:8080", - "archive": { - "kind": "file", - "path": "./var/job-archive" - }, - "jwts": { - "max-age": "2000h" - }, - "clusters": [ - { - "name": "fritz", - "metricDataRepository": { - "kind": "cc-metric-store", - "url": "http://localhost:8082", - "token": "" - }, - "filterRanges": { - "numNodes": { - "from": 1, - "to": 64 - }, - "duration": { - "from": 0, - "to": 86400 - }, - "startTime": { - "from": "2022-01-01T00:00:00Z", - "to": null - } - } - }, - { - "name": "alex", - "metricDataRepository": { - "kind": "cc-metric-store", - "url": "http://localhost:8082", - "token": "" - }, - "filterRanges": { - "numNodes": { - "from": 1, - "to": 64 - }, - "duration": { - "from": 0, - "to": 86400 - }, - "startTime": { - "from": "2022-01-01T00:00:00Z", - "to": null - } - } - } + "addr": "127.0.0.1:8080", + "short-running-jobs-duration": 300, + "archive": { + "kind": "file", + 
"path": "./var/job-archive" + }, + "jwts": { + "max-age": "2000h" + }, + "enable-resampling": { + "trigger": 30, + "resolutions": [ + 600, + 300, + 120, + 60 ] + }, + "emission-constant": 317, + "clusters": [ + { + "name": "fritz", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": "2022-01-01T00:00:00Z", + "to": null + } + } + }, + { + "name": "alex", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": "2022-01-01T00:00:00Z", + "to": null + } + } + } + ] } From 210a7d3136f38cf13a487360153750d80399de9e Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 14 Nov 2024 19:13:07 +0100 Subject: [PATCH 234/443] Debugging initDB archive import Footprint working EnergyFootprint still missing --- internal/importer/initDB.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index 5f06f36..afcde77 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -81,7 +81,7 @@ func InitDB() error { name := fmt.Sprintf("%s_%s", fp, statType) - job.Footprint[fp] = repository.LoadJobStat(jobMeta, name, statType) + job.Footprint[name] = repository.LoadJobStat(jobMeta, fp, statType) } job.RawFootprint, err = json.Marshal(job.Footprint) From cdd45ce88b0a03cae4ea457c4bf092fcb77ffb7e Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sat, 16 Nov 2024 06:36:55 +0100 Subject: [PATCH 235/443] Fix importers and add Energy footprint to import --- internal/importer/handleImport.go | 32 ++++++++++++++++++++++++++++++- internal/importer/initDB.go | 29 ++++++++++++++++++++++++++++ internal/repository/jobCreate.go | 4 ++-- 3 files changed, 62 insertions(+), 3 deletions(-) diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go index 153402a..01773a5 100644 --- a/internal/importer/handleImport.go +++ b/internal/importer/handleImport.go @@ -8,6 +8,7 @@ import ( "bytes" "encoding/json" "fmt" + "math" "os" "strings" @@ -84,7 +85,8 @@ func HandleImportFlag(flag string) error { } name := fmt.Sprintf("%s_%s", fp, statType) - job.Footprint[fp] = repository.LoadJobStat(&job, name, statType) + + job.Footprint[name] = repository.LoadJobStat(&job, fp, statType) } job.RawFootprint, err = json.Marshal(job.Footprint) @@ -92,6 +94,34 @@ func HandleImportFlag(flag string) error { log.Warn("Error while marshaling job footprint") return err } + + job.EnergyFootprint = make(map[string]float64) + var totalEnergy float64 + var energy float64 + + for _, fp := range sc.EnergyFootprint { + if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { + // Note: For DB data, calculate and save as kWh + // Energy: Power (in Watts) * Time (in Seconds) + if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) + } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) + // Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits + energy = math.Round(((repository.LoadJobStat(&job, fp, "avg")*float64(job.Duration))/3600/1000)*100) / 100 + } + } else { + log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID) + } + + 
job.EnergyFootprint[fp] = energy + totalEnergy += energy + } + + job.Energy = (math.Round(totalEnergy*100) / 100) + if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil { + log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID) + return err + } + job.RawResources, err = json.Marshal(job.Resources) if err != nil { log.Warn("Error while marshaling job resources") diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index afcde77..fa2ee6e 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -7,6 +7,7 @@ package importer import ( "encoding/json" "fmt" + "math" "strings" "time" @@ -70,6 +71,7 @@ func InitDB() error { log.Errorf("cannot get subcluster: %s", err.Error()) return err } + job.Footprint = make(map[string]float64) for _, fp := range sc.Footprint { @@ -90,6 +92,33 @@ func InitDB() error { return err } + job.EnergyFootprint = make(map[string]float64) + var totalEnergy float64 + var energy float64 + + for _, fp := range sc.EnergyFootprint { + if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { + // Note: For DB data, calculate and save as kWh + // Energy: Power (in Watts) * Time (in Seconds) + if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) + } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) + // Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits + energy = math.Round(((repository.LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600/1000)*100) / 100 + } + } else { + log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) + } + + job.EnergyFootprint[fp] = energy + totalEnergy += energy + } + + job.Energy = (math.Round(totalEnergy*100) / 100) + if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil { + log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) + return err + } + job.RawResources, err = json.Marshal(job.Resources) if err != nil { log.Errorf("repository initDB(): %v", err) diff --git a/internal/repository/jobCreate.go b/internal/repository/jobCreate.go index 43c26c1..1b05b52 100644 --- a/internal/repository/jobCreate.go +++ b/internal/repository/jobCreate.go @@ -15,10 +15,10 @@ import ( const NamedJobInsert string = `INSERT INTO job ( job_id, user, project, cluster, subcluster, ` + "`partition`" + `, array_job_id, num_nodes, num_hwthreads, num_acc, - exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, footprint, resources, meta_data + exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, footprint, energy, energy_footprint, resources, meta_data ) VALUES ( :job_id, :user, :project, :cluster, :subcluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, - :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :footprint, :resources, :meta_data + :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :walltime, :footprint, :energy, :energy_footprint, :resources, :meta_data );` func (r *JobRepository) InsertJob(job *schema.JobMeta) (int64, error) { From 3ab26172c4b18cb71425de6bb8f56c6a829ee28c Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sat, 16 Nov 2024 07:03:29 +0100 Subject: [PATCH 236/443] Port tests to new job archive version --- internal/api/api_test.go | 2 +- internal/importer/importer_test.go | 2 +- 
pkg/archive/fsBackend_test.go | 2 +- pkg/archive/testdata/archive/version.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 0312e43..3d1d7bb 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -120,7 +120,7 @@ func setup(t *testing.T) *api.RestApi { t.Fatal(err) } - if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 1)), 0666); err != nil { + if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 2)), 0666); err != nil { t.Fatal(err) } diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go index ce0d2e1..4e839cf 100644 --- a/internal/importer/importer_test.go +++ b/internal/importer/importer_test.go @@ -82,7 +82,7 @@ func setup(t *testing.T) *repository.JobRepository { if err := os.Mkdir(jobarchive, 0777); err != nil { t.Fatal(err) } - if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 1)), 0666); err != nil { + if err := os.WriteFile(filepath.Join(jobarchive, "version.txt"), []byte(fmt.Sprintf("%d", 2)), 0666); err != nil { t.Fatal(err) } fritzArchive := filepath.Join(tmpdir, "job-archive", "fritz") diff --git a/pkg/archive/fsBackend_test.go b/pkg/archive/fsBackend_test.go index d60e478..9db68ed 100644 --- a/pkg/archive/fsBackend_test.go +++ b/pkg/archive/fsBackend_test.go @@ -48,7 +48,7 @@ func TestInit(t *testing.T) { if fsa.path != "testdata/archive" { t.Fail() } - if version != 1 { + if version != 2 { t.Fail() } if len(fsa.clusters) != 3 || fsa.clusters[1] != "emmy" { diff --git a/pkg/archive/testdata/archive/version.txt b/pkg/archive/testdata/archive/version.txt index d00491f..0cfbf08 100644 --- a/pkg/archive/testdata/archive/version.txt +++ b/pkg/archive/testdata/archive/version.txt @@ -1 +1 @@ -1 +2 From c093cca8b15eaad988f186d1874291ea6eb19fa4 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sat, 16 Nov 2024 07:45:18 +0100 Subject: [PATCH 237/443] Update README --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5ce9125..ce093d2 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ cd ./cc-backend ./startDemo.sh ``` -You can also try the demo using the lates release binary. +You can also try the demo using the latest release binary. Create a folder and put the release binary `cc-backend` into this folder. Execute the following steps: @@ -88,7 +88,9 @@ Analysis, Systems and Status views). There is a Makefile to automate the build of cc-backend. The Makefile supports the following targets: -* `make`: Initialize `var` directory and build svelte frontend and backend binary. Note that there is no proper prerequesite handling. Any change of frontend source files will result in a complete rebuild. +* `make`: Initialize `var` directory and build svelte frontend and backend +binary. Note that there is no proper prerequisite handling. Any change of +frontend source files will result in a complete rebuild. * `make clean`: Clean go build cache and remove binary. * `make test`: Run the tests that are also run in the GitHub workflow setup. @@ -147,8 +149,6 @@ contains Go packages that can be used by other projects. Additional command line helper tools. * [`archive-manager`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-manager) Commands for getting infos about and existing job archive. 
- * [`archive-migration`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-migration) - Tool to migrate from previous to current job archive version. * [`convert-pem-pubkey`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/convert-pem-pubkey) Tool to convert external pubkey for use in `cc-backend`. * [`gen-keypair`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/gen-keypair) From fc1c54a1410544b5e483318c3ff7980c7a8e8e62 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 21 Nov 2024 14:39:03 +0100 Subject: [PATCH 238/443] fix: use left join to keep unmatched stats query result rows --- internal/repository/stats.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ba7a8aa..f5677ad 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -86,7 +86,7 @@ func (r *JobRepository) buildStatsQuery( fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s) as totalCoreHours`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s) as totalAccs`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType), - ).From("job").Join("user ON user.username = job.user").GroupBy(col) + ).From("job").LeftJoin("user ON user.username = job.user").GroupBy(col) } else { // Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours query = sq.Select("COUNT(job.id)", @@ -226,6 +226,8 @@ func (r *JobRepository) JobsStatsGrouped( TotalAccHours: totalAccHours, }) } else { + log.Debugf(">>>> STATS ID %s", id.String) + log.Debugf(">>>> STATS TOTALNODES %d", totalNodes) stats = append(stats, &model.JobsStatistics{ ID: id.String, From 7f43c88a39db4aa75bb53b45190dfd3ffd1f6293 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 21 Nov 2024 14:54:04 +0100 Subject: [PATCH 239/443] Add example config for mariadb backend --- configs/config-mariadb.json | 69 +++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 configs/config-mariadb.json diff --git a/configs/config-mariadb.json b/configs/config-mariadb.json new file mode 100644 index 0000000..e068439 --- /dev/null +++ b/configs/config-mariadb.json @@ -0,0 +1,69 @@ +{ + "addr": "127.0.0.1:8080", + "short-running-jobs-duration": 300, + "archive": { + "kind": "file", + "path": "./var/job-archive" + }, + "jwts": { + "max-age": "2000h" + }, + "db-driver": "mysql", + "db": "clustercockpit:demo@tcp(127.0.0.1:3306)/clustercockpit", + "enable-resampling": { + "trigger": 30, + "resolutions": [ + 600, + 300, + 120, + 60 + ] + }, + "emission-constant": 317, + "clusters": [ + { + "name": "fritz", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": "2022-01-01T00:00:00Z", + "to": null + } + } + }, + { + "name": "alex", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": 
"2022-01-01T00:00:00Z", + "to": null + } + } + } + ] +} From 35bd7739c647d79be80fd762b2140a08ae79e7fd Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 21 Nov 2024 15:02:30 +0100 Subject: [PATCH 240/443] fix: Replace reserved keywords in database schemas Port migration to mariadb --- internal/repository/job.go | 6 +- internal/repository/jobCreate.go | 4 +- internal/repository/jobFind.go | 2 +- internal/repository/jobQuery.go | 10 +- internal/repository/migration.go | 8 ++ .../mysql/08_add-footprint.down.sql | 21 +++ .../migrations/mysql/08_add-footprint.up.sql | 123 ++++++++++++++++++ .../sqlite3/08_add-footprint.up.sql | 75 ++++++----- internal/repository/stats.go | 8 +- internal/repository/user.go | 26 ++-- pkg/schema/job.go | 4 +- 11 files changed, 222 insertions(+), 65 deletions(-) create mode 100644 internal/repository/migrations/mysql/08_add-footprint.down.sql create mode 100644 internal/repository/migrations/mysql/08_add-footprint.up.sql diff --git a/internal/repository/job.go b/internal/repository/job.go index ab65426..592997e 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -51,7 +51,7 @@ func GetJobRepository() *JobRepository { } var jobColumns []string = []string{ - "job.id", "job.job_id", "job.user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.partition", "job.array_job_id", + "job.id", "job.job_id", "job.hpc_user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.cluster_partition", "job.array_job_id", "job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state", "job.duration", "job.walltime", "job.resources", "job.footprint", "job.energy", } @@ -314,7 +314,7 @@ func (r *JobRepository) FindUserOrProjectOrJobname(user *schema.User, searchterm return "", uresult, "", "" } // Find username by name (like) - nresult, _ := r.FindColumnValue(user, searchterm, "user", "username", "name", true) + nresult, _ := r.FindColumnValue(user, searchterm, "hpc_user", "username", "name", true) if nresult != "" { return "", nresult, "", "" } @@ -400,7 +400,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) { start := time.Now() partitions := r.cache.Get("partitions:"+cluster, func() (interface{}, time.Duration, int) { parts := []string{} - if err = r.DB.Select(&parts, `SELECT DISTINCT job.partition FROM job WHERE job.cluster = ?;`, cluster); err != nil { + if err = r.DB.Select(&parts, `SELECT DISTINCT job.cluster_partition FROM job WHERE job.cluster = ?;`, cluster); err != nil { return nil, 0, 1000 } diff --git a/internal/repository/jobCreate.go b/internal/repository/jobCreate.go index 1b05b52..9e47974 100644 --- a/internal/repository/jobCreate.go +++ b/internal/repository/jobCreate.go @@ -14,10 +14,10 @@ import ( ) const NamedJobInsert string = `INSERT INTO job ( - job_id, user, project, cluster, subcluster, ` + "`partition`" + `, array_job_id, num_nodes, num_hwthreads, num_acc, + job_id, hpc_user, project, cluster, subcluster, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, footprint, energy, energy_footprint, resources, meta_data ) VALUES ( - :job_id, :user, :project, :cluster, :subcluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, + :job_id, :hpc_user, :project, :cluster, :subcluster, :cluster_partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, :exclusive, :monitoring_status, :smt, :job_state, 
:start_time, :duration, :walltime, :footprint, :energy, :energy_footprint, :resources, :meta_data );` diff --git a/internal/repository/jobFind.go b/internal/repository/jobFind.go index a383eb6..ff5a936 100644 --- a/internal/repository/jobFind.go +++ b/internal/repository/jobFind.go @@ -136,7 +136,7 @@ func (r *JobRepository) IsJobOwner(jobId int64, startTime int64, user string, cl q := sq.Select("id"). From("job"). Where("job.job_id = ?", jobId). - Where("job.user = ?", user). + Where("job.hpc_user = ?", user). Where("job.cluster = ?", cluster). Where("job.start_time = ?", startTime) diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index 5458043..c9812a3 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -121,13 +121,13 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde return query, nil case user.HasRole(schema.RoleManager): // Manager : Add filter for managed projects' jobs only + personal jobs if len(user.Projects) != 0 { - return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.user": user.Username}}), nil + return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.hpc_user": user.Username}}), nil } else { log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username) - return query.Where("job.user = ?", user.Username), nil + return query.Where("job.hpc_user = ?", user.Username), nil } case user.HasRole(schema.RoleUser): // User : Only personal jobs - return query.Where("job.user = ?", user.Username), nil + return query.Where("job.hpc_user = ?", user.Username), nil default: // No known Role, return error var qnil sq.SelectBuilder return qnil, fmt.Errorf("user has no or unknown roles") @@ -147,7 +147,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = query.Where("job.array_job_id = ?", *filter.ArrayJobID) } if filter.User != nil { - query = buildStringCondition("job.user", filter.User, query) + query = buildStringCondition("job.hpc_user", filter.User, query) } if filter.Project != nil { query = buildStringCondition("job.project", filter.Project, query) @@ -159,7 +159,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = buildStringCondition("job.cluster", filter.Cluster, query) } if filter.Partition != nil { - query = buildStringCondition("job.partition", filter.Partition, query) + query = buildStringCondition("job.cluster_partition", filter.Partition, query) } if filter.StartTime != nil { query = buildTimeCondition("job.start_time", filter.StartTime, query) diff --git a/internal/repository/migration.go b/internal/repository/migration.go index 970fbc2..d32a624 100644 --- a/internal/repository/migration.go +++ b/internal/repository/migration.go @@ -114,6 +114,14 @@ func MigrateDB(backend string, db string) error { return err } + v, dirty, err := m.Version() + + log.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version) + + if dirty { + return fmt.Errorf("last migration to version %d has failed, please fix the db manually and force version with -force-db flag", Version) + } + if err := m.Up(); err != nil { if err == migrate.ErrNoChange { log.Info("DB already up to date!") diff --git a/internal/repository/migrations/mysql/08_add-footprint.down.sql b/internal/repository/migrations/mysql/08_add-footprint.down.sql new file mode 100644 index 0000000..8c99eb5 --- 
/dev/null +++ b/internal/repository/migrations/mysql/08_add-footprint.down.sql @@ -0,0 +1,21 @@ +ALTER TABLE job DROP energy; +ALTER TABLE job DROP energy_footprint; +ALTER TABLE job ADD COLUMN flops_any_avg; +ALTER TABLE job ADD COLUMN mem_bw_avg; +ALTER TABLE job ADD COLUMN mem_used_max; +ALTER TABLE job ADD COLUMN load_avg; +ALTER TABLE job ADD COLUMN net_bw_avg; +ALTER TABLE job ADD COLUMN net_data_vol_total; +ALTER TABLE job ADD COLUMN file_bw_avg; +ALTER TABLE job ADD COLUMN file_data_vol_total; + +UPDATE job SET flops_any_avg = json_extract(footprint, '$.flops_any_avg'); +UPDATE job SET mem_bw_avg = json_extract(footprint, '$.mem_bw_avg'); +UPDATE job SET mem_used_max = json_extract(footprint, '$.mem_used_max'); +UPDATE job SET load_avg = json_extract(footprint, '$.cpu_load_avg'); +UPDATE job SET net_bw_avg = json_extract(footprint, '$.net_bw_avg'); +UPDATE job SET net_data_vol_total = json_extract(footprint, '$.net_data_vol_total'); +UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg'); +UPDATE job SET file_data_vol_total = json_extract(footprint, '$.file_data_vol_total'); + +ALTER TABLE job DROP footprint; diff --git a/internal/repository/migrations/mysql/08_add-footprint.up.sql b/internal/repository/migrations/mysql/08_add-footprint.up.sql new file mode 100644 index 0000000..207ccf9 --- /dev/null +++ b/internal/repository/migrations/mysql/08_add-footprint.up.sql @@ -0,0 +1,123 @@ +DROP INDEX IF EXISTS job_stats ON job; +DROP INDEX IF EXISTS job_by_user ON job; +DROP INDEX IF EXISTS job_by_starttime ON job; +DROP INDEX IF EXISTS job_by_job_id ON job; +DROP INDEX IF EXISTS job_list ON job; +DROP INDEX IF EXISTS job_list_user ON job; +DROP INDEX IF EXISTS job_list_users ON job; +DROP INDEX IF EXISTS job_list_users_start ON job; + +ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; +ALTER TABLE job ADD COLUMN energy_footprint JSON; + +ALTER TABLE job ADD COLUMN footprint JSON; +ALTER TABLE tag ADD COLUMN tag_scope TEXT NOT NULL DEFAULT 'global'; + +-- Do not use reserved keywords anymore +RENAME TABLE `user` TO hpc_user; +ALTER TABLE job RENAME COLUMN `user` TO hpc_user; +ALTER TABLE job RENAME COLUMN `partition` TO cluster_partition; + +ALTER TABLE job MODIFY COLUMN cluster VARCHAR(50); +ALTER TABLE job MODIFY COLUMN hpc_user VARCHAR(50); +ALTER TABLE job MODIFY COLUMN subcluster VARCHAR(50); +ALTER TABLE job MODIFY COLUMN project VARCHAR(50); +ALTER TABLE job MODIFY COLUMN cluster_partition VARCHAR(50); +ALTER TABLE job MODIFY COLUMN job_state VARCHAR(25); + +UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; +UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg); +UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); +UPDATE job SET footprint = json_insert(footprint, '$.mem_used_max', job.mem_used_max); +UPDATE job SET footprint = json_insert(footprint, '$.cpu_load_avg', job.load_avg); +UPDATE job SET footprint = json_insert(footprint, '$.net_bw_avg', job.net_bw_avg) WHERE job.net_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.net_data_vol_total', job.net_data_vol_total) WHERE job.net_data_vol_total != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_bw_avg', job.file_bw_avg) WHERE job.file_bw_avg != 0; +UPDATE job SET footprint = json_insert(footprint, '$.file_data_vol_total', job.file_data_vol_total) WHERE job.file_data_vol_total != 0; + +ALTER TABLE job DROP flops_any_avg; +ALTER TABLE job DROP mem_bw_avg; +ALTER TABLE job DROP mem_used_max; +ALTER TABLE 
job DROP load_avg; +ALTER TABLE job DROP net_bw_avg; +ALTER TABLE job DROP net_data_vol_total; +ALTER TABLE job DROP file_bw_avg; +ALTER TABLE job DROP file_data_vol_total; + +-- Indices for: Single filters, combined filters, sorting, sorting with filters +-- Cluster Filter +CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); +-- Cluster Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_duration ON job (cluster, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_numnodes ON job (cluster, num_nodes); + +-- Cluster+Partition Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, cluster_partition); +-- Cluster+Partition Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, cluster_partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, cluster_partition, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, cluster_partition, num_nodes); + +-- Cluster+Partition+Jobstate Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, cluster_partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, cluster_partition, job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, cluster_partition, job_state, project); +-- Cluster+Partition+Jobstate Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, cluster_partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (cluster, cluster_partition, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, cluster_partition, job_state, num_nodes); + +-- Cluster+JobState Filter +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); +-- Cluster+JobState Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_duration ON job (cluster, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numnodes ON job (cluster, job_state, num_nodes); + +-- User Filter +CREATE INDEX IF NOT EXISTS jobs_user ON job (hpc_user); +-- User Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (hpc_user, start_time); +CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (hpc_user, duration); +CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (hpc_user, num_nodes); + +-- Project Filter +CREATE INDEX IF NOT EXISTS jobs_project ON job (project); +CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, hpc_user); +-- Project Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); +CREATE INDEX IF NOT EXISTS jobs_project_duration ON job (project, duration); +CREATE INDEX IF NOT EXISTS jobs_project_numnodes ON job (project, num_nodes); + +-- JobState Filter +CREATE 
INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); +CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); +CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); +-- JobState Filter Sorting +CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_jobstate_duration ON job (job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_jobstate_numnodes ON job (job_state, num_nodes); + +-- ArrayJob Filter +CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); + +-- Sorting without active filters +CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); +CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); +CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); + +-- Single filters with default starttime sorting +CREATE INDEX IF NOT EXISTS jobs_duration_starttime ON job (duration, start_time); +CREATE INDEX IF NOT EXISTS jobs_numnodes_starttime ON job (num_nodes, start_time); +CREATE INDEX IF NOT EXISTS jobs_numacc_starttime ON job (num_acc, start_time); +CREATE INDEX IF NOT EXISTS jobs_energy_starttime ON job (energy, start_time); + +-- Optimize DB index usage diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 9c9e53e..5c28da9 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,11 +1,11 @@ -DROP INDEX job_stats; -DROP INDEX job_by_user; -DROP INDEX job_by_starttime; -DROP INDEX job_by_job_id; -DROP INDEX job_list; -DROP INDEX job_list_user; -DROP INDEX job_list_users; -DROP INDEX job_list_users_start; +DROP INDEX IF EXISTS job_stats; +DROP INDEX IF EXISTS job_by_user; +DROP INDEX IF EXISTS job_by_starttime; +DROP INDEX IF EXISTS job_by_job_id; +DROP INDEX IF EXISTS job_list; +DROP INDEX IF EXISTS job_list_user; +DROP INDEX IF EXISTS job_list_users; +DROP INDEX IF EXISTS job_list_users_start; ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; @@ -13,6 +13,11 @@ ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; ALTER TABLE job ADD COLUMN footprint TEXT DEFAULT NULL; ALTER TABLE tag ADD COLUMN tag_scope TEXT NOT NULL DEFAULT 'global'; +-- Do not use reserved keywords anymore +ALTER TABLE "user" RENAME TO hpc_user; +ALTER TABLE job RENAME COLUMN "user" TO hpc_user; +ALTER TABLE job RENAME COLUMN "partition" TO cluster_partition; + UPDATE job SET footprint = '{"flops_any_avg": 0.0}'; UPDATE job SET footprint = json_replace(footprint, '$.flops_any_avg', job.flops_any_avg); UPDATE job SET footprint = json_insert(footprint, '$.mem_bw_avg', job.mem_bw_avg); @@ -35,7 +40,7 @@ ALTER TABLE job DROP file_data_vol_total; -- Indices for: Single filters, combined filters, sorting, sorting with filters -- Cluster Filter CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); -CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, hpc_user); CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); -- Cluster Filter Sorting @@ 
-47,30 +52,30 @@ CREATE INDEX IF NOT EXISTS jobs_cluster_numacc ON job (cluster, num_acc); CREATE INDEX IF NOT EXISTS jobs_cluster_energy ON job (cluster, energy); -- Cluster+Partition Filter -CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, partition); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, cluster_partition); -- Cluster+Partition Filter Sorting -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, partition, start_time); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, partition, duration); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, partition, num_nodes); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (cluster, partition, num_hwthreads); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (cluster, partition, num_acc); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (cluster, partition, energy); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, cluster_partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_duration ON job (cluster, cluster_partition, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numnodes ON job (cluster, cluster_partition, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numhwthreads ON job (cluster, cluster_partition, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_numacc ON job (cluster, cluster_partition, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_energy ON job (cluster, cluster_partition, energy); -- Cluster+Partition+Jobstate Filter -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, partition, job_state); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, partition, job_state, user); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, partition, job_state, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, cluster_partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, cluster_partition, job_state, hpc_user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, cluster_partition, job_state, project); -- Cluster+Partition+Jobstate Filter Sorting -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, partition, job_state, start_time); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (cluster, partition, job_state, duration); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, partition, job_state, num_nodes); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numhwthreads ON job (cluster, partition, job_state, num_hwthreads); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (cluster, partition, job_state, num_acc); -CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (cluster, partition, job_state, energy); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, cluster_partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_duration ON job (cluster, cluster_partition, job_state, duration); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numnodes ON job (cluster, cluster_partition, job_state, num_nodes); +CREATE INDEX IF NOT EXISTS 
jobs_cluster_partition_jobstate_numhwthreads ON job (cluster, cluster_partition, job_state, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_numacc ON job (cluster, cluster_partition, job_state, num_acc); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_energy ON job (cluster, cluster_partition, job_state, energy); -- Cluster+JobState Filter CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); -CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, hpc_user); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); -- Cluster+JobState Filter Sorting CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); @@ -81,18 +86,18 @@ CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_numacc ON job (cluster, job_sta CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_energy ON job (cluster, job_state, energy); -- User Filter -CREATE INDEX IF NOT EXISTS jobs_user ON job (user); +CREATE INDEX IF NOT EXISTS jobs_user ON job (hpc_user); -- User Filter Sorting -CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (user, start_time); -CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (user, duration); -CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (user, num_nodes); -CREATE INDEX IF NOT EXISTS jobs_user_numhwthreads ON job (user, num_hwthreads); -CREATE INDEX IF NOT EXISTS jobs_user_numacc ON job (user, num_acc); -CREATE INDEX IF NOT EXISTS jobs_user_energy ON job (user, energy); +CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (hpc_user, start_time); +CREATE INDEX IF NOT EXISTS jobs_user_duration ON job (hpc_user, duration); +CREATE INDEX IF NOT EXISTS jobs_user_numnodes ON job (hpc_user, num_nodes); +CREATE INDEX IF NOT EXISTS jobs_user_numhwthreads ON job (hpc_user, num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_user_numacc ON job (hpc_user, num_acc); +CREATE INDEX IF NOT EXISTS jobs_user_energy ON job (hpc_user, energy); -- Project Filter CREATE INDEX IF NOT EXISTS jobs_project ON job (project); -CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, user); +CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, hpc_user); -- Project Filter Sorting CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); CREATE INDEX IF NOT EXISTS jobs_project_duration ON job (project, duration); @@ -103,7 +108,7 @@ CREATE INDEX IF NOT EXISTS jobs_project_energy ON job (project, energy); -- JobState Filter CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); -CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, user); +CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, hpc_user); CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); -- JobState Filter Sorting diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ba7a8aa..ffc0e55 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -22,7 +22,7 @@ import ( // GraphQL validation should make sure that no unkown values can be specified. 
var groupBy2column = map[model.Aggregate]string{ - model.AggregateUser: "job.user", + model.AggregateUser: "job.hpc_user", model.AggregateProject: "job.project", model.AggregateCluster: "job.cluster", } @@ -86,7 +86,7 @@ func (r *JobRepository) buildStatsQuery( fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s) as totalCoreHours`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s) as totalAccs`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType), - ).From("job").Join("user ON user.username = job.user").GroupBy(col) + ).From("job").Join("hpc_user ON hpc_user.username = job.hpc_user").GroupBy(col) } else { // Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours query = sq.Select("COUNT(job.id)", @@ -109,7 +109,7 @@ func (r *JobRepository) buildStatsQuery( // func (r *JobRepository) getUserName(ctx context.Context, id string) string { // user := GetUserFromContext(ctx) -// name, _ := r.FindColumnValue(user, id, "user", "name", "username", false) +// name, _ := r.FindColumnValue(user, id, "hpc_user", "name", "username", false) // if name != "" { // return name // } else { @@ -210,7 +210,7 @@ func (r *JobRepository) JobsStatsGrouped( totalAccHours = int(accHours.Int64) } - if col == "job.user" { + if col == "job.hpc_user" { // name := r.getUserName(ctx, id.String) stats = append(stats, &model.JobsStatistics{ diff --git a/internal/repository/user.go b/internal/repository/user.go index a851b6b..9b7e94e 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -46,8 +46,8 @@ func GetUserRepository() *UserRepository { func (r *UserRepository) GetUser(username string) (*schema.User, error) { user := &schema.User{Username: username} var hashedPassword, name, rawRoles, email, rawProjects sql.NullString - if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("user"). - Where("user.username = ?", username).RunWith(r.DB). + if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("hpc_user"). + Where("hpc_user.username = ?", username).RunWith(r.DB). 
QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil { log.Warnf("Error while querying user '%v' from database", username) return nil, err @@ -73,7 +73,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) { func (r *UserRepository) GetLdapUsernames() ([]string, error) { var users []string - rows, err := r.DB.Query(`SELECT username FROM user WHERE user.ldap = 1`) + rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`) if err != nil { log.Warn("Error while querying usernames") return nil, err @@ -121,7 +121,7 @@ func (r *UserRepository) AddUser(user *schema.User) error { vals = append(vals, int(user.AuthSource)) } - if _, err := sq.Insert("user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Insert("hpc_user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil { log.Errorf("Error while inserting new user '%v' into DB", user.Username) return err } @@ -134,7 +134,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro // user contains updated info, apply to dbuser // TODO: Discuss updatable fields if dbUser.Name != user.Name { - if _, err := sq.Update("user").Set("name", user.Name).Where("user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("name", user.Name).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { log.Errorf("error while updating name of user '%s'", user.Username) return err } @@ -143,7 +143,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro // Toggled until greenlit // if dbUser.HasRole(schema.RoleManager) && !reflect.DeepEqual(dbUser.Projects, user.Projects) { // projects, _ := json.Marshal(user.Projects) - // if _, err := sq.Update("user").Set("projects", projects).Where("user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { + // if _, err := sq.Update("hpc_user").Set("projects", projects).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil { // return err // } // } @@ -152,7 +152,7 @@ } func (r *UserRepository) DelUser(username string) error { - _, err := r.DB.Exec(`DELETE FROM user WHERE user.username = ?`, username) + _, err := r.DB.Exec(`DELETE FROM hpc_user WHERE hpc_user.username = ?`, username) if err != nil { log.Errorf("Error while deleting user '%s' from DB", username) return err @@ -162,7 +162,7 @@ } func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) { - q := sq.Select("username", "name", "email", "roles", "projects").From("user") + q := sq.Select("username", "name", "email", "roles", "projects").From("hpc_user") if specialsOnly { q = q.Where("(roles != '[\"user\"]' AND roles != '[]')") } @@ -223,7 +223,7 @@ func (r *UserRepository) AddRole( } roles, _ := json.Marshal(append(user.Roles, newRole)) - if _, err := sq.Update("user").Set("roles", roles).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("roles", roles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { log.Errorf("error while adding new role for user '%s'", user.Username) return err } @@ -259,7 +259,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr } mroles, _ :=
json.Marshal(newroles) - if _, err := sq.Update("user").Set("roles", mroles).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("roles", mroles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { log.Errorf("Error while removing role for user '%s'", user.Username) return err } @@ -285,7 +285,7 @@ func (r *UserRepository) AddProject( } projects, _ := json.Marshal(append(user.Projects, project)) - if _, err := sq.Update("user").Set("projects", projects).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("projects", projects).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { return err } @@ -323,7 +323,7 @@ func (r *UserRepository) RemoveProject(ctx context.Context, username string, pro } else { result, _ = json.Marshal(newprojects) } - if _, err := sq.Update("user").Set("projects", result).Where("user.username = ?", username).RunWith(r.DB).Exec(); err != nil { + if _, err := sq.Update("hpc_user").Set("projects", result).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil { return err } return nil @@ -355,7 +355,7 @@ func (r *UserRepository) FetchUserInCtx(ctx context.Context, username string) (* user := &model.User{Username: username} var name, email sql.NullString - if err := sq.Select("name", "email").From("user").Where("user.username = ?", username). + if err := sq.Select("name", "email").From("hpc_user").Where("hpc_user.username = ?", username). RunWith(r.DB).QueryRow().Scan(&name, &email); err != nil { if err == sql.ErrNoRows { /* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */ diff --git a/pkg/schema/job.go b/pkg/schema/job.go index f5bcc62..5e3110b 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -18,9 +18,9 @@ import ( type BaseJob struct { Cluster string `json:"cluster" db:"cluster" example:"fritz"` SubCluster string `json:"subCluster" db:"subcluster" example:"main"` - Partition string `json:"partition,omitempty" db:"partition" example:"main"` + Partition string `json:"partition,omitempty" db:"cluster_partition" example:"main"` Project string `json:"project" db:"project" example:"abcd200"` - User string `json:"user" db:"user" example:"abcd100h"` + User string `json:"user" db:"hpc_user" example:"abcd100h"` State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` Tags []*Tag `json:"tags,omitempty"` RawEnergyFootprint []byte `json:"-" db:"energy_footprint"` From 311c088d3dda748556a6660519ef6a5d0f9a7cab Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 21 Nov 2024 15:47:09 +0100 Subject: [PATCH 241/443] removes debug logging --- internal/repository/stats.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index f5677ad..ebfb1fb 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -226,8 +226,6 @@ func (r *JobRepository) JobsStatsGrouped( TotalAccHours: totalAccHours, }) } else { - log.Debugf(">>>> STATS ID %s", id.String) - log.Debugf(">>>> STATS TOTALNODES %d", totalNodes) stats = append(stats, &model.JobsStatistics{ ID: id.String, From 17906ec0eb00be5c7b124cb0c3e83fadc5349438 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 21 Nov 2024 15:54:46 +0100 Subject: [PATCH 242/443] Add down migrations for documentation --- 
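These down migrations make the 08_add-footprint schema change reversible: each NN_name.up.sql now has a matching NN_name.down.sql that drops the new indices and, on MySQL, renames hpc_user and cluster_partition back to their old reserved-keyword names. As a rough sketch of how such a rollback could be driven with golang-migrate, which pairs the two files by version number (the paths and DSN below are illustrative assumptions, not taken from this patch):

package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/sqlite3"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// Illustrative migration source and database DSN.
	m, err := migrate.New(
		"file://internal/repository/migrations/sqlite3",
		"sqlite3://var/job.db")
	if err != nil {
		log.Fatal(err)
	}
	// Steps(-1) applies exactly one *.down.sql file, e.g.
	// 08_add-footprint.down.sql when the schema is at version 8.
	if err := m.Steps(-1); err != nil {
		log.Fatal(err)
	}
}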
.../mysql/08_add-footprint.down.sql | 62 ++++++++++++++ .../sqlite3/08_add-footprint.down.sql | 82 +++++++++++++++++++ 2 files changed, 144 insertions(+) diff --git a/internal/repository/migrations/mysql/08_add-footprint.down.sql b/internal/repository/migrations/mysql/08_add-footprint.down.sql index 8c99eb5..57f2145 100644 --- a/internal/repository/migrations/mysql/08_add-footprint.down.sql +++ b/internal/repository/migrations/mysql/08_add-footprint.down.sql @@ -19,3 +19,65 @@ UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg'); UPDATE job SET file_data_vol_total = json_extract(footprint, '$.file_data_vol_total'); ALTER TABLE job DROP footprint; +-- Do not use reserved keywords anymore +RENAME TABLE hpc_user TO `user`; +ALTER TABLE job RENAME COLUMN hpc_user TO `user`; +ALTER TABLE job RENAME COLUMN cluster_partition TO `partition`; + +DROP INDEX IF EXISTS jobs_cluster; +DROP INDEX IF EXISTS jobs_cluster_user; +DROP INDEX IF EXISTS jobs_cluster_project; +DROP INDEX IF EXISTS jobs_cluster_subcluster; +DROP INDEX IF EXISTS jobs_cluster_starttime; +DROP INDEX IF EXISTS jobs_cluster_duration; +DROP INDEX IF EXISTS jobs_cluster_numnodes; + +DROP INDEX IF EXISTS jobs_cluster_partition; +DROP INDEX IF EXISTS jobs_cluster_partition_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_numnodes; + +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes; + +DROP INDEX IF EXISTS jobs_cluster_jobstate; +DROP INDEX IF EXISTS jobs_cluster_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_jobstate_project; + +DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes; + +DROP INDEX IF EXISTS jobs_user; +DROP INDEX IF EXISTS jobs_user_starttime; +DROP INDEX IF EXISTS jobs_user_duration; +DROP INDEX IF EXISTS jobs_user_numnodes; + +DROP INDEX IF EXISTS jobs_project; +DROP INDEX IF EXISTS jobs_project_user; +DROP INDEX IF EXISTS jobs_project_starttime; +DROP INDEX IF EXISTS jobs_project_duration; +DROP INDEX IF EXISTS jobs_project_numnodes; + +DROP INDEX IF EXISTS jobs_jobstate; +DROP INDEX IF EXISTS jobs_jobstate_user; +DROP INDEX IF EXISTS jobs_jobstate_project; +DROP INDEX IF EXISTS jobs_jobstate_starttime; +DROP INDEX IF EXISTS jobs_jobstate_duration; +DROP INDEX IF EXISTS jobs_jobstate_numnodes; + +DROP INDEX IF EXISTS jobs_arrayjobid_starttime; +DROP INDEX IF EXISTS jobs_cluster_arrayjobid_starttime; + +DROP INDEX IF EXISTS jobs_starttime; +DROP INDEX IF EXISTS jobs_duration; +DROP INDEX IF EXISTS jobs_numnodes; + +DROP INDEX IF EXISTS jobs_duration_starttime; +DROP INDEX IF EXISTS jobs_numnodes_starttime; +DROP INDEX IF EXISTS jobs_numacc_starttime; +DROP INDEX IF EXISTS jobs_energy_starttime; diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.down.sql b/internal/repository/migrations/sqlite3/08_add-footprint.down.sql index 8c99eb5..cc2d3e9 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.down.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.down.sql @@ -19,3 +19,85 @@ UPDATE job SET file_bw_avg = json_extract(footprint, '$.file_bw_avg'); UPDATE job SET 
file_data_vol_total = json_extract(footprint, '$.file_data_vol_total'); ALTER TABLE job DROP footprint; + +DROP INDEX IF EXISTS jobs_cluster; +DROP INDEX IF EXISTS jobs_cluster_user; +DROP INDEX IF EXISTS jobs_cluster_project; +DROP INDEX IF EXISTS jobs_cluster_subcluster; +DROP INDEX IF EXISTS jobs_cluster_starttime; +DROP INDEX IF EXISTS jobs_cluster_duration; +DROP INDEX IF EXISTS jobs_cluster_numnodes; +DROP INDEX IF EXISTS jobs_cluster_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_numacc; +DROP INDEX IF EXISTS jobs_cluster_energy; + +DROP INDEX IF EXISTS jobs_cluster_partition; +DROP INDEX IF EXISTS jobs_cluster_partition_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_numnodes; +DROP INDEX IF EXISTS jobs_cluster_partition_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_partition_numacc; +DROP INDEX IF EXISTS jobs_cluster_partition_energy; + +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_project; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_numacc; +DROP INDEX IF EXISTS jobs_cluster_partition_jobstate_energy; + +DROP INDEX IF EXISTS jobs_cluster_jobstate; +DROP INDEX IF EXISTS jobs_cluster_jobstate_user; +DROP INDEX IF EXISTS jobs_cluster_jobstate_project; + +DROP INDEX IF EXISTS jobs_cluster_jobstate_starttime; +DROP INDEX IF EXISTS jobs_cluster_jobstate_duration; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_cluster_jobstate_numacc; +DROP INDEX IF EXISTS jobs_cluster_jobstate_energy; + +DROP INDEX IF EXISTS jobs_user; +DROP INDEX IF EXISTS jobs_user_starttime; +DROP INDEX IF EXISTS jobs_user_duration; +DROP INDEX IF EXISTS jobs_user_numnodes; +DROP INDEX IF EXISTS jobs_user_numhwthreads; +DROP INDEX IF EXISTS jobs_user_numacc; +DROP INDEX IF EXISTS jobs_user_energy; + +DROP INDEX IF EXISTS jobs_project; +DROP INDEX IF EXISTS jobs_project_user; +DROP INDEX IF EXISTS jobs_project_starttime; +DROP INDEX IF EXISTS jobs_project_duration; +DROP INDEX IF EXISTS jobs_project_numnodes; +DROP INDEX IF EXISTS jobs_project_numhwthreads; +DROP INDEX IF EXISTS jobs_project_numacc; +DROP INDEX IF EXISTS jobs_project_energy; + +DROP INDEX IF EXISTS jobs_jobstate; +DROP INDEX IF EXISTS jobs_jobstate_user; +DROP INDEX IF EXISTS jobs_jobstate_project; +DROP INDEX IF EXISTS jobs_jobstate_starttime; +DROP INDEX IF EXISTS jobs_jobstate_duration; +DROP INDEX IF EXISTS jobs_jobstate_numnodes; +DROP INDEX IF EXISTS jobs_jobstate_numhwthreads; +DROP INDEX IF EXISTS jobs_jobstate_numacc; + +DROP INDEX IF EXISTS jobs_arrayjobid_starttime; +DROP INDEX IF EXISTS jobs_cluster_arrayjobid_starttime; + +DROP INDEX IF EXISTS jobs_starttime; +DROP INDEX IF EXISTS jobs_duration; +DROP INDEX IF EXISTS jobs_numnodes; +DROP INDEX IF EXISTS jobs_numhwthreads; +DROP INDEX IF EXISTS jobs_numacc; +DROP INDEX IF EXISTS jobs_energy; + +DROP INDEX IF EXISTS jobs_duration_starttime; +DROP INDEX IF EXISTS jobs_numnodes_starttime; +DROP INDEX IF EXISTS jobs_numhwthreads_starttime; +DROP INDEX IF EXISTS jobs_numacc_starttime; +DROP INDEX IF EXISTS jobs_energy_starttime; From 
d89574ce7338b52d31822017869dfe020d2519ed Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 12:42:49 +0100 Subject: [PATCH 243/443] Use repo.LoadStats, move transaction init --- .../taskManager/updateFootprintService.go | 74 ++++++++++--------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index efca6d1..546d5a7 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -10,7 +10,7 @@ import ( "time" "github.com/ClusterCockpit/cc-backend/internal/config" - "github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher" + "github.com/ClusterCockpit/cc-backend/internal/metricdata" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -37,11 +37,6 @@ func RegisterFootprintWorker() { cl := 0 log.Printf("Update Footprints started at %s", s.Format(time.RFC3339)) - t, err := jobRepo.TransactionInit() - if err != nil { - log.Errorf("Failed TransactionInit %v", err) - } - for _, cluster := range archive.Clusters { jobs, err := jobRepo.FindRunningJobs(cluster.Name) if err != nil { continue } @@ -53,16 +48,21 @@ func RegisterFootprintWorker() { allMetrics = append(allMetrics, mc.Name) } - scopes := []schema.MetricScope{schema.MetricScopeNode} - scopes = append(scopes, schema.MetricScopeCore) - scopes = append(scopes, schema.MetricScopeAccelerator) + repo, err := metricdata.GetMetricDataRepo(cluster.Name) + if err != nil { + log.Warnf("no metric data repository configured for '%s'", cluster.Name) + continue + } + + pendingStatements := make([]sq.UpdateBuilder, len(jobs)) for _, job := range jobs { log.Debugf("Try job %d", job.JobID) cl++ - jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, context.Background(), 0) // 0 Resolution-Value retrieves highest res + + jobStats, err := repo.LoadStats(job, allMetrics, context.Background()) if err != nil { - log.Errorf("Error wile loading job data for footprint update: %v", err) + log.Errorf("Error while loading job data stats for footprint update: %v", err) ce++ continue } @@ -73,19 +73,19 @@ func RegisterFootprintWorker() { Statistics: make(map[string]schema.JobStatistics), } - for metric, data := range jobData { + for metric, data := range jobStats { // Metric, Hostname:Stats avg, min, max := 0.0, math.MaxFloat32, -math.MaxFloat32 - nodeData, ok := data["node"] - if !ok { - // This should never happen ? - ce++ - continue - } + // nodeData, ok := data["node"] + // if !ok { + // // This should never happen ?
+ // ce++ + // continue + // } - for _, series := range nodeData.Series { - avg += series.Statistics.Avg - min = math.Min(min, series.Statistics.Min) - max = math.Max(max, series.Statistics.Max) + for _, hostStats := range data { + avg += hostStats.Avg + min = math.Min(min, hostStats.Min) + max = math.Max(max, hostStats.Max) } // Add values rounded to 2 digits @@ -100,25 +100,34 @@ func RegisterFootprintWorker() { } } - // Init UpdateBuilder + // Build Statement per Job, Add to Pending Array stmt := sq.Update("job") - // Add SET queries stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta) if err != nil { - log.Errorf("Update job (dbid: %d) failed at update Footprint step: %s", job.ID, err.Error()) + log.Errorf("Update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error()) ce++ continue } stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta) if err != nil { - log.Errorf("Update job (dbid: %d) failed at update Energy step: %s", job.ID, err.Error()) + log.Errorf("update job (dbid: %d) statement build failed at energy step: %s", job.ID, err.Error()) ce++ continue } - // Add WHERE Filter stmt = stmt.Where("job.id = ?", job.ID) - query, args, err := stmt.ToSql() + pendingStatements = append(pendingStatements, stmt) + log.Debugf("Finish Job Preparation %d", job.JobID) + } + + t, err := jobRepo.TransactionInit() + if err != nil { + log.Errorf("Failed TransactionInit %v", err) + } + + for _, ps := range pendingStatements { + + query, args, err := ps.ToSql() if err != nil { log.Errorf("Failed in ToSQL conversion: %v", err) ce++ @@ -127,17 +136,12 @@ func RegisterFootprintWorker() { // Args: JSON, JSON, ENERGY, JOBID jobRepo.TransactionAdd(t, query, args...) - // if err := jobRepo.Execute(stmt); err != nil { - // log.Errorf("Update job footprint (dbid: %d) failed at db execute: %s", job.ID, err.Error()) - // continue - // } c++ - log.Debugf("Finish Job %d", job.JobID) } - jobRepo.TransactionCommit(t) + + jobRepo.TransactionEnd(t) log.Debugf("Finish Cluster %s", cluster.Name) } - jobRepo.TransactionEnd(t) log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s)) })) } From 21b3a67988a3a7be48336b0dd28ca8059761f53d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 13:13:43 +0100 Subject: [PATCH 244/443] add timers, add else case for transaction add --- .../taskManager/updateFootprintService.go | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 546d5a7..60ba988 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -38,6 +38,7 @@ func RegisterFootprintWorker() { log.Printf("Update Footprints started at %s", s.Format(time.RFC3339)) for _, cluster := range archive.Clusters { + s_cluster := time.Now() jobs, err := jobRepo.FindRunningJobs(cluster.Name) if err != nil { continue @@ -60,6 +61,8 @@ func RegisterFootprintWorker() { log.Debugf("Try job %d", job.JobID) cl++ + s_job := time.Now() + jobStats, err := repo.LoadStats(job, allMetrics, context.Background()) if err != nil { log.Errorf("Error wile loading job data stats for footprint update: %v", err) @@ -75,12 +78,6 @@ func RegisterFootprintWorker() { for metric, data := range jobStats { // Metric, Hostname:Stats avg, min, max := 0.0, math.MaxFloat32, -math.MaxFloat32 - // nodeData, ok := data["node"] - // if !ok { - // // This should never happen ? 
- // ce++ - // continue - // } for _, hostStats := range data { avg += hostStats.Avg @@ -117,7 +114,7 @@ func RegisterFootprintWorker() { stmt = stmt.Where("job.id = ?", job.ID) pendingStatements = append(pendingStatements, stmt) - log.Debugf("Finish Job Preparation %d", job.JobID) + log.Debugf("Finish Job Preparation %d, took %s", job.JobID, time.Since(s_job)) } t, err := jobRepo.TransactionInit() @@ -129,18 +126,19 @@ func RegisterFootprintWorker() { query, args, err := ps.ToSql() if err != nil { + log.Debugf(">>> Query: %v", query) + log.Debugf(">>> Args: %v", args) log.Errorf("Failed in ToSQL conversion: %v", err) ce++ - continue + } else { + // Args: JSON, JSON, ENERGY, JOBID + jobRepo.TransactionAdd(t, query, args...) + c++ } - - // Args: JSON, JSON, ENERGY, JOBID - jobRepo.TransactionAdd(t, query, args...) - c++ } jobRepo.TransactionEnd(t) - log.Debugf("Finish Cluster %s", cluster.Name) + log.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster)) } log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s)) })) From 69f8a34aaccffda7675386e7846a047d8b52540d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 13:36:26 +0100 Subject: [PATCH 245/443] more logging --- internal/taskManager/updateFootprintService.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 60ba988..fba0ec7 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -65,7 +65,7 @@ func RegisterFootprintWorker() { jobStats, err := repo.LoadStats(job, allMetrics, context.Background()) if err != nil { - log.Errorf("Error while loading job data stats for footprint update: %v", err) + log.Errorf("error while loading job data stats for footprint update: %v", err) ce++ continue } @@ -101,7 +101,7 @@ func RegisterFootprintWorker() { stmt := sq.Update("job") stmt, err = jobRepo.UpdateFootprint(stmt, jobMeta) if err != nil { - log.Errorf("Update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error()) + log.Errorf("update job (dbid: %d) statement build failed at footprint step: %s", job.ID, err.Error()) ce++ continue } @@ -114,24 +114,24 @@ func RegisterFootprintWorker() { stmt = stmt.Where("job.id = ?", job.ID) pendingStatements = append(pendingStatements, stmt) - log.Debugf("Finish Job Preparation %d, took %s", job.JobID, time.Since(s_job)) + log.Debugf("Job %d took %s", job.JobID, time.Since(s_job)) } + log.Debugf("Finish preparation for %d jobs: %d statements", len(jobs), len(pendingStatements)) t, err := jobRepo.TransactionInit() if err != nil { - log.Errorf("Failed TransactionInit %v", err) + log.Errorf("failed TransactionInit %v", err) } - for _, ps := range pendingStatements { + for idx, ps := range pendingStatements { query, args, err := ps.ToSql() if err != nil { - log.Debugf(">>> Query: %v", query) - log.Debugf(">>> Args: %v", args) - log.Errorf("Failed in ToSQL conversion: %v", err) + log.Errorf("failed in ToSQL conversion: %v", err) ce++ } else { // Args: JSON, JSON, ENERGY, JOBID + log.Infof("add transaction on index %d", idx) jobRepo.TransactionAdd(t, query, args...)
c++ } From baa7367ebec344b19a76eaf5d831f50389109c73 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 13:39:59 +0100 Subject: [PATCH 246/443] change array init to empty array --- internal/taskManager/updateFootprintService.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index fba0ec7..24c8a3e 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -55,7 +55,7 @@ func RegisterFootprintWorker() { continue } - pendingStatements := make([]sq.UpdateBuilder, len(jobs)) + pendingStatements := []sq.UpdateBuilder{} for _, job := range jobs { log.Debugf("Try job %d", job.JobID) From a8eff6fbd196a0452e4cde6762af39d2d03e2edd Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 15:08:53 +0100 Subject: [PATCH 247/443] small logging changes --- internal/taskManager/updateFootprintService.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 24c8a3e..b884ed6 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -51,14 +51,14 @@ func RegisterFootprintWorker() { repo, err := metricdata.GetMetricDataRepo(cluster.Name) if err != nil { - log.Warnf("no metric data repository configured for '%s'", cluster.Name) + log.Errorf("no metric data repository configured for '%s'", cluster.Name) continue } pendingStatements := []sq.UpdateBuilder{} for _, job := range jobs { - log.Debugf("Try job %d", job.JobID) + log.Debugf("Prepare job %d", job.JobID) cl++ s_job := time.Now() @@ -116,7 +116,6 @@ func RegisterFootprintWorker() { pendingStatements = append(pendingStatements, stmt) log.Debugf("Job %d took %s", job.JobID, time.Since(s_job)) } - log.Debugf("Finish preparation for %d jobs: %d statements", len(jobs), len(pendingStatements)) t, err := jobRepo.TransactionInit() if err != nil { @@ -131,7 +130,7 @@ func RegisterFootprintWorker() { ce++ } else { // Args: JSON, JSON, ENERGY, JOBID - log.Infof("add transaction on index %d", idx) + log.Debugf("add transaction on index %d", idx) jobRepo.TransactionAdd(t, query, args...) 
c++ } From 5f4a74f8bad56576f292b1bd343ba31ac6194010 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 15:57:28 +0100 Subject: [PATCH 248/443] add check on returned stats --- internal/taskManager/updateFootprintService.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index b884ed6..d542371 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -79,10 +79,16 @@ func RegisterFootprintWorker() { for metric, data := range jobStats { // Metric, Hostname:Stats avg, min, max := 0.0, math.MaxFloat32, -math.MaxFloat32 - for _, hostStats := range data { - avg += hostStats.Avg - min = math.Min(min, hostStats.Min) - max = math.Max(max, hostStats.Max) + for hostname := range data { + hostStats, ok := data[hostname] + if !ok { + log.Debugf("footprintWorker: NAN stats returned for job %d @ %s", job.JobID, hostname) + } else { + log.Debugf("stats returned for job %d : %#v", job.JobID, hostStats) + avg += hostStats.Avg + min = math.Min(min, hostStats.Min) + max = math.Max(max, hostStats.Max) + } } // Add values rounded to 2 digits From 00ddc462d20a5731e5de256aeff3d0e63143c0b5 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 16:31:35 +0100 Subject: [PATCH 249/443] expand check, change to zero init --- .../taskManager/updateFootprintService.go | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index d542371..d131dc5 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -76,19 +76,21 @@ func RegisterFootprintWorker() { Statistics: make(map[string]schema.JobStatistics), } - for metric, data := range jobStats { // Metric, Hostname:Stats - avg, min, max := 0.0, math.MaxFloat32, -math.MaxFloat32 + for metric := range jobStats { // Metric, Hostname:Stats + avg, min, max := 0.0, 0.0, 0.0 // math.MaxFloat32, -math.MaxFloat32 - for hostname := range data { - hostStats, ok := data[hostname] - if !ok { - log.Debugf("footprintWorker: NAN stats returned for job %d @ %s", job.JobID, hostname) - } else { - log.Debugf("stats returned for job %d : %#v", job.JobID, hostStats) - avg += hostStats.Avg - min = math.Min(min, hostStats.Min) - max = math.Max(max, hostStats.Max) + data, ok := jobStats[metric] + if ok { + for hostname := range data { + hostStats, ok := data[hostname] + if ok { + avg += hostStats.Avg + min = math.Min(min, hostStats.Min) + max = math.Max(max, hostStats.Max) + } } + } else { + log.Debugf("no stats data return for job %d, metric %s", job.JobID, metric) } // Add values rounded to 2 digits From 93d5a0e532c1a5399e132a7fd66c5ded6533c11c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 16:59:18 +0100 Subject: [PATCH 250/443] correct input for check --- internal/taskManager/updateFootprintService.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index d131dc5..2a1e590 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -76,17 +76,18 @@ func RegisterFootprintWorker() { Statistics: make(map[string]schema.JobStatistics), } - for metric := range jobStats { // Metric, Hostname:Stats + for 
_, metric := range allMetrics { avg, min, max := 0.0, 0.0, 0.0 // math.MaxFloat32, -math.MaxFloat32 - - data, ok := jobStats[metric] + data, ok := jobStats[metric] // Metric:[Hostname:Stats] if ok { - for hostname := range data { - hostStats, ok := data[hostname] + for _, res := range job.Resources { + hostStats, ok := data[res.Hostname] if ok { avg += hostStats.Avg min = math.Min(min, hostStats.Min) max = math.Max(max, hostStats.Max) + } else { + log.Debugf("no stats data return for host %s in job %d, metric %s", res.Hostname, job.JobID, metric) } } } else { log.Debugf("no stats data return for job %d, metric %s", job.JobID, metric) } From d4f487d5546c3a0cac4d62fe10785cf91f134f36 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 22 Nov 2024 17:56:55 +0100 Subject: [PATCH 251/443] comment debug logging --- internal/taskManager/updateFootprintService.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 2a1e590..580e338 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -77,8 +77,8 @@ func RegisterFootprintWorker() { } for _, metric := range allMetrics { - avg, min, max := 0.0, 0.0, 0.0 // math.MaxFloat32, -math.MaxFloat32 - data, ok := jobStats[metric] // Metric:[Hostname:Stats] + avg, min, max := 0.0, 0.0, 0.0 + data, ok := jobStats[metric] // Metric:[Hostname:Stats] if ok { for _, res := range job.Resources { hostStats, ok := data[res.Hostname] @@ -86,13 +86,15 @@ func RegisterFootprintWorker() { avg += hostStats.Avg min = math.Min(min, hostStats.Min) max = math.Max(max, hostStats.Max) - } else { - log.Debugf("no stats data return for host %s in job %d, metric %s", res.Hostname, job.JobID, metric) } + // else { + // log.Debugf("no stats data return for host %s in job %d, metric %s", res.Hostname, job.JobID, metric) + // } } - } else { - log.Debugf("no stats data return for job %d, metric %s", job.JobID, metric) } + // else { + // log.Debugf("no stats data return for job %d, metric %s", job.JobID, metric) + // } // Add values rounded to 2 digits jobMeta.Statistics[metric] = schema.JobStatistics{ From a11f165f2adb30ba788990d6b5784871be813090 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 24 Nov 2024 07:09:31 +0100 Subject: [PATCH 252/443] Cleanup --- internal/repository/testdata/job.db-shm | Bin 32768 -> 0 bytes internal/repository/testdata/job.db-wal | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 internal/repository/testdata/job.db-shm delete mode 100644 internal/repository/testdata/job.db-wal diff --git a/internal/repository/testdata/job.db-shm b/internal/repository/testdata/job.db-shm deleted file mode 100644 index fe9ac2845eca6fe6da8a63cd096d9cf9e24ece10..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32768 [base85 binary data elided] Date: Sun, 24 Nov 2024 07:41:39 +0100 Subject: [PATCH 253/443] Update test sqlite db --- internal/repository/testdata/job.db | Bin 118784 -> 118784 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/internal/repository/testdata/job.db b/internal/repository/testdata/job.db index 23eba6fb5066702a5bf6c48539e75e69da8cfd53..43ec9d3c7f36c7ea505a96cc1208c4ce7a148eed 100644 GIT binary patch delta 1028 [base85 binary data elided]
delta 1852 [base85 binary data elided] From c523e93564dd2e31ddf8c2bff93fdecdb11b6c71 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 24 Nov 2024 07:48:30 +0100 Subject: [PATCH 254/443] Update to new db schema --- internal/repository/tags.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/internal/repository/tags.go b/internal/repository/tags.go index 48ea9ec..6239495 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -17,7 +17,6 @@ import ( // Add the tag with id `tagId` to the job with the database id `jobId`. func (r *JobRepository) AddTag(ctx context.Context, job int64, tag int64) ([]*schema.Tag, error) { - j, err := r.FindById(ctx, job) if err != nil { log.Warn("Error while finding job by id") @@ -49,7 +48,6 @@ func (r *JobRepository) AddTag(ctx context.Context, job int64, tag int64) ([]*sc // Removes a tag from a job func (r *JobRepository) RemoveTag(ctx context.Context, job, tag int64) ([]*schema.Tag, error) { - j, err := r.FindById(ctx, job) if err != nil { log.Warn("Error while finding job by id") @@ -81,7 +79,6 @@ func (r *JobRepository) RemoveTag(ctx context.Context, job, tag int64) ([]*schem // CreateTag creates a new tag with the specified type and name and returns its database id. func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope string) (tagId int64, err error) { - // Default to "Global" scope if none defined if tagScope == "" { tagScope = "global" @@ -147,9 +144,9 @@ func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, count // Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case } else if user != nil && user.HasRole(schema.RoleManager) { // MANAGER: Count own jobs plus project's jobs // Build ("project1", "project2", ...) list of variable length directly in SQL string - q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ? OR job.project IN (\""+strings.Join(user.Projects, "\",\"")+"\"))", user.Username) + q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.hpc_user = ?
OR job.project IN (\""+strings.Join(user.Projects, "\",\"")+"\"))", user.Username) } else if user != nil { // USER OR NO ROLE (Compatibility): Only count own jobs - q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ?)", user.Username) + q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.hpc_user = ?)", user.Username) } rows, err := q.RunWith(r.stmtCache).Query() @@ -176,7 +173,6 @@ func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, count // AddTagOrCreate adds the tag with the specified type and name to the job with the database id `jobId`. // If such a tag does not yet exist, it is created. func (r *JobRepository) AddTagOrCreate(ctx context.Context, jobId int64, tagType string, tagName string, tagScope string) (tagId int64, err error) { - // Default to "Global" scope if none defined if tagScope == "" { tagScope = "global" From 0d923cc9206ecd8a363a8eef807d6564aae84f5e Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Sun, 24 Nov 2024 07:49:26 +0100 Subject: [PATCH 255/443] Ignore generated test artefacts --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 2f7c206..e23a17b 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ var/job.db-wal dist/ *.db +internal/repository/testdata/job.db-shm +internal/repository/testdata/job.db-wal From 81b8d578f2ead1df8d55fa1c4a19f4544a1883ef Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Mon, 25 Nov 2024 16:44:50 +0100 Subject: [PATCH 256/443] feat: Add buffered channel with worker thread for job start API Fixes #293 Refactoring on the way --- cmd/cc-backend/server.go | 4 ++ internal/api/rest.go | 72 +++------------------------ internal/graph/schema.resolvers.go | 22 ++++---- internal/repository/dbConnection.go | 2 + internal/repository/jobFind.go | 17 +++++++ internal/repository/jobQuery.go | 9 +++- internal/repository/jobStartWorker.go | 70 ++++++++++++++++++++++++++ internal/repository/job_test.go | 2 +- internal/repository/stats.go | 10 ++-- internal/repository/tags.go | 32 ++++++------ internal/routerConfig/routes.go | 2 +- pkg/schema/cluster.go | 6 +-- pkg/schema/config.go | 6 +-- pkg/schema/metrics.go | 14 +++--- pkg/schema/user.go | 4 +- 15 files changed, 156 insertions(+), 116 deletions(-) create mode 100644 internal/repository/jobStartWorker.go diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index 3c6fa55..fc620c8 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -25,6 +25,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph" "github.com/ClusterCockpit/cc-backend/internal/graph/generated" + "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/routerConfig" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv" @@ -316,6 +317,9 @@ func serverShutdown() { // First shut down the server gracefully (waiting for all ongoing requests) server.Shutdown(context.Background()) + // Then, wait for any async jobStarts still pending... + repository.WaitForJobStart() + // Then, wait for any async archivings still pending... 
archiver.WaitForArchiving() } diff --git a/internal/api/rest.go b/internal/api/rest.go index 369faf4..b60521b 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -72,7 +72,6 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) { r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut) - r.HandleFunc("/jobs/stop_job/{id}", api.stopJobById).Methods(http.MethodPost, http.MethodPut) // r.HandleFunc("/jobs/import/", api.importJob).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet) @@ -421,7 +420,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { StartTime: job.StartTime.Unix(), } - res.Tags, err = api.JobRepository.GetTags(r.Context(), &job.ID) + res.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID) if err != nil { handleError(err, http.StatusInternalServerError, rw) return @@ -494,7 +493,7 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) return } - job.Tags, err = api.JobRepository.GetTags(r.Context(), &job.ID) + job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID) if err != nil { handleError(err, http.StatusInternalServerError, rw) return @@ -587,7 +586,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { return } - job.Tags, err = api.JobRepository.GetTags(r.Context(), &job.ID) + job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID) if err != nil { handleError(err, http.StatusInternalServerError, rw) return @@ -728,7 +727,7 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { return } - job.Tags, err = api.JobRepository.GetTags(r.Context(), &job.ID) + job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return @@ -741,7 +740,7 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { } for _, tag := range req { - tagId, err := api.JobRepository.AddTagOrCreate(r.Context(), job.ID, tag.Type, tag.Name, tag.Scope) + tagId, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), job.ID, tag.Type, tag.Name, tag.Scope) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return @@ -791,11 +790,6 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { return } - // aquire lock to avoid race condition between API calls - var unlockOnce sync.Once - api.RepositoryMutex.Lock() - defer unlockOnce.Do(api.RepositoryMutex.Unlock) - // Check if combination of (job_id, cluster_id, start_time) already exists: jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil) if err != nil && err != sql.ErrNoRows { @@ -810,16 +804,16 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { } } + repository.TriggerJobStart(repository.JobWithUser{Job: &req, User: repository.GetUserFromContext(r.Context())}) + id, err := api.JobRepository.Start(&req) if err != nil { handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw) return } - // unlock here, adding Tags can be async - unlockOnce.Do(api.RepositoryMutex.Unlock) for _, tag := range req.Tags { - if _, err := api.JobRepository.AddTagOrCreate(r.Context(), id, tag.Type, tag.Name, tag.Scope); 
err != nil { + if _, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), id, tag.Type, tag.Name, tag.Scope); err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw) return @@ -834,56 +828,6 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { }) } -// stopJobById godoc -// @summary Marks job as completed and triggers archiving -// @tags Job add and modify -// @description Job to stop is specified by database ID. Only stopTime and final state are required in request body. -// @description Returns full job resource information according to 'JobMeta' scheme. -// @accept json -// @produce json -// @param id path int true "Database ID of Job" -// @param request body api.StopJobApiRequest true "stopTime and final state in request body" -// @success 200 {object} schema.JobMeta "Job resource" -// @failure 400 {object} api.ErrorResponse "Bad Request" -// @failure 401 {object} api.ErrorResponse "Unauthorized" -// @failure 403 {object} api.ErrorResponse "Forbidden" -// @failure 404 {object} api.ErrorResponse "Resource not found" -// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" -// @failure 500 {object} api.ErrorResponse "Internal Server Error" -// @security ApiKeyAuth -// @router /jobs/stop_job/{id} [post] -func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) { - // Parse request body: Only StopTime and State - req := StopJobApiRequest{} - if err := decode(r.Body, &req); err != nil { - handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) - return - } - - // Fetch job (that will be stopped) from db - id, ok := mux.Vars(r)["id"] - var job *schema.Job - var err error - if ok { - id, e := strconv.ParseInt(id, 10, 64) - if e != nil { - handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw) - return - } - - job, err = api.JobRepository.FindById(r.Context(), id) - } else { - handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw) - return - } - if err != nil { - handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) - return - } - - api.checkAndHandleStopJob(rw, job, req) -} - // stopJobByRequest godoc // @summary Marks job as completed and triggers archiving // @tags Job add and modify diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 58d664b..9fd7260 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -31,7 +31,7 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) ( // Tags is the resolver for the tags field. func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) { - return r.Repo.GetTags(ctx, &obj.ID) + return r.Repo.GetTags(repository.GetUserFromContext(ctx), &obj.ID) } // ConcurrentJobs is the resolver for the concurrentJobs field. 
@@ -159,7 +159,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds return nil, err } - if tags, err = r.Repo.AddTag(ctx, jid, tid); err != nil { + if tags, err = r.Repo.AddTag(repository.GetUserFromContext(ctx), jid, tid); err != nil { log.Warn("Error while adding tag") return nil, err } @@ -185,7 +185,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta return nil, err } - if tags, err = r.Repo.RemoveTag(ctx, jid, tid); err != nil { + if tags, err = r.Repo.RemoveTag(repository.GetUserFromContext(ctx), jid, tid); err != nil { log.Warn("Error while removing tag") return nil, err } @@ -211,7 +211,7 @@ func (r *queryResolver) Clusters(ctx context.Context) ([]*schema.Cluster, error) // Tags is the resolver for the tags field. func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) { - return r.Repo.GetTags(ctx, nil) + return r.Repo.GetTags(repository.GetUserFromContext(ctx), nil) } // GlobalMetrics is the resolver for the globalMetrics field. @@ -493,9 +493,11 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } // SubCluster returns generated.SubClusterResolver implementation. func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } -type clusterResolver struct{ *Resolver } -type jobResolver struct{ *Resolver } -type metricValueResolver struct{ *Resolver } -type mutationResolver struct{ *Resolver } -type queryResolver struct{ *Resolver } -type subClusterResolver struct{ *Resolver } +type ( + clusterResolver struct{ *Resolver } + jobResolver struct{ *Resolver } + metricValueResolver struct{ *Resolver } + mutationResolver struct{ *Resolver } + queryResolver struct{ *Resolver } + subClusterResolver struct{ *Resolver } +) diff --git a/internal/repository/dbConnection.go b/internal/repository/dbConnection.go index 418eef9..d062052 100644 --- a/internal/repository/dbConnection.go +++ b/internal/repository/dbConnection.go @@ -82,6 +82,8 @@ func Connect(driver string, db string) { if err != nil { log.Fatal(err) } + + startJobStartWorker() }) } diff --git a/internal/repository/jobFind.go b/internal/repository/jobFind.go index ff5a936..0354df0 100644 --- a/internal/repository/jobFind.go +++ b/internal/repository/jobFind.go @@ -99,6 +99,23 @@ func (r *JobRepository) FindById(ctx context.Context, jobId int64) (*schema.Job, return scanJob(q.RunWith(r.stmtCache).QueryRow()) } +// FindByIdWithUser executes a SQL query to find a specific batch job. +// The job is queried using the database id. The user is passed directly, +// instead of as part of the context. +// It returns a pointer to a schema.Job data structure and an error variable. +// To check if no job was found, test err == sql.ErrNoRows +func (r *JobRepository) FindByIdWithUser(user *schema.User, jobId int64) (*schema.Job, error) { + q := sq.Select(jobColumns...). + From("job").Where("job.id = ?", jobId) + + q, qerr := SecurityCheckWithUser(user, q) + if qerr != nil { + return nil, qerr + } + + return scanJob(q.RunWith(r.stmtCache).QueryRow()) +} + // FindByIdDirect executes a SQL query to find a specific batch job. // The job is queried using the database id. // It returns a pointer to a schema.Job data structure and an error variable.
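The *WithUser variants introduced above exist because the new job-start worker runs outside of any HTTP request, so there is no request context from which GetUserFromContext could recover the authenticated user; the user is handed over explicitly instead. A minimal sketch of the intended call pattern, written as if inside package repository (the function and variable names here are illustrative, not part of the patch):

// Resolve a job on behalf of an explicitly passed user, e.g. from the
// asynchronous job-start worker added below, where no context exists.
func findJobForWorker(r *JobRepository, req JobWithUser, jobId int64) (*schema.Job, error) {
	// Same security check as FindById, but the user comes in directly
	// instead of being read from a request context.
	return r.FindByIdWithUser(req.User, jobId)
}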
diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index c9812a3..0ab2ea2 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -107,8 +107,7 @@ func (r *JobRepository) CountJobs( return count, nil } -func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) { - user := GetUserFromContext(ctx) +func SecurityCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.SelectBuilder, error) { if user == nil { var qnil sq.SelectBuilder return qnil, fmt.Errorf("user context is nil") @@ -134,6 +133,12 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilde } } +func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) { + user := GetUserFromContext(ctx) + + return SecurityCheckWithUser(user, query) +} + // Build a sq.SelectBuilder out of a schema.JobFilter. func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.SelectBuilder { if filter.Tags != nil { diff --git a/internal/repository/jobStartWorker.go b/internal/repository/jobStartWorker.go new file mode 100644 index 0000000..dbd2247 --- /dev/null +++ b/internal/repository/jobStartWorker.go @@ -0,0 +1,70 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. +package repository + +import ( + "sync" + + "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" +) + +type JobWithUser struct { + Job *schema.JobMeta + User *schema.User +} + +var ( + jobStartPending sync.WaitGroup + jobStartChannel chan JobWithUser +) + +func startJobStartWorker() { + jobStartChannel = make(chan JobWithUser, 128) + + go jobStartWorker() +} + +// Job start worker thread +func jobStartWorker() { + for { + select { + case req, ok := <-jobStartChannel: + if !ok { + break + } + jobRepo := GetJobRepository() + + id, err := jobRepo.Start(req.Job) + if err != nil { + log.Errorf("insert into database failed: %v", err) + } + + for _, tag := range req.Job.Tags { + if _, err := jobRepo.AddTagOrCreate(req.User, id, tag.Type, tag.Name, tag.Scope); err != nil { + log.Errorf("adding tag to new job %d failed: %v", id, err) + } + } + + jobStartPending.Done() + } + } +} + +// Trigger async job start +func TriggerJobStart(req JobWithUser) { + if jobStartChannel == nil { + log.Fatal("Cannot start job without jobStart channel. 
Did you start the worker?") + } + + jobStartPending.Add(1) + jobStartChannel <- req +} + +// Wait for background thread to finish pending job start operations +func WaitForJobStart() { + // wait for the worker to process remaining queued jobs + jobStartPending.Wait() +} diff --git a/internal/repository/job_test.go b/internal/repository/job_test.go index f7b3783..363bb6c 100644 --- a/internal/repository/job_test.go +++ b/internal/repository/job_test.go @@ -59,7 +59,7 @@ func TestGetTags(t *testing.T) { ctx := context.WithValue(getContext(t), contextUserKey, contextUserValue) // Test Tag has Scope "global" - tags, counts, err := r.CountTags(ctx) + tags, counts, err := r.CountTags(GetUserFromContext(ctx)) if err != nil { t.Fatal(err) } diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ea195b8..484851d 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -560,9 +560,9 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( ) (*model.MetricHistoPoints, error) { // Get specific Peak or largest Peak var metricConfig *schema.MetricConfig - var peak float64 = 0.0 - var unit string = "" - var footprintStat string = "" + var peak float64 + var unit string + var footprintStat string for _, f := range filters { if f.Cluster != nil { @@ -712,8 +712,8 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram( for idx, metric := range metrics { // Get specific Peak or largest Peak var metricConfig *schema.MetricConfig - var peak float64 = 0.0 - var unit string = "" + var peak float64 + var unit string for _, f := range filters { if f.Cluster != nil { diff --git a/internal/repository/tags.go b/internal/repository/tags.go index 6239495..8120364 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -5,7 +5,6 @@ package repository import ( - "context" "fmt" "strings" @@ -16,8 +15,8 @@ import ( ) // Add the tag with id `tagId` to the job with the database id `jobId`.
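The new `jobStartWorker.go` above is a plain producer/consumer handoff: a buffered channel decouples the HTTP handler from the database insert, and a `sync.WaitGroup` lets shutdown code (or tests) block until every queued start has been processed. A self-contained sketch of the same trigger/worker/wait mechanics, with names simplified:

```go
package main

import (
	"fmt"
	"sync"
)

type startRequest struct{ jobID int64 }

var (
	pending sync.WaitGroup
	queue   = make(chan startRequest, 128) // buffered: handlers rarely block
)

// worker drains the queue; in the real code this is where the database
// insert and tag creation happen.
func worker() {
	for req := range queue {
		fmt.Println("started job", req.jobID)
		pending.Done()
	}
}

// trigger corresponds to TriggerJobStart: count the request as pending
// *before* sending, so a concurrent Wait cannot miss it.
func trigger(req startRequest) {
	pending.Add(1)
	queue <- req
}

func main() {
	go worker()
	for i := int64(1); i <= 3; i++ {
		trigger(startRequest{jobID: i})
	}
	pending.Wait() // corresponds to WaitForJobStart
}
```

Calling `Add(1)` before the channel send is what makes `Wait()` reliable: a request is counted as pending before the worker could possibly `Done()` it.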
-func (r *JobRepository) AddTag(ctx context.Context, job int64, tag int64) ([]*schema.Tag, error) { - j, err := r.FindById(ctx, job) +func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*schema.Tag, error) { + j, err := r.FindByIdWithUser(user, job) if err != nil { log.Warn("Error while finding job by id") return nil, err @@ -31,7 +30,7 @@ func (r *JobRepository) AddTag(ctx context.Context, job int64, tag int64) ([]*sc return nil, err } - tags, err := r.GetTags(ctx, &job) + tags, err := r.GetTags(user, &job) if err != nil { log.Warn("Error while getting tags for job") return nil, err @@ -47,8 +46,8 @@ func (r *JobRepository) AddTag(ctx context.Context, job int64, tag int64) ([]*sc } // Removes a tag from a job -func (r *JobRepository) RemoveTag(ctx context.Context, job, tag int64) ([]*schema.Tag, error) { - j, err := r.FindById(ctx, job) +func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) { + j, err := r.FindByIdWithUser(user, job) if err != nil { log.Warn("Error while finding job by id") return nil, err @@ -62,7 +61,7 @@ func (r *JobRepository) RemoveTag(ctx context.Context, job, tag int64) ([]*schem return nil, err } - tags, err := r.GetTags(ctx, &job) + tags, err := r.GetTags(user, &job) if err != nil { log.Warn("Error while getting tags for job") return nil, err @@ -96,7 +95,7 @@ func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope strin return res.LastInsertId() } -func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, counts map[string]int, err error) { +func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts map[string]int, err error) { // Fetch all Tags in DB for Display in Frontend Tag-View tags = make([]schema.Tag, 0, 100) xrows, err := r.DB.Queryx("SELECT id, tag_type, tag_name, tag_scope FROM tag") @@ -111,7 +110,7 @@ func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, count } // Handle Scope Filtering: Tag Scope is Global, Private (== Username) or User is auth'd to view Admin Tags - readable, err := r.checkScopeAuth(ctx, "read", t.Scope) + readable, err := r.checkScopeAuth(user, "read", t.Scope) if err != nil { return nil, nil, err } @@ -120,8 +119,6 @@ func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, count } } - user := GetUserFromContext(ctx) - // Query and Count Jobs with attached Tags q := sq.Select("t.tag_name, t.id, count(jt.tag_id)"). From("tag t"). @@ -172,13 +169,13 @@ func (r *JobRepository) CountTags(ctx context.Context) (tags []schema.Tag, count // AddTagOrCreate adds the tag with the specified type and name to the job with the database id `jobId`. // If such a tag does not yet exist, it is created. 
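The tag functions in this hunk guard every read and write with `checkScopeAuth`, which now also takes the user directly. A hedged sketch of the scope rules as far as the diff reveals them ("global", "admin", or a username acting as a private scope); the exact role checks, in particular who may write global tags, are assumptions for illustration:

```go
package example

import "fmt"

type User struct {
	Username string
	Roles    []string
}

func (u *User) hasRole(role string) bool {
	for _, r := range u.Roles {
		if r == role {
			return true
		}
	}
	return false
}

// checkScopeAuth decides whether user may read or write a tag of the
// given scope. The rule set is inferred from the comments in the diff
// and may not match the real implementation exactly.
func checkScopeAuth(user *User, operation, scope string) (bool, error) {
	if user == nil {
		return false, fmt.Errorf("no user given for scope check")
	}
	switch {
	case operation == "write" && scope == "admin":
		return user.hasRole("admin"), nil
	case operation == "write" && scope == "global":
		return user.hasRole("admin"), nil // assumption: only admins write global tags
	case operation == "read" && scope == "admin":
		return user.hasRole("admin"), nil
	case operation == "read" && scope == "global":
		return true, nil
	case scope == user.Username:
		return true, nil // private scope: only the owner reads or writes
	default:
		return false, nil
	}
}
```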
-func (r *JobRepository) AddTagOrCreate(ctx context.Context, jobId int64, tagType string, tagName string, tagScope string) (tagId int64, err error) { +func (r *JobRepository) AddTagOrCreate(user *schema.User, jobId int64, tagType string, tagName string, tagScope string) (tagId int64, err error) { // Default to "Global" scope if none defined if tagScope == "" { tagScope = "global" } - writable, err := r.checkScopeAuth(ctx, "write", tagScope) + writable, err := r.checkScopeAuth(user, "write", tagScope) if err != nil { return 0, err } @@ -194,7 +191,7 @@ func (r *JobRepository) AddTagOrCreate(ctx context.Context, jobId int64, tagType } } - if _, err := r.AddTag(ctx, jobId, tagId); err != nil { + if _, err := r.AddTag(user, jobId, tagId); err != nil { return 0, err } @@ -213,7 +210,7 @@ func (r *JobRepository) TagId(tagType string, tagName string, tagScope string) ( } // GetTags returns a list of all scoped tags if job is nil or of the tags that the job with that database ID has. -func (r *JobRepository) GetTags(ctx context.Context, job *int64) ([]*schema.Tag, error) { +func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, error) { q := sq.Select("id", "tag_type", "tag_name", "tag_scope").From("tag") if job != nil { q = q.Join("jobtag ON jobtag.tag_id = tag.id").Where("jobtag.job_id = ?", *job) @@ -234,7 +231,7 @@ func (r *JobRepository) GetTags(ctx context.Context, job *int64) ([]*schema.Tag, return nil, err } // Handle Scope Filtering: Tag Scope is Global, Private (== Username) or User is auth'd to view Admin Tags - readable, err := r.checkScopeAuth(ctx, "read", tag.Scope) + readable, err := r.checkScopeAuth(user, "read", tag.Scope) if err != nil { return nil, err } @@ -295,8 +292,7 @@ func (r *JobRepository) ImportTag(jobId int64, tagType string, tagName string, t return nil } -func (r *JobRepository) checkScopeAuth(ctx context.Context, operation string, scope string) (pass bool, err error) { - user := GetUserFromContext(ctx) +func (r *JobRepository) checkScopeAuth(user *schema.User, operation string, scope string) (pass bool, err error) { if user != nil { switch { case operation == "write" && scope == "admin": diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index abb7793..1e2fe73 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -132,7 +132,7 @@ func setupAnalysisRoute(i InfoType, r *http.Request) InfoType { func setupTaglistRoute(i InfoType, r *http.Request) InfoType { jobRepo := repository.GetJobRepository() - tags, counts, err := jobRepo.CountTags(r.Context()) + tags, counts, err := jobRepo.CountTags(repository.GetUserFromContext(r.Context())) tagMap := make(map[string][]map[string]interface{}) if err != nil { log.Warnf("GetTags failed: %s", err.Error()) diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index b9bf306..0c88c61 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -48,29 +48,29 @@ type SubCluster struct { type SubClusterConfig struct { Name string `json:"name"` Footprint string `json:"footprint,omitempty"` + Energy string `json:"energy"` Peak float64 `json:"peak"` Normal float64 `json:"normal"` Caution float64 `json:"caution"` Alert float64 `json:"alert"` Remove bool `json:"remove"` LowerIsBetter bool `json:"lowerIsBetter"` - Energy string `json:"energy"` } type MetricConfig struct { Unit Unit `json:"unit"` + Energy string `json:"energy"` Name string `json:"name"` Scope MetricScope `json:"scope"` Aggregation string `json:"aggregation"` Footprint string 
`json:"footprint,omitempty"` SubClusters []*SubClusterConfig `json:"subClusters,omitempty"` Peak float64 `json:"peak"` - Normal float64 `json:"normal"` Caution float64 `json:"caution"` Alert float64 `json:"alert"` Timestep int `json:"timestep"` + Normal float64 `json:"normal"` LowerIsBetter bool `json:"lowerIsBetter"` - Energy string `json:"energy"` } type Cluster struct { diff --git a/pkg/schema/config.go b/pkg/schema/config.go index b87841c..a7abefe 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -57,9 +57,9 @@ type IntRange struct { } type TimeRange struct { - Range string `json:"range,omitempty"` // Optional, e.g. 'last6h' From *time.Time `json:"from"` To *time.Time `json:"to"` + Range string `json:"range,omitempty"` } type FilterRanges struct { @@ -82,10 +82,10 @@ type Retention struct { } type ResampleConfig struct { - // Trigger next zoom level at less than this many visible datapoints - Trigger int `json:"trigger"` // Array of resampling target resolutions, in seconds; Example: [600,300,60] Resolutions []int `json:"resolutions"` + // Trigger next zoom level at less than this many visible datapoints + Trigger int `json:"trigger"` } type CronFrequency struct { diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go index 9db853d..bbc3c74 100644 --- a/pkg/schema/metrics.go +++ b/pkg/schema/metrics.go @@ -17,17 +17,17 @@ import ( type JobData map[string]map[MetricScope]*JobMetric type JobMetric struct { - Unit Unit `json:"unit"` - Timestep int `json:"timestep"` - Series []Series `json:"series"` StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"` + Unit Unit `json:"unit"` + Series []Series `json:"series"` + Timestep int `json:"timestep"` } type Series struct { - Hostname string `json:"hostname"` Id *string `json:"id,omitempty"` - Statistics MetricStatistics `json:"statistics"` + Hostname string `json:"hostname"` Data []Float `json:"data"` + Statistics MetricStatistics `json:"statistics"` } type MetricStatistics struct { @@ -37,11 +37,11 @@ type MetricStatistics struct { } type StatsSeries struct { + Percentiles map[int][]Float `json:"percentiles,omitempty"` Mean []Float `json:"mean"` Median []Float `json:"median"` Min []Float `json:"min"` Max []Float `json:"max"` - Percentiles map[int][]Float `json:"percentiles,omitempty"` } type MetricScope string @@ -229,7 +229,7 @@ func (jd *JobData) AddNodeScope(metric string) bool { return false } - var maxScope MetricScope = MetricScopeInvalid + maxScope := MetricScopeInvalid for scope := range scopes { maxScope = maxScope.Max(scope) } diff --git a/pkg/schema/user.go b/pkg/schema/user.go index 7b1ca13..c004254 100644 --- a/pkg/schema/user.go +++ b/pkg/schema/user.go @@ -42,11 +42,11 @@ type User struct { Username string `json:"username"` Password string `json:"-"` Name string `json:"name"` + Email string `json:"email"` Roles []string `json:"roles"` + Projects []string `json:"projects"` AuthType AuthType `json:"authType"` AuthSource AuthSource `json:"authSource"` - Email string `json:"email"` - Projects []string `json:"projects"` } func (u *User) HasProject(project string) bool { From 8ea1454c066b5c69fb251f0a9bbe0afda71432dd Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 25 Nov 2024 17:03:59 +0100 Subject: [PATCH 257/443] improve transaction init error handling --- .../taskManager/updateFootprintService.go | 41 +++++++++---------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 
580e338..59c1b12 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -43,6 +43,9 @@ func RegisterFootprintWorker() { if err != nil { continue } + // NOTE: Additional Subcluster Loop Could Allow For Limited List Of (Energy)Footprint-Metrics Only. + // - Chunk-Size Would Then Be 'SubCluster' (Running Jobs, Transactions) as Lists Can Change Within SCs + // - Would Require Review of 'updateFootprint' And 'updateEnergy' Usage allMetrics := make([]string, 0) metricConfigs := archive.GetCluster(cluster.Name).MetricConfig for _, mc := range metricConfigs { @@ -78,7 +81,7 @@ func RegisterFootprintWorker() { for _, metric := range allMetrics { avg, min, max := 0.0, 0.0, 0.0 - data, ok := jobStats[metric] // Metric:[Hostname:Stats] + data, ok := jobStats[metric] // JobStats[Metric1:[Hostname1:[Stats], Hostname2:[Stats], ...], Metric2[...] ...] if ok { for _, res := range job.Resources { hostStats, ok := data[res.Hostname] @@ -87,14 +90,9 @@ func RegisterFootprintWorker() { min = math.Min(min, hostStats.Min) max = math.Max(max, hostStats.Max) } - // else { - // log.Debugf("no stats data return for host %s in job %d, metric %s", res.Hostname, job.JobID, metric) - // } + } } - // else { - // log.Debugf("no stats data return for job %d, metric %s", job.JobID, metric) - // } // Add values rounded to 2 digits jobMeta.Statistics[metric] = schema.JobStatistics{ @@ -131,23 +129,22 @@ func RegisterFootprintWorker() { t, err := jobRepo.TransactionInit() if err != nil { log.Errorf("failed TransactionInit %v", err) - } - - for idx, ps := range pendingStatements { - - query, args, err := ps.ToSql() - if err != nil { - log.Errorf("failed in ToSQL conversion: %v", err) - ce++ - } else { - // Args: JSON, JSON, ENERGY, JOBID - log.Debugf("add transaction on index %d", idx) - jobRepo.TransactionAdd(t, query, args...) - c++ + log.Errorf("skipped %d transactions for cluster %s", len(pendingStatements), cluster.Name) + ce += len(pendingStatements) + } else { + for _, ps := range pendingStatements { + query, args, err := ps.ToSql() + if err != nil { + log.Errorf("failed in ToSQL conversion: %v", err) + ce++ + } else { + // args...: Footprint-JSON, Energyfootprint-JSON, TotalEnergy, JobID + jobRepo.TransactionAdd(t, query, args...) 
+ c++ + } } + jobRepo.TransactionEnd(t) } - - jobRepo.TransactionEnd(t) log.Debugf("Finish Cluster %s, took %s", cluster.Name, time.Since(s_cluster)) } log.Printf("Updating %d (of %d; Skipped %d) Footprints is done and took %s", c, cl, ce, time.Since(s)) From adb11b3ed070a26b28065bb98bc6c825c68f7eae Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Mon, 25 Nov 2024 17:35:22 +0100 Subject: [PATCH 258/443] Re-enable Footprint worker --- internal/taskManager/taskManager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/taskManager/taskManager.go b/internal/taskManager/taskManager.go index 4dbe4ad..101fc4a 100644 --- a/internal/taskManager/taskManager.go +++ b/internal/taskManager/taskManager.go @@ -79,7 +79,7 @@ func Start() { RegisterLdapSyncService(lc.SyncInterval) } - // RegisterFootprintWorker() + RegisterFootprintWorker() RegisterUpdateDurationWorker() s.Start() From 28539e60b06d12aab988dad818382850b4e9cd30 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 26 Nov 2024 07:02:53 +0100 Subject: [PATCH 259/443] Regenerate Swagger, fix tests, cleanup --- api/swagger.json | 239 +++++++++----------------- api/swagger.yaml | 194 +++++++-------------- internal/api/api_test.go | 23 ++- internal/api/docs.go | 239 +++++++++----------------- internal/api/rest.go | 20 +-- internal/repository/jobStartWorker.go | 21 ++- 6 files changed, 254 insertions(+), 482 deletions(-) diff --git a/api/swagger.json b/api/swagger.json index 7f5eaf7..3b59b5e 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -595,88 +595,6 @@ } } }, - "/jobs/stop_job/{id}": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Job to stop is specified by database ID. Only stopTime and final state are required in request body.\nReturns full job resource information according to 'JobMeta' scheme.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Job add and modify" - ], - "summary": "Marks job as completed and triggers archiving", - "parameters": [ - { - "type": "integer", - "description": "Database ID of Job", - "name": "id", - "in": "path", - "required": true - }, - { - "description": "stopTime and final state in request body", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/api.StopJobApiRequest" - } - } - ], - "responses": { - "200": { - "description": "Job resource", - "schema": { - "$ref": "#/definitions/schema.JobMeta" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "404": { - "description": "Resource not found", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "422": { - "description": "Unprocessable Entity: finding job failed: sql: no rows in result set", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - } - } - } - }, "/jobs/tag_job/{id}": { "post": { "security": [ @@ -684,7 +602,7 @@ "ApiKeyAuth": [] } ], - "description": "Adds tag(s) to a job specified by DB ID. 
Name and Type of Tag(s) can be chosen freely.\nIf tagged job is already finished: Tag will be written directly to respective archive files.", + "description": "Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely.\nTag Scope for frontend visibility will default to \"global\" if none entered, other options: \"admin\" or specific username.\nIf tagged job is already finished: Tag will be written directly to respective archive files.", "consumes": [ "application/json" ], @@ -1277,6 +1195,11 @@ "type": "string", "example": "Testjob" }, + "scope": { + "description": "Tag Scope for Frontend Display", + "type": "string", + "example": "global" + }, "type": { "description": "Tag Type", "type": "string", @@ -1404,9 +1327,8 @@ "api.StartJobApiResponse": { "type": "object", "properties": { - "id": { - "description": "Database ID of new job", - "type": "integer" + "msg": { + "type": "string" } } }, @@ -1418,17 +1340,14 @@ ], "properties": { "cluster": { - "description": "Cluster of job", "type": "string", "example": "fritz" }, "jobId": { - "description": "Cluster Job ID of job", "type": "integer", "example": 123000 }, "jobState": { - "description": "Final job state", "allOf": [ { "$ref": "#/definitions/schema.JobState" @@ -1437,12 +1356,10 @@ "example": "completed" }, "startTime": { - "description": "Start Time of job as epoch", "type": "integer", "example": 1649723812 }, "stopTime": { - "description": "Stop Time of job as epoch", "type": "integer", "example": 1649763839 } @@ -1487,12 +1404,10 @@ "type": "object", "properties": { "arrayJobId": { - "description": "The unique identifier of an array job", "type": "integer", "example": 123000 }, "cluster": { - "description": "The unique identifier of a cluster", "type": "string", "example": "fritz" }, @@ -1500,33 +1415,39 @@ "$ref": "#/definitions/schema.JobLinkResultList" }, "duration": { - "description": "Duration of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 43200 }, + "energy": { + "type": "number" + }, + "energyFootprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } + }, "exclusive": { - "description": "Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user", "type": "integer", "maximum": 2, "minimum": 0, "example": 1 }, - "flopsAnyAvg": { - "description": "FlopsAnyAvg as Float64", - "type": "number" + "footprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } }, "id": { - "description": "The unique identifier of a job in the database", "type": "integer" }, "jobId": { - "description": "The unique identifier of a job", "type": "integer", "example": 123000 }, "jobState": { - "description": "Final state of job", "enum": [ "completed", "failed", @@ -1542,95 +1463,69 @@ ], "example": "completed" }, - "loadAvg": { - "description": "LoadAvg as Float64", - "type": "number" - }, - "memBwAvg": { - "description": "MemBwAvg as Float64", - "type": "number" - }, - "memUsedMax": { - "description": "MemUsedMax as Float64", - "type": "number" - }, "metaData": { - "description": "Additional information about the job", "type": "object", "additionalProperties": { "type": "string" } }, "monitoringStatus": { - "description": "State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull", "type": "integer", "maximum": 3, "minimum": 0, "example": 1 }, "numAcc": { - "description": 
"Number of accelerators used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 2 }, "numHwthreads": { - "description": "NumCores int32 `json:\"numCores\" db:\"num_cores\" example:\"20\" minimum:\"1\"` // Number of HWThreads used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 20 }, "numNodes": { - "description": "Number of nodes used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 2 }, "partition": { - "description": "The Slurm partition to which the job was submitted", "type": "string", "example": "main" }, "project": { - "description": "The unique identifier of a project", "type": "string", "example": "abcd200" }, "resources": { - "description": "Resources used by job", "type": "array", "items": { "$ref": "#/definitions/schema.Resource" } }, "smt": { - "description": "SMT threads used by job", "type": "integer", "example": 4 }, "startTime": { - "description": "Start time as 'time.Time' data type", "type": "string" }, "subCluster": { - "description": "The unique identifier of a sub cluster", "type": "string", "example": "main" }, "tags": { - "description": "List of tags", "type": "array", "items": { "$ref": "#/definitions/schema.Tag" } }, "user": { - "description": "The unique identifier of a user", "type": "string", "example": "abcd100h" }, "walltime": { - "description": "Requested walltime of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 86400 @@ -1667,12 +1562,10 @@ "type": "object", "properties": { "arrayJobId": { - "description": "The unique identifier of an array job", "type": "integer", "example": 123000 }, "cluster": { - "description": "The unique identifier of a cluster", "type": "string", "example": "fritz" }, @@ -1680,29 +1573,39 @@ "$ref": "#/definitions/schema.JobLinkResultList" }, "duration": { - "description": "Duration of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 43200 }, + "energy": { + "type": "number" + }, + "energyFootprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } + }, "exclusive": { - "description": "Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user", "type": "integer", "maximum": 2, "minimum": 0, "example": 1 }, + "footprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } + }, "id": { - "description": "The unique identifier of a job in the database", "type": "integer" }, "jobId": { - "description": "The unique identifier of a job", "type": "integer", "example": 123000 }, "jobState": { - "description": "Final state of job", "enum": [ "completed", "failed", @@ -1719,91 +1622,76 @@ "example": "completed" }, "metaData": { - "description": "Additional information about the job", "type": "object", "additionalProperties": { "type": "string" } }, "monitoringStatus": { - "description": "State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull", "type": "integer", "maximum": 3, "minimum": 0, "example": 1 }, "numAcc": { - "description": "Number of accelerators used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 2 }, "numHwthreads": { - "description": "NumCores int32 `json:\"numCores\" db:\"num_cores\" example:\"20\" minimum:\"1\"` // Number of HWThreads used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 20 }, "numNodes": { - "description": "Number of nodes used (Min \u003e 0)", "type": "integer", 
"minimum": 1, "example": 2 }, "partition": { - "description": "The Slurm partition to which the job was submitted", "type": "string", "example": "main" }, "project": { - "description": "The unique identifier of a project", "type": "string", "example": "abcd200" }, "resources": { - "description": "Resources used by job", "type": "array", "items": { "$ref": "#/definitions/schema.Resource" } }, "smt": { - "description": "SMT threads used by job", "type": "integer", "example": 4 }, "startTime": { - "description": "Start epoch time stamp in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 1649723812 }, "statistics": { - "description": "Metric statistics of job", "type": "object", "additionalProperties": { "$ref": "#/definitions/schema.JobStatistics" } }, "subCluster": { - "description": "The unique identifier of a sub cluster", "type": "string", "example": "main" }, "tags": { - "description": "List of tags", "type": "array", "items": { "$ref": "#/definitions/schema.Tag" } }, "user": { - "description": "The unique identifier of a user", "type": "string", "example": "abcd100h" }, "walltime": { - "description": "Requested walltime of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 86400 @@ -1892,6 +1780,15 @@ "caution": { "type": "number" }, + "energy": { + "type": "string" + }, + "footprint": { + "type": "string" + }, + "lowerIsBetter": { + "type": "boolean" + }, "name": { "type": "string" }, @@ -1969,22 +1866,18 @@ "type": "object", "properties": { "accelerators": { - "description": "List of of accelerator device ids", "type": "array", "items": { "type": "string" } }, "configuration": { - "description": "The configuration options of the node", "type": "string" }, "hostname": { - "description": "Name of the host (= node)", "type": "string" }, "hwthreads": { - "description": "List of OS processor ids", "type": "array", "items": { "type": "integer" @@ -2027,6 +1920,12 @@ "type": "number" } }, + "median": { + "type": "array", + "items": { + "type": "number" + } + }, "min": { "type": "array", "items": { @@ -2050,15 +1949,33 @@ "coresPerSocket": { "type": "integer" }, + "energyFootprint": { + "type": "array", + "items": { + "type": "string" + } + }, "flopRateScalar": { "$ref": "#/definitions/schema.MetricValue" }, "flopRateSimd": { "$ref": "#/definitions/schema.MetricValue" }, + "footprint": { + "type": "array", + "items": { + "type": "string" + } + }, "memoryBandwidth": { "$ref": "#/definitions/schema.MetricValue" }, + "metricConfig": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.MetricConfig" + } + }, "name": { "type": "string" }, @@ -2088,6 +2005,15 @@ "caution": { "type": "number" }, + "energy": { + "type": "string" + }, + "footprint": { + "type": "string" + }, + "lowerIsBetter": { + "type": "boolean" + }, "name": { "type": "string" }, @@ -2107,16 +2033,17 @@ "type": "object", "properties": { "id": { - "description": "The unique DB identifier of a tag", "type": "integer" }, "name": { - "description": "Tag Name", "type": "string", "example": "Testjob" }, + "scope": { + "type": "string", + "example": "global" + }, "type": { - "description": "Tag Type", "type": "string", "example": "Debug" } diff --git a/api/swagger.yaml b/api/swagger.yaml index f47ac3f..4e3c47e 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -23,6 +23,10 @@ definitions: description: Tag Name example: Testjob type: string + scope: + description: Tag Scope for Frontend Display + example: global + type: string type: description: Tag Type example: Debug @@ 
-110,31 +114,25 @@ definitions: type: object api.StartJobApiResponse: properties: - id: - description: Database ID of new job - type: integer + msg: + type: string type: object api.StopJobApiRequest: properties: cluster: - description: Cluster of job example: fritz type: string jobId: - description: Cluster Job ID of job example: 123000 type: integer jobState: allOf: - $ref: '#/definitions/schema.JobState' - description: Final job state example: completed startTime: - description: Start Time of job as epoch example: 1649723812 type: integer stopTime: - description: Stop Time of job as epoch example: 1649763839 type: integer required: @@ -167,42 +165,40 @@ definitions: description: Information of a HPC job. properties: arrayJobId: - description: The unique identifier of an array job example: 123000 type: integer cluster: - description: The unique identifier of a cluster example: fritz type: string concurrentJobs: $ref: '#/definitions/schema.JobLinkResultList' duration: - description: Duration of job in seconds (Min > 0) example: 43200 minimum: 1 type: integer + energy: + type: number + energyFootprint: + additionalProperties: + type: number + type: object exclusive: - description: 'Specifies how nodes are shared: 0 - Shared among multiple jobs - of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple - jobs of same user' example: 1 maximum: 2 minimum: 0 type: integer - flopsAnyAvg: - description: FlopsAnyAvg as Float64 - type: number + footprint: + additionalProperties: + type: number + type: object id: - description: The unique identifier of a job in the database type: integer jobId: - description: The unique identifier of a job example: 123000 type: integer jobState: allOf: - $ref: '#/definitions/schema.JobState' - description: Final state of job enum: - completed - failed @@ -211,79 +207,53 @@ definitions: - timeout - out_of_memory example: completed - loadAvg: - description: LoadAvg as Float64 - type: number - memBwAvg: - description: MemBwAvg as Float64 - type: number - memUsedMax: - description: MemUsedMax as Float64 - type: number metaData: additionalProperties: type: string - description: Additional information about the job type: object monitoringStatus: - description: 'State of monitoring system during job run: 0 - Disabled, 1 - - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull' example: 1 maximum: 3 minimum: 0 type: integer numAcc: - description: Number of accelerators used (Min > 0) example: 2 minimum: 1 type: integer numHwthreads: - description: NumCores int32 `json:"numCores" db:"num_cores" - example:"20" minimum:"1"` // - Number of HWThreads used (Min > 0) example: 20 minimum: 1 type: integer numNodes: - description: Number of nodes used (Min > 0) example: 2 minimum: 1 type: integer partition: - description: The Slurm partition to which the job was submitted example: main type: string project: - description: The unique identifier of a project example: abcd200 type: string resources: - description: Resources used by job items: $ref: '#/definitions/schema.Resource' type: array smt: - description: SMT threads used by job example: 4 type: integer startTime: - description: Start time as 'time.Time' data type type: string subCluster: - description: The unique identifier of a sub cluster example: main type: string tags: - description: List of tags items: $ref: '#/definitions/schema.Tag' type: array user: - description: The unique identifier of a user example: abcd100h type: string walltime: - description: Requested walltime of job in 
seconds (Min > 0) example: 86400 minimum: 1 type: integer @@ -308,39 +278,40 @@ definitions: description: Meta data information of a HPC job. properties: arrayJobId: - description: The unique identifier of an array job example: 123000 type: integer cluster: - description: The unique identifier of a cluster example: fritz type: string concurrentJobs: $ref: '#/definitions/schema.JobLinkResultList' duration: - description: Duration of job in seconds (Min > 0) example: 43200 minimum: 1 type: integer + energy: + type: number + energyFootprint: + additionalProperties: + type: number + type: object exclusive: - description: 'Specifies how nodes are shared: 0 - Shared among multiple jobs - of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple - jobs of same user' example: 1 maximum: 2 minimum: 0 type: integer + footprint: + additionalProperties: + type: number + type: object id: - description: The unique identifier of a job in the database type: integer jobId: - description: The unique identifier of a job example: 123000 type: integer jobState: allOf: - $ref: '#/definitions/schema.JobState' - description: Final state of job enum: - completed - failed @@ -352,74 +323,56 @@ definitions: metaData: additionalProperties: type: string - description: Additional information about the job type: object monitoringStatus: - description: 'State of monitoring system during job run: 0 - Disabled, 1 - - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull' example: 1 maximum: 3 minimum: 0 type: integer numAcc: - description: Number of accelerators used (Min > 0) example: 2 minimum: 1 type: integer numHwthreads: - description: NumCores int32 `json:"numCores" db:"num_cores" - example:"20" minimum:"1"` // - Number of HWThreads used (Min > 0) example: 20 minimum: 1 type: integer numNodes: - description: Number of nodes used (Min > 0) example: 2 minimum: 1 type: integer partition: - description: The Slurm partition to which the job was submitted example: main type: string project: - description: The unique identifier of a project example: abcd200 type: string resources: - description: Resources used by job items: $ref: '#/definitions/schema.Resource' type: array smt: - description: SMT threads used by job example: 4 type: integer startTime: - description: Start epoch time stamp in seconds (Min > 0) example: 1649723812 minimum: 1 type: integer statistics: additionalProperties: $ref: '#/definitions/schema.JobStatistics' - description: Metric statistics of job type: object subCluster: - description: The unique identifier of a sub cluster example: main type: string tags: - description: List of tags items: $ref: '#/definitions/schema.Tag' type: array user: - description: The unique identifier of a user example: abcd100h type: string walltime: - description: Requested walltime of job in seconds (Min > 0) example: 86400 minimum: 1 type: integer @@ -486,6 +439,12 @@ definitions: type: number caution: type: number + energy: + type: string + footprint: + type: string + lowerIsBetter: + type: boolean name: type: string normal: @@ -541,18 +500,14 @@ definitions: description: A resource used by a job properties: accelerators: - description: List of of accelerator device ids items: type: string type: array configuration: - description: The configuration options of the node type: string hostname: - description: Name of the host (= node) type: string hwthreads: - description: List of OS processor ids items: type: integer type: array @@ -580,6 +535,10 @@ definitions: items: type: number 
type: array + median: + items: + type: number + type: array min: items: type: number @@ -595,12 +554,24 @@ definitions: properties: coresPerSocket: type: integer + energyFootprint: + items: + type: string + type: array flopRateScalar: $ref: '#/definitions/schema.MetricValue' flopRateSimd: $ref: '#/definitions/schema.MetricValue' + footprint: + items: + type: string + type: array memoryBandwidth: $ref: '#/definitions/schema.MetricValue' + metricConfig: + items: + $ref: '#/definitions/schema.MetricConfig' + type: array name: type: string nodes: @@ -620,6 +591,12 @@ definitions: type: number caution: type: number + energy: + type: string + footprint: + type: string + lowerIsBetter: + type: boolean name: type: string normal: @@ -633,14 +610,14 @@ definitions: description: Defines a tag using name and type. properties: id: - description: The unique DB identifier of a tag type: integer name: - description: Tag Name example: Testjob type: string + scope: + example: global + type: string type: - description: Tag Type example: Debug type: string type: object @@ -1197,68 +1174,13 @@ paths: summary: Marks job as completed and triggers archiving tags: - Job add and modify - /jobs/stop_job/{id}: - post: - consumes: - - application/json - description: |- - Job to stop is specified by database ID. Only stopTime and final state are required in request body. - Returns full job resource information according to 'JobMeta' scheme. - parameters: - - description: Database ID of Job - in: path - name: id - required: true - type: integer - - description: stopTime and final state in request body - in: body - name: request - required: true - schema: - $ref: '#/definitions/api.StopJobApiRequest' - produces: - - application/json - responses: - "200": - description: Job resource - schema: - $ref: '#/definitions/schema.JobMeta' - "400": - description: Bad Request - schema: - $ref: '#/definitions/api.ErrorResponse' - "401": - description: Unauthorized - schema: - $ref: '#/definitions/api.ErrorResponse' - "403": - description: Forbidden - schema: - $ref: '#/definitions/api.ErrorResponse' - "404": - description: Resource not found - schema: - $ref: '#/definitions/api.ErrorResponse' - "422": - description: 'Unprocessable Entity: finding job failed: sql: no rows in - result set' - schema: - $ref: '#/definitions/api.ErrorResponse' - "500": - description: Internal Server Error - schema: - $ref: '#/definitions/api.ErrorResponse' - security: - - ApiKeyAuth: [] - summary: Marks job as completed and triggers archiving - tags: - - Job add and modify /jobs/tag_job/{id}: post: consumes: - application/json description: |- Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely. + Tag Scope for frontend visibility will default to "global" if none entered, other options: "admin" or specific username. If tagged job is already finished: Tag will be written directly to respective archive files. 
parameters: - description: Job Database ID diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 3d1d7bb..bcabd5f 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -14,9 +14,9 @@ import ( "os" "path/filepath" "reflect" - "strconv" "strings" "testing" + "time" "github.com/ClusterCockpit/cc-backend/internal/api" "github.com/ClusterCockpit/cc-backend/internal/archiver" @@ -200,6 +200,10 @@ func TestRestApi(t *testing.T) { r.StrictSlash(true) restapi.MountApiRoutes(r) + var TestJobId int64 = 123 + var TestClusterName string = "testcluster" + var TestStartTime int64 = 123456789 + const startJobBody string = `{ "jobId": 123, "user": "testuser", @@ -225,7 +229,6 @@ func TestRestApi(t *testing.T) { "startTime": 123456789 }` - var dbid int64 const contextUserKey repository.ContextKey = "user" contextUserValue := &schema.User{ Username: "testuser", @@ -247,13 +250,10 @@ func TestRestApi(t *testing.T) { t.Fatal(response.Status, recorder.Body.String()) } - var res api.StartJobApiResponse - if err := json.Unmarshal(recorder.Body.Bytes(), &res); err != nil { - t.Fatal(err) - } + time.Sleep(1 * time.Second) resolver := graph.GetResolverInstance() - job, err := resolver.Query().Job(ctx, strconv.Itoa(int(res.DBID))) + job, err := restapi.JobRepository.Find(&TestJobId, &TestClusterName, &TestStartTime) if err != nil { t.Fatal(err) } @@ -285,8 +285,6 @@ func TestRestApi(t *testing.T) { if len(job.Tags) != 1 || job.Tags[0].Type != "testTagType" || job.Tags[0].Name != "testTagName" || job.Tags[0].Scope != "testuser" { t.Fatalf("unexpected tags: %#v", job.Tags) } - - dbid = res.DBID }); !ok { return } @@ -314,8 +312,7 @@ func TestRestApi(t *testing.T) { } archiver.WaitForArchiving() - resolver := graph.GetResolverInstance() - job, err := resolver.Query().Job(ctx, strconv.Itoa(int(dbid))) + job, err := restapi.JobRepository.Find(&TestJobId, &TestClusterName, &TestStartTime) if err != nil { t.Fatal(err) } @@ -404,8 +401,10 @@ func TestRestApi(t *testing.T) { t.Fatal("subtest failed") } + time.Sleep(1 * time.Second) + const stopJobBodyFailed string = `{ - "jobId": 12345, + "jobId": 12345, "cluster": "testcluster", "jobState": "failed", diff --git a/internal/api/docs.go b/internal/api/docs.go index e5ec50b..7c1daac 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -601,88 +601,6 @@ const docTemplate = `{ } } }, - "/jobs/stop_job/{id}": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Job to stop is specified by database ID. 
Only stopTime and final state are required in request body.\nReturns full job resource information according to 'JobMeta' scheme.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Job add and modify" - ], - "summary": "Marks job as completed and triggers archiving", - "parameters": [ - { - "type": "integer", - "description": "Database ID of Job", - "name": "id", - "in": "path", - "required": true - }, - { - "description": "stopTime and final state in request body", - "name": "request", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/api.StopJobApiRequest" - } - } - ], - "responses": { - "200": { - "description": "Job resource", - "schema": { - "$ref": "#/definitions/schema.JobMeta" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "404": { - "description": "Resource not found", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "422": { - "description": "Unprocessable Entity: finding job failed: sql: no rows in result set", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "$ref": "#/definitions/api.ErrorResponse" - } - } - } - } - }, "/jobs/tag_job/{id}": { "post": { "security": [ @@ -690,7 +608,7 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely.\nIf tagged job is already finished: Tag will be written directly to respective archive files.", + "description": "Adds tag(s) to a job specified by DB ID. 
Name and Type of Tag(s) can be chosen freely.\nTag Scope for frontend visibility will default to \"global\" if none entered, other options: \"admin\" or specific username.\nIf tagged job is already finished: Tag will be written directly to respective archive files.", "consumes": [ "application/json" ], @@ -1283,6 +1201,11 @@ const docTemplate = `{ "type": "string", "example": "Testjob" }, + "scope": { + "description": "Tag Scope for Frontend Display", + "type": "string", + "example": "global" + }, "type": { "description": "Tag Type", "type": "string", @@ -1410,9 +1333,8 @@ const docTemplate = `{ "api.StartJobApiResponse": { "type": "object", "properties": { - "id": { - "description": "Database ID of new job", - "type": "integer" + "msg": { + "type": "string" } } }, @@ -1424,17 +1346,14 @@ const docTemplate = `{ ], "properties": { "cluster": { - "description": "Cluster of job", "type": "string", "example": "fritz" }, "jobId": { - "description": "Cluster Job ID of job", "type": "integer", "example": 123000 }, "jobState": { - "description": "Final job state", "allOf": [ { "$ref": "#/definitions/schema.JobState" @@ -1443,12 +1362,10 @@ const docTemplate = `{ "example": "completed" }, "startTime": { - "description": "Start Time of job as epoch", "type": "integer", "example": 1649723812 }, "stopTime": { - "description": "Stop Time of job as epoch", "type": "integer", "example": 1649763839 } @@ -1493,12 +1410,10 @@ const docTemplate = `{ "type": "object", "properties": { "arrayJobId": { - "description": "The unique identifier of an array job", "type": "integer", "example": 123000 }, "cluster": { - "description": "The unique identifier of a cluster", "type": "string", "example": "fritz" }, @@ -1506,33 +1421,39 @@ const docTemplate = `{ "$ref": "#/definitions/schema.JobLinkResultList" }, "duration": { - "description": "Duration of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 43200 }, + "energy": { + "type": "number" + }, + "energyFootprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } + }, "exclusive": { - "description": "Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user", "type": "integer", "maximum": 2, "minimum": 0, "example": 1 }, - "flopsAnyAvg": { - "description": "FlopsAnyAvg as Float64", - "type": "number" + "footprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } }, "id": { - "description": "The unique identifier of a job in the database", "type": "integer" }, "jobId": { - "description": "The unique identifier of a job", "type": "integer", "example": 123000 }, "jobState": { - "description": "Final state of job", "enum": [ "completed", "failed", @@ -1548,95 +1469,69 @@ const docTemplate = `{ ], "example": "completed" }, - "loadAvg": { - "description": "LoadAvg as Float64", - "type": "number" - }, - "memBwAvg": { - "description": "MemBwAvg as Float64", - "type": "number" - }, - "memUsedMax": { - "description": "MemUsedMax as Float64", - "type": "number" - }, "metaData": { - "description": "Additional information about the job", "type": "object", "additionalProperties": { "type": "string" } }, "monitoringStatus": { - "description": "State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull", "type": "integer", "maximum": 3, "minimum": 0, "example": 1 }, "numAcc": { - "description": "Number of accelerators used (Min 
\u003e 0)", "type": "integer", "minimum": 1, "example": 2 }, "numHwthreads": { - "description": "NumCores int32 ` + "`" + `json:\"numCores\" db:\"num_cores\" example:\"20\" minimum:\"1\"` + "`" + ` // Number of HWThreads used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 20 }, "numNodes": { - "description": "Number of nodes used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 2 }, "partition": { - "description": "The Slurm partition to which the job was submitted", "type": "string", "example": "main" }, "project": { - "description": "The unique identifier of a project", "type": "string", "example": "abcd200" }, "resources": { - "description": "Resources used by job", "type": "array", "items": { "$ref": "#/definitions/schema.Resource" } }, "smt": { - "description": "SMT threads used by job", "type": "integer", "example": 4 }, "startTime": { - "description": "Start time as 'time.Time' data type", "type": "string" }, "subCluster": { - "description": "The unique identifier of a sub cluster", "type": "string", "example": "main" }, "tags": { - "description": "List of tags", "type": "array", "items": { "$ref": "#/definitions/schema.Tag" } }, "user": { - "description": "The unique identifier of a user", "type": "string", "example": "abcd100h" }, "walltime": { - "description": "Requested walltime of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 86400 @@ -1673,12 +1568,10 @@ const docTemplate = `{ "type": "object", "properties": { "arrayJobId": { - "description": "The unique identifier of an array job", "type": "integer", "example": 123000 }, "cluster": { - "description": "The unique identifier of a cluster", "type": "string", "example": "fritz" }, @@ -1686,29 +1579,39 @@ const docTemplate = `{ "$ref": "#/definitions/schema.JobLinkResultList" }, "duration": { - "description": "Duration of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 43200 }, + "energy": { + "type": "number" + }, + "energyFootprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } + }, "exclusive": { - "description": "Specifies how nodes are shared: 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive (Default), 2 - Shared among multiple jobs of same user", "type": "integer", "maximum": 2, "minimum": 0, "example": 1 }, + "footprint": { + "type": "object", + "additionalProperties": { + "type": "number" + } + }, "id": { - "description": "The unique identifier of a job in the database", "type": "integer" }, "jobId": { - "description": "The unique identifier of a job", "type": "integer", "example": 123000 }, "jobState": { - "description": "Final state of job", "enum": [ "completed", "failed", @@ -1725,91 +1628,76 @@ const docTemplate = `{ "example": "completed" }, "metaData": { - "description": "Additional information about the job", "type": "object", "additionalProperties": { "type": "string" } }, "monitoringStatus": { - "description": "State of monitoring system during job run: 0 - Disabled, 1 - Running or Archiving (Default), 2 - Archiving Failed, 3 - Archiving Successfull", "type": "integer", "maximum": 3, "minimum": 0, "example": 1 }, "numAcc": { - "description": "Number of accelerators used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 2 }, "numHwthreads": { - "description": "NumCores int32 ` + "`" + `json:\"numCores\" db:\"num_cores\" example:\"20\" minimum:\"1\"` + "`" + ` // Number of HWThreads used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 20 }, "numNodes": { - 
"description": "Number of nodes used (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 2 }, "partition": { - "description": "The Slurm partition to which the job was submitted", "type": "string", "example": "main" }, "project": { - "description": "The unique identifier of a project", "type": "string", "example": "abcd200" }, "resources": { - "description": "Resources used by job", "type": "array", "items": { "$ref": "#/definitions/schema.Resource" } }, "smt": { - "description": "SMT threads used by job", "type": "integer", "example": 4 }, "startTime": { - "description": "Start epoch time stamp in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 1649723812 }, "statistics": { - "description": "Metric statistics of job", "type": "object", "additionalProperties": { "$ref": "#/definitions/schema.JobStatistics" } }, "subCluster": { - "description": "The unique identifier of a sub cluster", "type": "string", "example": "main" }, "tags": { - "description": "List of tags", "type": "array", "items": { "$ref": "#/definitions/schema.Tag" } }, "user": { - "description": "The unique identifier of a user", "type": "string", "example": "abcd100h" }, "walltime": { - "description": "Requested walltime of job in seconds (Min \u003e 0)", "type": "integer", "minimum": 1, "example": 86400 @@ -1898,6 +1786,15 @@ const docTemplate = `{ "caution": { "type": "number" }, + "energy": { + "type": "string" + }, + "footprint": { + "type": "string" + }, + "lowerIsBetter": { + "type": "boolean" + }, "name": { "type": "string" }, @@ -1975,22 +1872,18 @@ const docTemplate = `{ "type": "object", "properties": { "accelerators": { - "description": "List of of accelerator device ids", "type": "array", "items": { "type": "string" } }, "configuration": { - "description": "The configuration options of the node", "type": "string" }, "hostname": { - "description": "Name of the host (= node)", "type": "string" }, "hwthreads": { - "description": "List of OS processor ids", "type": "array", "items": { "type": "integer" @@ -2033,6 +1926,12 @@ const docTemplate = `{ "type": "number" } }, + "median": { + "type": "array", + "items": { + "type": "number" + } + }, "min": { "type": "array", "items": { @@ -2056,15 +1955,33 @@ const docTemplate = `{ "coresPerSocket": { "type": "integer" }, + "energyFootprint": { + "type": "array", + "items": { + "type": "string" + } + }, "flopRateScalar": { "$ref": "#/definitions/schema.MetricValue" }, "flopRateSimd": { "$ref": "#/definitions/schema.MetricValue" }, + "footprint": { + "type": "array", + "items": { + "type": "string" + } + }, "memoryBandwidth": { "$ref": "#/definitions/schema.MetricValue" }, + "metricConfig": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.MetricConfig" + } + }, "name": { "type": "string" }, @@ -2094,6 +2011,15 @@ const docTemplate = `{ "caution": { "type": "number" }, + "energy": { + "type": "string" + }, + "footprint": { + "type": "string" + }, + "lowerIsBetter": { + "type": "boolean" + }, "name": { "type": "string" }, @@ -2113,16 +2039,17 @@ const docTemplate = `{ "type": "object", "properties": { "id": { - "description": "The unique DB identifier of a tag", "type": "integer" }, "name": { - "description": "Tag Name", "type": "string", "example": "Testjob" }, + "scope": { + "type": "string", + "example": "global" + }, "type": { - "description": "Tag Type", "type": "string", "example": "Debug" } diff --git a/internal/api/rest.go b/internal/api/rest.go index b60521b..3842596 100644 --- a/internal/api/rest.go +++ 
b/internal/api/rest.go @@ -124,8 +124,7 @@ func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) { // StartJobApiResponse model type StartJobApiResponse struct { - // Database ID of new job - DBID int64 `json:"id"` + Message string `json:"msg"` } // DeleteJobApiResponse model @@ -806,25 +805,10 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { repository.TriggerJobStart(repository.JobWithUser{Job: &req, User: repository.GetUserFromContext(r.Context())}) - id, err := api.JobRepository.Start(&req) - if err != nil { - handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw) - return - } - - for _, tag := range req.Tags { - if _, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), id, tag.Type, tag.Name, tag.Scope); err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw) - return - } - } - - log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime) rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusCreated) json.NewEncoder(rw).Encode(StartJobApiResponse{ - DBID: id, + Message: "Successfully triggered job start", }) } diff --git a/internal/repository/jobStartWorker.go b/internal/repository/jobStartWorker.go index dbd2247..18d2be7 100644 --- a/internal/repository/jobStartWorker.go +++ b/internal/repository/jobStartWorker.go @@ -6,6 +6,7 @@ package repository import ( "sync" + "time" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -36,18 +37,30 @@ func jobStartWorker() { break } jobRepo := GetJobRepository() + var id int64 - id, err := jobRepo.Start(req.Job) - if err != nil { - log.Errorf("insert into database failed: %v", err) + for i := 0; i < 5; i++ { + var err error + + id, err = jobRepo.Start(req.Job) + if err != nil { + log.Errorf("Attempt %d: insert into database failed: %v", i, err) + } else { + break + } + time.Sleep(1 * time.Second) } for _, tag := range req.Job.Tags { - if _, err := jobRepo.AddTagOrCreate(req.User, id, tag.Type, tag.Name, tag.Scope); err != nil { + if _, err := jobRepo.AddTagOrCreate(req.User, id, + tag.Type, tag.Name, tag.Scope); err != nil { log.Errorf("adding tag to new job %d failed: %v", id, err) } } + log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", + id, req.Job.Cluster, req.Job.JobID, req.Job.User, req.Job.StartTime) + jobStartPending.Done() } } From e1be6c713886912aa596d065ad181b0031f63d3f Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 26 Nov 2024 10:49:44 +0100 Subject: [PATCH 260/443] Remove UpdateEnergy from UpdateFootprint Task Computing total energy for running jobs does not make any sense --- internal/taskManager/updateFootprintService.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 59c1b12..e9525d2 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -114,12 +114,6 @@ func RegisterFootprintWorker() { ce++ continue } - stmt, err = jobRepo.UpdateEnergy(stmt, jobMeta) - if err != nil { - log.Errorf("update job (dbid: %d) statement build failed at energy step: %s", job.ID, err.Error()) - ce++ - continue - } stmt = stmt.Where("job.id = ?", job.ID)
pendingStatements = append(pendingStatements, stmt) From 38ce40ae7df5148f3e56a461c1235e4a8371023c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 26 Nov 2024 16:21:16 +0100 Subject: [PATCH 261/443] feat: redirect to requested page after login, solves #281 --- cmd/cc-backend/server.go | 19 ++++++++----------- internal/auth/auth.go | 8 ++++++-- .../taskManager/updateFootprintService.go | 4 ++-- web/templates/login.tmpl | 1 + web/web.go | 1 + 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index fc620c8..083b9e5 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -110,9 +110,7 @@ func serverInit() { if !config.Keys.DisableAuthentication { router.Handle("/login", authHandle.Login( - // On success: - http.RedirectHandler("/", http.StatusTemporaryRedirect), - + // On success: Handled within Login() // On failure: func(rw http.ResponseWriter, r *http.Request, err error) { rw.Header().Add("Content-Type", "text/html; charset=utf-8") @@ -127,9 +125,7 @@ func serverInit() { })).Methods(http.MethodPost) router.Handle("/jwt-login", authHandle.Login( - // On success: - http.RedirectHandler("/", http.StatusTemporaryRedirect), - + // On success: Handled within Login() // On failure: func(rw http.ResponseWriter, r *http.Request, err error) { rw.Header().Add("Content-Type", "text/html; charset=utf-8") @@ -165,11 +161,12 @@ func serverInit() { func(rw http.ResponseWriter, r *http.Request, err error) { rw.WriteHeader(http.StatusUnauthorized) web.RenderTemplate(rw, "login.tmpl", &web.Page{ - Title: "Authentication failed - ClusterCockpit", - MsgType: "alert-danger", - Message: err.Error(), - Build: buildInfo, - Infos: info, + Title: "Authentication failed - ClusterCockpit", + MsgType: "alert-danger", + Message: err.Error(), + Build: buildInfo, + Infos: info, + Redirect: r.RequestURI, }) }) }) diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 270989f..a186cf6 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -201,7 +201,6 @@ func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, } func (auth *Authentication) Login( - onsuccess http.Handler, onfailure func(rw http.ResponseWriter, r *http.Request, loginErr error), ) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { @@ -238,7 +237,12 @@ func (auth *Authentication) Login( log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects) ctx := context.WithValue(r.Context(), repository.ContextUserKey, user) - onsuccess.ServeHTTP(rw, r.WithContext(ctx)) + + if r.FormValue("redirect") != "" { + http.RedirectHandler(r.FormValue("redirect"), http.StatusFound).ServeHTTP(rw, r.WithContext(ctx)) + } else { + http.RedirectHandler(r.FormValue("/"), http.StatusFound).ServeHTTP(rw, r.WithContext(ctx)) + } return } diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index e9525d2..d30d766 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -43,9 +43,9 @@ func RegisterFootprintWorker() { if err != nil { continue } - // NOTE: Additional Subcluster Loop Could Allow For Limited List Of (Energy)Footprint-Metrics Only. + // NOTE: Additional Subcluster Loop Could Allow For Limited List Of Footprint-Metrics Only. 
// - Chunk-Size Would Then Be 'SubCluster' (Running Jobs, Transactions) as Lists Can Change Within SCs - // - Would Require Review of 'updateFootprint' And 'updateEnergy' Usage + // - Would Require Review of 'updateFootprint' Usage (Logic Could Possibly Be Included Here Completely) allMetrics := make([]string, 0) metricConfigs := archive.GetCluster(cluster.Name).MetricConfig for _, mc := range metricConfigs { diff --git a/web/templates/login.tmpl b/web/templates/login.tmpl index f10e064..cd13926 100644 --- a/web/templates/login.tmpl +++ b/web/templates/login.tmpl @@ -41,6 +41,7 @@ {{- if .Infos.hasOpenIDConnect}} OpenID Connect Login {{end}} + <input type="hidden" name="redirect" value="{{.Redirect}}" />
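The round trip added by this patch is: the failed-auth handler renders login.tmpl with Redirect set to r.RequestURI, the hidden form field above carries that URI through the login POST, and Login() reads it back with r.FormValue("redirect") before redirecting. The following minimal, self-contained sketch shows the same pattern in isolation; it is an illustration independent of cc-backend, and the handler names and the open-redirect guard are assumptions added here, not part of the patch:

    package main

    import (
        "html/template"
        "net/http"
        "strings"
    )

    // The login form carries the originally requested URI in a hidden field,
    // mirroring the template change above.
    var loginTmpl = template.Must(template.New("login").Parse(`<form method="post" action="/login">
      <input type="text" name="username" />
      <input type="password" name="password" />
      <input type="hidden" name="redirect" value="{{.Redirect}}" />
      <button type="submit">Login</button>
    </form>`))

    func showLogin(rw http.ResponseWriter, r *http.Request) {
        // r.RequestURI is the page the unauthenticated user actually asked for.
        loginTmpl.Execute(rw, struct{ Redirect string }{Redirect: r.RequestURI})
    }

    func doLogin(rw http.ResponseWriter, r *http.Request) {
        // ... authenticate the posted credentials here ...
        target := r.FormValue("redirect")
        // Follow only local paths so the login form cannot be abused as an
        // open redirector; this guard is an extra assumption on top of the patch.
        if target == "" || !strings.HasPrefix(target, "/") || strings.HasPrefix(target, "//") {
            target = "/"
        }
        http.Redirect(rw, r, target, http.StatusFound)
    }

    func main() {
        http.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
            if r.Method == http.MethodPost {
                doLogin(rw, r)
                return
            }
            showLogin(rw, r)
        })
        http.ListenAndServe(":8080", nil)
    }

Answering the POST with http.StatusFound makes the browser re-request the target with a GET, which is also what the handler in this patch relies on.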
diff --git a/web/web.go b/web/web.go index 45ca9e3..1cfa176 100644 --- a/web/web.go +++ b/web/web.go @@ -99,6 +99,7 @@ type Page struct { Infos map[string]interface{} // For generic use (e.g. username for /monitoring/user/, job id for /monitoring/job/) Config map[string]interface{} // UI settings for the currently logged in user (e.g. line width, ...) Resampling *schema.ResampleConfig // If not nil, defines resampling trigger and resolutions + Redirect string // The originally requested URL, for intermediate login handling } func RenderTemplate(rw http.ResponseWriter, file string, page *Page) { From 00a578657c9f168b7d32ffd2b420ba686a232607 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 27 Nov 2024 10:50:11 +0100 Subject: [PATCH 262/443] feat: add edit of notice box content to admin settings --- internal/api/rest.go | 64 +++++++++++++++ internal/routerConfig/routes.go | 13 +++- web/frontend/src/Config.root.svelte | 3 +- web/frontend/src/config.entrypoint.js | 3 +- web/frontend/src/config/AdminSettings.svelte | 4 + .../src/config/admin/NoticeEdit.svelte | 78 +++++++++++++++++++ web/templates/config.tmpl | 1 + 7 files changed, 163 insertions(+), 3 deletions(-) create mode 100644 web/frontend/src/config/admin/NoticeEdit.svelte diff --git a/internal/api/rest.go b/internal/api/rest.go index 3842596..db747ce 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -110,6 +110,7 @@ func (api *RestApi) MountConfigApiRoutes(r *mux.Router) { r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet) r.HandleFunc("/users/", api.deleteUser).Methods(http.MethodDelete) r.HandleFunc("/user/{id}", api.updateUser).Methods(http.MethodPost) + r.HandleFunc("/notice/", api.editNotice).Methods(http.MethodPost) } } @@ -1285,6 +1286,69 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { } } +// editNotice godoc +// @summary Updates or empties the notice box content +// @tags User +// @description Modifies the content of notice.txt, shown as notice box on the homepage. +// @description If more than one formValue is set then only the highest priority field is used. +// @description Only accessible from IPs registered with apiAllowedIPs configuration option. 
+// @accept mpfd +// @produce plain +// @param new-content formData string false "Priority 1: New content to display" +// @success 200 {string} string "Success Response Message" +// @failure 400 {string} string "Bad Request" +// @failure 401 {string} string "Unauthorized" +// @failure 403 {string} string "Forbidden" +// @failure 422 {string} string "Unprocessable Entity: The notice.txt file could not be updated" +// @failure 500 {string} string "Internal Server Error" +// @security ApiKeyAuth +// @router /notice/ [post] +func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) { + err := securedCheck(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusForbidden) + return + } + + if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { + http.Error(rw, "Only admins are allowed to update the notice.txt file", http.StatusForbidden) + return + } + + // Get Value + newContent := r.FormValue("new-content") + + // Check File + noticeExists := util.CheckFileExists("./var/notice.txt") + if !noticeExists { + ntxt, err := os.Create("./var/notice.txt") + if err != nil { + log.Errorf("Creating ./var/notice.txt failed: %s", err.Error()) + http.Error(rw, err.Error(), http.StatusUnprocessableEntity) + return + } + ntxt.Close() + } + + if newContent != "" { + if err := os.WriteFile("./var/notice.txt", []byte(newContent), 0o666); err != nil { + log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error()) + http.Error(rw, err.Error(), http.StatusUnprocessableEntity) + return + } else { + rw.Write([]byte("Update Notice Content Success")) + } + } else { + if err := os.WriteFile("./var/notice.txt", []byte(""), 0o666); err != nil { + log.Errorf("Writing to ./var/notice.txt failed: %s", err.Error()) + http.Error(rw, err.Error(), http.StatusUnprocessableEntity) + return + } else { + rw.Write([]byte("Empty Notice Content Success")) + } + } +} + func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) { err := securedCheck(r) if err != nil { diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 1e2fe73..8e943a0 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -35,7 +35,7 @@ type Route struct { var routes []Route = []Route{ {"/", "home.tmpl", "ClusterCockpit", false, setupHomeRoute}, - {"/config", "config.tmpl", "Settings", false, func(i InfoType, r *http.Request) InfoType { return i }}, + {"/config", "config.tmpl", "Settings", false, setupConfigRoute}, {"/monitoring/jobs/", "monitoring/jobs.tmpl", "Jobs - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { return i }}, {"/monitoring/job/{id:[0-9]+}", "monitoring/job.tmpl", "Job - ClusterCockpit", false, setupJobRoute}, {"/monitoring/users/", "monitoring/list.tmpl", "Users - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "USER"; return i }}, @@ -80,6 +80,17 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { return i } +func setupConfigRoute(i InfoType, r *http.Request) InfoType { + if util.CheckFileExists("./var/notice.txt") { + msg, err := os.ReadFile("./var/notice.txt") + if err == nil { + i["ncontent"] = string(msg) + } + } + + return i +} + func setupJobRoute(i InfoType, r *http.Request) InfoType { i["id"] = mux.Vars(r)["id"] if config.Keys.EmissionConstant != 0 { diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte index 6dd68f1..dc45491 100644 --- a/web/frontend/src/Config.root.svelte +++ b/web/frontend/src/Config.root.svelte @@ -15,6
+15,7 @@ export let isAdmin; export let isApi; export let username; + export let ncontent; {#if isAdmin == true} @@ -22,7 +23,7 @@ Admin Options - + {/if} diff --git a/web/frontend/src/config.entrypoint.js b/web/frontend/src/config.entrypoint.js index 345056b..feb3916 100644 --- a/web/frontend/src/config.entrypoint.js +++ b/web/frontend/src/config.entrypoint.js @@ -6,7 +6,8 @@ new Config({ props: { isAdmin: isAdmin, isApi: isApi, - username: username + username: username, + ncontent: ncontent, }, context: new Map([ ['cc-config', clusterCockpitConfig], diff --git a/web/frontend/src/config/AdminSettings.svelte b/web/frontend/src/config/AdminSettings.svelte index 9d3abf2..f512d40 100644 --- a/web/frontend/src/config/AdminSettings.svelte +++ b/web/frontend/src/config/AdminSettings.svelte @@ -10,6 +10,9 @@ import AddUser from "./admin/AddUser.svelte"; import ShowUsers from "./admin/ShowUsers.svelte"; import Options from "./admin/Options.svelte"; + import NoticeEdit from "./admin/NoticeEdit.svelte"; + + export let ncontent; let users = []; let roles = []; @@ -52,4 +55,5 @@ + diff --git a/web/frontend/src/config/admin/NoticeEdit.svelte b/web/frontend/src/config/admin/NoticeEdit.svelte new file mode 100644 index 0000000..325800b --- /dev/null +++ b/web/frontend/src/config/admin/NoticeEdit.svelte @@ -0,0 +1,78 @@
+<!--
+    @component Admin edit of notice box content, as shown on homepage
+
+    Properties:
+    - `ncontent String`: The current content of ./var/notice.txt
+ -->
+
+<script>
+  import { Col, Card, CardTitle } from "@sveltestrap/sveltestrap";
+  import { fade } from "svelte/transition";
+
+  export let ncontent;
+
+  let message = { msg: "", color: "#d63384" };
+  let displayMessage = false;
+
+  async function handleEditNotice() {
+    const content = document.querySelector("#notice-content").value;
+    let formData = new FormData();
+    formData.append("new-content", content);
+
+    try {
+      const res = await fetch("/config/notice/", {
+        method: "POST",
+        body: formData,
+      });
+      if (res.ok) {
+        let text = await res.text();
+        popMessage(text, "#048109");
+      } else {
+        let text = await res.text();
+        throw new Error("Response Code " + res.status + " -> " + text);
+      }
+    } catch (err) {
+      popMessage(err, "#d63384");
+    }
+  }
+
+  function popMessage(response, rescolor) {
+    message = { msg: response, color: rescolor };
+    displayMessage = true;
+    setTimeout(function () {
+      displayMessage = false;
+    }, 3500);
+  }
+</script>
+
+<Col>
+  <Card class="h-100">
+    <form
+      id="edit-notice-form"
+      method="post"
+      action="/config/notice/"
+      class="card-body"
+      on:submit|preventDefault={handleEditNotice}
+    >
+      <CardTitle class="mb-3">Edit Notice Shown On Homepage</CardTitle>
+      <p>Empty content ("No Content.") hides notice card on homepage.</p>
+      <div class="mb-3">
+        <textarea class="form-control" id="notice-content" rows="2">{ncontent}</textarea>
+      </div>
+      <button type="submit" class="btn btn-primary">Submit</button>
+      {#if displayMessage}
+        <div class="mt-2" transition:fade>
+          <b><code style="color: {message.color};">Update: {message.msg}</code></b>
+        </div>
+      {/if}
+    </form>
+  </Card>
+</Col>
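Since editNotice reads the value with r.FormValue, the new endpoint accepts an ordinary form POST. Below is a hedged client sketch for exercising it; the base URL and the /config/ mount prefix are assumptions, and the real route additionally requires an authenticated admin session plus a source IP permitted by apiAllowedIPs:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/url"
        "strings"
    )

    func main() {
        // Hypothetical base URL; adjust to the actual cc-backend instance.
        endpoint := "http://localhost:8080/config/notice/"

        form := url.Values{}
        // A non-empty value rewrites ./var/notice.txt; an empty value empties
        // the file, which hides the notice card on the homepage.
        form.Set("new-content", "Maintenance on Friday, 08:00-10:00")

        resp, err := http.Post(endpoint, "application/x-www-form-urlencoded",
            strings.NewReader(form.Encode()))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, _ := io.ReadAll(resp.Body)
        fmt.Println(resp.Status, string(body)) // e.g. "200 OK Update Notice Content Success"
    }

Sending the same request with an empty new-content field is how the admin card above clears the notice again.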
+ diff --git a/web/templates/config.tmpl b/web/templates/config.tmpl index 7993c3e..914dc88 100644 --- a/web/templates/config.tmpl +++ b/web/templates/config.tmpl @@ -13,6 +13,7 @@ const filterPresets = {{ .FilterPresets }}; const clusterCockpitConfig = {{ .Config }}; const resampleConfig = {{ .Resampling }}; + const ncontent = {{ .Infos.ncontent }}; {{end}} \ No newline at end of file From f89b5cd2ec562138a70b46249be887dbd6f59c05 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 27 Nov 2024 18:43:56 +0100 Subject: [PATCH 263/443] fix: solve inconsistencies with filters, fixes #280 --- internal/routerConfig/routes.go | 40 ++++++++++++++++++- web/frontend/src/generic/Filters.svelte | 33 ++++++++------- .../src/generic/filters/Resources.svelte | 10 ++--- web/frontend/src/generic/filters/Stats.svelte | 5 ++- web/frontend/src/generic/utils.js | 33 ++++++++++----- 5 files changed, 88 insertions(+), 33 deletions(-) diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 8e943a0..2267efb 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -244,6 +244,16 @@ func buildFilterPresets(query url.Values) map[string]interface{} { } } } + if query.Get("numHWThreads") != "" { + parts := strings.Split(query.Get("numHWThreads"), "-") + if len(parts) == 2 { + a, e1 := strconv.Atoi(parts[0]) + b, e2 := strconv.Atoi(parts[1]) + if e1 == nil && e2 == nil { + filterPresets["numHWThreads"] = map[string]int{"from": a, "to": b} + } + } + } if query.Get("numAccelerators") != "" { parts := strings.Split(query.Get("numAccelerators"), "-") if len(parts) == 2 { @@ -285,7 +295,35 @@ func buildFilterPresets(query url.Values) map[string]interface{} { } } } - + if query.Get("energy") != "" { + parts := strings.Split(query.Get("energy"), "-") + if len(parts) == 2 { + a, e1 := strconv.Atoi(parts[0]) + b, e2 := strconv.Atoi(parts[1]) + if e1 == nil && e2 == nil { + filterPresets["energy"] = map[string]int{"from": a, "to": b} + } + } + } + if len(query["stat"]) != 0 { + statList := make([]map[string]interface{}, 0) + for _, statEntry := range query["stat"] { + parts := strings.Split(statEntry, "-") + if len(parts) == 3 { // Metric Footprint Stat Field, from - to + a, e1 := strconv.ParseInt(parts[1], 10, 64) + b, e2 := strconv.ParseInt(parts[2], 10, 64) + if e1 == nil && e2 == nil { + statEntry := map[string]interface{}{ + "field": parts[0], + "from": a, + "to": b, + } + statList = append(statList, statEntry) + } + } + } + filterPresets["stats"] = statList + } return filterPresets } diff --git a/web/frontend/src/generic/Filters.svelte b/web/frontend/src/generic/Filters.svelte index 9580f17..312135a 100644 --- a/web/frontend/src/generic/Filters.svelte +++ b/web/frontend/src/generic/Filters.svelte @@ -76,7 +76,7 @@ numHWThreads: filterPresets.numHWThreads || { from: null, to: null }, numAccelerators: filterPresets.numAccelerators || { from: null, to: null }, - stats: [], + stats: filterPresets.stats || [], }; let isClusterOpen = false, @@ -127,27 +127,30 @@ items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } }); if (filters.arrayJobId != null) items.push({ arrayJobId: filters.arrayJobId }); - if (filters.numNodes.from != null || filters.numNodes.to != null) + if (filters.numNodes.from != null || filters.numNodes.to != null) { items.push({ numNodes: { from: filters.numNodes.from, to: filters.numNodes.to }, }); - if (filters.numHWThreads.from != null || filters.numHWThreads.to != null) + isNodesModified = true; + } + if (filters.numHWThreads.from != null 
|| filters.numHWThreads.to != null) { items.push({ numHWThreads: { from: filters.numHWThreads.from, to: filters.numHWThreads.to, }, }); - if ( - filters.numAccelerators.from != null || - filters.numAccelerators.to != null - ) + isHwthreadsModified = true; + } + if (filters.numAccelerators.from != null || filters.numAccelerators.to != null) { items.push({ numAccelerators: { from: filters.numAccelerators.from, to: filters.numAccelerators.to, }, }); + isAccsModified = true; + } if (filters.user) items.push({ user: { [filters.userMatch]: filters.user } }); if (filters.project) @@ -197,10 +200,10 @@ opts.push(`energy=${filters.energy.from}-${filters.energy.to}`); if (filters.numNodes.from && filters.numNodes.to) opts.push(`numNodes=${filters.numNodes.from}-${filters.numNodes.to}`); + if (filters.numHWThreads.from && filters.numHWThreads.to) + opts.push(`numHWThreads=${filters.numHWThreads.from}-${filters.numHWThreads.to}`); if (filters.numAccelerators.from && filters.numAccelerators.to) - opts.push( - `numAccelerators=${filters.numAccelerators.from}-${filters.numAccelerators.to}`, - ); + opts.push(`numAccelerators=${filters.numAccelerators.from}-${filters.numAccelerators.to}`); if (filters.user.length != 0) if (filters.userMatch != "in") { opts.push(`user=${filters.user}`); @@ -214,7 +217,10 @@ if (filters.arrayJobId) opts.push(`arrayJobId=${filters.arrayJobId}`); if (filters.project && filters.projectMatch != "contains") opts.push(`projectMatch=${filters.projectMatch}`); - + if (filters.stats.length != 0) + for (let stat of filters.stats) { + opts.push(`stat=${stat?.field ? stat.field : stat.metricName}-${stat?.from ? stat.from : stat.range.from}-${stat?.to ? stat.to : stat.range.to}`); + } if (opts.length == 0 && window.location.search.length <= 1) return; let newurl = `${window.location.pathname}?${opts.join("&")}`; @@ -364,8 +370,7 @@ {#if (isNodesModified || isHwthreadsModified) && isAccsModified}, {/if} {#if isAccsModified} - Accelerators: {filters.numAccelerators.from} - {filters - .numAccelerators.to} + Accelerators: {filters.numAccelerators.from} - {filters.numAccelerators.to} {/if} {/if} @@ -385,7 +390,7 @@ {#if filters.stats.length > 0} (isStatsOpen = true)}> {filters.stats - .map((stat) => `${stat.text}: ${stat.from} - ${stat.to}`) + .map((stat) => `${stat?.text ? stat.text : stat.field}: ${stat?.from ? stat.from : stat.range.from} - ${stat?.to ? 
stat.to : stat.range.to}`) .join(", ")} {/if} diff --git a/web/frontend/src/generic/filters/Resources.svelte b/web/frontend/src/generic/filters/Resources.svelte index 23eb9b7..750c4a6 100644 --- a/web/frontend/src/generic/filters/Resources.svelte +++ b/web/frontend/src/generic/filters/Resources.svelte @@ -5,10 +5,10 @@ - `cluster Object?`: The currently selected cluster config [Default: null] - `isOpen Bool?`: Is this filter component opened [Default: false] - `numNodes Object?`: The currently selected numNodes filter [Default: {from:null, to:null}] - - `numHWThreads Object?`: The currently selected numHWTreads filter [Default: {from:null, to:null}] + - `numHWThreads Object?`: The currently selected numHWThreads filter [Default: {from:null, to:null}] - `numAccelerators Object?`: The currently selected numAccelerators filter [Default: {from:null, to:null}] - `isNodesModified Bool?`: Is the node filter modified [Default: false] - - `isHwtreadsModified Bool?`: Is the Hwthreads filter modified [Default: false] + - `isHwthreadsModified Bool?`: Is the Hwthreads filter modified [Default: false] - `isAccsModified Bool?`: Is the Accelerator filter modified [Default: false] - `namedNode String?`: The currently selected single named node (= hostname) [Default: null] @@ -60,7 +60,7 @@ ); // Limited to Single-Node Thread Count - const findMaxNumHWTreadsPerNode = (clusters) => + const findMaxNumHWThreadsPerNode = (clusters) => clusters.reduce( (max, cluster) => Math.max( @@ -91,13 +91,13 @@ minNumNodes = filterRanges.numNodes.from; maxNumNodes = filterRanges.numNodes.to; maxNumAccelerators = findMaxNumAccels([{ subClusters }]); - maxNumHWThreads = findMaxNumHWTreadsPerNode([{ subClusters }]); + maxNumHWThreads = findMaxNumHWThreadsPerNode([{ subClusters }]); } else if (clusters.length > 0) { const { filterRanges } = header.clusters[0]; minNumNodes = filterRanges.numNodes.from; maxNumNodes = filterRanges.numNodes.to; maxNumAccelerators = findMaxNumAccels(clusters); - maxNumHWThreads = findMaxNumHWTreadsPerNode(clusters); + maxNumHWThreads = findMaxNumHWThreadsPerNode(clusters); for (let cluster of header.clusters) { const { filterRanges } = cluster; minNumNodes = Math.min(minNumNodes, filterRanges.numNodes.from); diff --git a/web/frontend/src/generic/filters/Stats.svelte b/web/frontend/src/generic/filters/Stats.svelte index 5b804a7..3252d39 100644 --- a/web/frontend/src/generic/filters/Stats.svelte +++ b/web/frontend/src/generic/filters/Stats.svelte @@ -29,10 +29,11 @@ export let isOpen = false; export let stats = []; - let statistics = [] + let statistics = []; + function loadRanges(isInitialized) { if (!isInitialized) return; - statistics = getStatsItems(); + statistics = getStatsItems(stats); } function resetRanges() { diff --git a/web/frontend/src/generic/utils.js b/web/frontend/src/generic/utils.js index 57248fc..fa357eb 100644 --- a/web/frontend/src/generic/utils.js +++ b/web/frontend/src/generic/utils.js @@ -307,23 +307,34 @@ export function checkMetricDisabled(m, c, s) { // [m]etric, [c]luster, [s]ubclus return !result } -export function getStatsItems() { +export function getStatsItems(presetStats = []) { // console.time('stats') const globalMetrics = getContext("globalMetrics") const result = globalMetrics.map((gm) => { if (gm?.footprint) { - // console.time('deep') const mc = getMetricConfigDeep(gm.name, null, null) - // console.timeEnd('deep') if (mc) { - return { - field: gm.name + '_' + gm.footprint, - text: gm.name + ' (' + gm.footprint + ')', - metric: gm.name, - from: 0, - to: mc.peak, - 
peak: mc.peak, - enabled: false + const presetEntry = presetStats.find((s) => s?.field === (gm.name + '_' + gm.footprint)) + if (presetEntry) { + return { + field: gm.name + '_' + gm.footprint, + text: gm.name + ' (' + gm.footprint + ')', + metric: gm.name, + from: presetEntry.from, + to: presetEntry.to, + peak: mc.peak, + enabled: true + } + } else { + return { + field: gm.name + '_' + gm.footprint, + text: gm.name + ' (' + gm.footprint + ')', + metric: gm.name, + from: 0, + to: mc.peak, + peak: mc.peak, + enabled: false + } } } } From 01b113631684b1f67073e9d7ff96c1ad1e307d82 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 28 Nov 2024 14:58:33 +0100 Subject: [PATCH 264/443] Fix Cookie settings, upgrade packages --- go.mod | 36 +++++++++---------- go.sum | 80 +++++++++++++++++++++---------------------- internal/auth/auth.go | 4 +++ 3 files changed, 62 insertions(+), 58 deletions(-) diff --git a/go.mod b/go.mod index e343d65..27a703c 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/ClusterCockpit/cc-backend -go 1.22 +go 1.23 require ( - github.com/99designs/gqlgen v0.17.49 + github.com/99designs/gqlgen v0.17.57 github.com/ClusterCockpit/cc-units v0.4.0 github.com/Masterminds/squirrel v1.5.4 github.com/coreos/go-oidc/v3 v3.11.0 @@ -15,7 +15,7 @@ require ( github.com/google/gops v0.3.28 github.com/gorilla/handlers v1.5.2 github.com/gorilla/mux v1.8.1 - github.com/gorilla/sessions v1.3.0 + github.com/gorilla/sessions v1.4.0 github.com/influxdata/influxdb-client-go/v2 v2.13.0 github.com/jmoiron/sqlx v1.4.0 github.com/mattn/go-sqlite3 v1.14.22 @@ -24,9 +24,9 @@ require ( github.com/qustavo/sqlhooks/v2 v2.1.0 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/swaggo/http-swagger v1.3.4 - github.com/swaggo/swag v1.16.3 - github.com/vektah/gqlparser/v2 v2.5.16 - golang.org/x/crypto v0.25.0 + github.com/swaggo/swag v1.16.4 + github.com/vektah/gqlparser/v2 v2.5.20 + golang.org/x/crypto v0.29.0 golang.org/x/exp v0.0.0-20240707233637-46b078467d37 golang.org/x/oauth2 v0.21.0 ) @@ -35,11 +35,11 @@ require ( filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/KyleBanks/depth v1.2.1 // indirect - github.com/agnivade/levenshtein v1.1.1 // indirect + github.com/agnivade/levenshtein v1.2.0 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect github.com/go-jose/go-jose/v4 v4.0.3 // indirect @@ -47,6 +47,7 @@ require ( github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/gorilla/websocket v1.5.3 // indirect @@ -61,7 +62,6 @@ require ( github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -74,16 +74,16 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sosodev/duration v1.3.1 // indirect github.com/swaggo/files v1.0.1 // indirect - github.com/urfave/cli/v2 v2.27.2 // indirect - github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + github.com/urfave/cli/v2 v2.27.5 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.23.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/tools v0.27.0 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index d8759fc..6506264 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ= -github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0= +github.com/99designs/gqlgen v0.17.57 h1:Ak4p60BRq6QibxY0lEc0JnQhDurfhxA67sp02lMjmPc= +github.com/99designs/gqlgen v0.17.57/go.mod h1:Jx61hzOSTcR4VJy/HFIgXiQ5rJ0Ypw8DxWLjbYDAUw0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= @@ -14,11 +14,11 @@ github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8 github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE= -github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= +github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0= +github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= -github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= +github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod 
h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= @@ -36,13 +36,13 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg= github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= @@ -75,6 +75,8 @@ github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= @@ -99,8 +101,8 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/sessions v1.3.0 h1:XYlkq7KcpOB2ZhHBPv5WpjMIxrQosiZanfoy1HLZFzg= -github.com/gorilla/sessions v1.3.0/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= +github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= +github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -156,8 +158,6 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -214,20 +214,20 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE= github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg= github.com/swaggo/http-swagger v1.3.4 h1:q7t/XLx0n15H1Q9/tk3Y9L4n210XzJF5WtnDX64a5ww= github.com/swaggo/http-swagger v1.3.4/go.mod h1:9dAh0unqMBAlbp1uE2Uc2mQTxNMU/ha4UbucIg1MFkQ= -github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= -github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= -github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= -github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= -github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8= -github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= +github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= +github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= +github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/vektah/gqlparser/v2 v2.5.20 h1:kPaWbhBntxoZPaNdBaIPT1Kh0i1b/onb5kXgEdP5JCo= +github.com/vektah/gqlparser/v2 v2.5.20/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= 
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -238,14 +238,14 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -255,15 +255,15 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -273,8 +273,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -287,17 +287,17 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/internal/auth/auth.go b/internal/auth/auth.go index a186cf6..dbaf271 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -188,6 +188,10 @@ func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, if auth.SessionMaxAge != 0 { session.Options.MaxAge = 
int(auth.SessionMaxAge.Seconds()) } + if config.Keys.HttpsCertFile == "" && config.Keys.HttpsKeyFile == "" { + session.Options.Secure = false + } + session.Options.SameSite = http.SameSiteStrictMode session.Values["username"] = user.Username session.Values["projects"] = user.Projects session.Values["roles"] = user.Roles From 2aef6ed9c0170a1b4fcddef87525e135888bab76 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 28 Nov 2024 15:18:07 +0100 Subject: [PATCH 265/443] fix: oversight error on redirect target --- internal/auth/auth.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/auth/auth.go b/internal/auth/auth.go index a186cf6..43f7ec3 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -240,9 +240,10 @@ func (auth *Authentication) Login( if r.FormValue("redirect") != "" { http.RedirectHandler(r.FormValue("redirect"), http.StatusFound).ServeHTTP(rw, r.WithContext(ctx)) - } else { - http.RedirectHandler(r.FormValue("/"), http.StatusFound).ServeHTTP(rw, r.WithContext(ctx)) + return } + + http.RedirectHandler("/", http.StatusFound).ServeHTTP(rw, r.WithContext(ctx)) return } From a53d473b58dc62694da6574c51b35b2a1262e19b Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 28 Nov 2024 17:12:05 +0100 Subject: [PATCH 266/443] Update subcluster-generate Perl script Fixes #278 --- configs/generate-subcluster.pl | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/configs/generate-subcluster.pl b/configs/generate-subcluster.pl index 171db92..7648358 100755 --- a/configs/generate-subcluster.pl +++ b/configs/generate-subcluster.pl @@ -117,10 +117,12 @@ foreach my $ln (split("\n", $topo)) { my $node; my @sockets; +my @nodeCores; foreach my $socket ( @{$DOMAINS{socket}} ) { push @sockets, "[".join(",", @{$socket})."]"; - $node .= join(",", @{$socket}) + push @nodeCores, join(",", @{$socket}); } +$node = join(",", @nodeCores); $INFO{sockets} = join(",\n", @sockets); my @memDomains; @@ -212,9 +214,27 @@ print <<"END"; "socketsPerNode": $INFO{socketsPerNode}, "coresPerSocket": $INFO{coresPerSocket}, "threadsPerCore": $INFO{threadsPerCore}, - "flopRateScalar": $flopsScalar, - "flopRateSimd": $flopsSimd, - "memoryBandwidth": $memBw, + "flopRateScalar": { + "unit": { + "base": "F/s", + "prefix": "G" + }, + "value": $flopsScalar + }, + "flopRateSimd": { + "unit": { + "base": "F/s", + "prefix": "G" + }, + "value": $flopsSimd + }, + "memoryBandwidth": { + "unit": { + "base": "B/s", + "prefix": "G" + }, + "value": $memBw + }, "nodes": "", "topology": { "node": [$node], From 5da6baf82865bacd83600e858f0f7b44748baa14 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 29 Nov 2024 12:00:28 +0100 Subject: [PATCH 267/443] fix: prevent jump to table head on continuous scroll load --- web/frontend/src/generic/JobList.svelte | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/generic/JobList.svelte b/web/frontend/src/generic/JobList.svelte index c4aacd2..7915699 100644 --- a/web/frontend/src/generic/JobList.svelte +++ b/web/frontend/src/generic/JobList.svelte @@ -107,7 +107,11 @@ let jobs = [] $: if ($initialized && $jobsStore.data) { - jobs = [...$jobsStore.data.jobs.items] + if (usePaging) { + jobs = [...$jobsStore.data.jobs.items] + } else { // Prevents jump to table head: Extends existing list instead of rendering new list + jobs = jobs.concat([...$jobsStore.data.jobs.items]) + } } $: matchedJobs = $jobsStore.data != null ?
$jobsStore.data.jobs.count : -1; @@ -170,7 +174,6 @@ } if (!usePaging) { - let scrollMultiplier = 1 window.addEventListener('scroll', () => { let { scrollTop, @@ -181,8 +184,7 @@ // Add 100 px offset to trigger load earlier if (scrollTop + clientHeight >= scrollHeight - 100 && $jobsStore.data != null && $jobsStore.data.jobs.hasNextPage) { let pendingPaging = { ...paging } - scrollMultiplier += 1 - pendingPaging.itemsPerPage = itemsPerPage * scrollMultiplier + pendingPaging.page += 1 paging = pendingPaging }; }); From 0b9f74f4f413767f384869cfce298a42932e4eb7 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 29 Nov 2024 12:56:53 +0100 Subject: [PATCH 268/443] fix: fix plot render for summed metrics on scope change --- .../src/generic/plots/MetricPlot.svelte | 26 ++++++++++++++----- web/frontend/src/job/Metric.svelte | 19 ++++++++++---- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 09f313c..536739d 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -64,6 +64,12 @@ return null; } + // handle special *-stat scopes + if (scope.match(/(.*)-stat$/)) { + const statParts = scope.split('-'); + scope = statParts[0] + } + if ( (scope == "node" && isShared == false) || metricConfig?.aggregation == "avg" @@ -130,6 +136,7 @@ export let numhwthreads = 0; export let numaccs = 0; export let zoomState = null; + export let thresholdState = null; if (useStatsSeries == null) useStatsSeries = statisticsSeries != null; if (useStatsSeries == false && series == null) useStatsSeries = true; @@ -468,12 +475,14 @@ // console.log('Dispatch Zoom with Res from / to', timestep, closest) dispatch('zoom', { newRes: closest, - lastZoomState: u?.scales + lastZoomState: u?.scales, + lastThreshold: thresholds?.normal }); } } else { dispatch('zoom', { - lastZoomState: u?.scales + lastZoomState: u?.scales, + lastThreshold: thresholds?.normal }); }; }; @@ -498,16 +507,19 @@ let timeoutId = null; function render(ren_width, ren_height) { - if (!uplot) { // Init uPlot + if (!uplot) { opts.width = ren_width; opts.height = ren_height; - if (zoomState) { + if (zoomState && metricConfig?.aggregation == "avg") { opts.scales = {...zoomState} + } else if (zoomState && metricConfig?.aggregation == "sum") { + // Allow Zoom In === Ymin changed + if (zoomState.y.min !== 0) { // scope change?: only use zoomState if thresholds match + if ((thresholdState === thresholds?.normal)) { opts.scales = {...zoomState} }; + } // else: reset scaling to default } - // console.log('Init Sizes ...', { width: opts.width, height: opts.height }) uplot = new uPlot(opts, plotData, plotWrapper); - } else { // Update size - // console.log('Update uPlot ...', { width: ren_width, height: ren_height }) + } else { uplot.setSize({ width: ren_width, height: ren_height }); } } diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index adbb44a..2e3967d 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -54,6 +54,7 @@ let statsSeries = rawData.map((data) => data?.statisticsSeries ? 
data.statisticsSeries : null); let zoomState = null; let pendingZoomState = null; + let thresholdState = null; const dispatch = createEventDispatcher(); const statsPattern = /(.*)-stat$/; @@ -96,18 +97,24 @@ (pendingZoomState?.x?.min !== detail?.lastZoomState?.x?.min) && (pendingZoomState?.y?.max !== detail?.lastZoomState?.y?.max) ) { - pendingZoomState = {...detail.lastZoomState} + pendingZoomState = {...detail.lastZoomState}; + } + + if (detail?.lastThreshold) { // Handle to correctly reset on summed metric scope change + thresholdState = detail.lastThreshold; + } else { + thresholdState = null; } if (detail?.newRes) { // Triggers GQL - pendingResolution = detail.newRes + pendingResolution = detail.newRes; } - } + }; let metricData; - let selectedScopes = [...scopes] + let selectedScopes = [...scopes]; const dbid = job.id; - const selectedMetrics = [metricName] + const selectedMetrics = [metricName]; $: if (selectedScope || pendingResolution) { @@ -209,6 +216,7 @@ {series} {isShared} {zoomState} + {thresholdState} /> {:else if statsSeries[selectedScopeIndex] != null && patternMatches} From c853d74ba0a4b71146396afb914f2636ef8f03fb Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 29 Nov 2024 12:57:34 +0100 Subject: [PATCH 269/443] Update frontend dependencies --- web/frontend/package-lock.json | 273 +++++++++++++++++++-------------- web/frontend/package.json | 6 +- 2 files changed, 161 insertions(+), 118 deletions(-) diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index e4c1d1e..e21171f 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -1,18 +1,18 @@ { "name": "cc-frontend", - "version": "1.0.1", + "version": "1.0.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "cc-frontend", - "version": "1.0.1", + "version": "1.0.2", "license": "MIT", "dependencies": { "@rollup/plugin-replace": "^5.0.7", "@sveltestrap/sveltestrap": "^6.2.7", - "@urql/svelte": "^4.2.1", - "chart.js": "^4.4.5", + "@urql/svelte": "^4.2.2", + "chart.js": "^4.4.6", "date-fns": "^2.30.0", "graphql": "^16.9.0", "mathjs": "^12.4.3", @@ -25,16 +25,16 @@ "@rollup/plugin-node-resolve": "^15.3.0", "@rollup/plugin-terser": "^0.4.4", "@timohausmann/quadtree-js": "^1.2.6", - "rollup": "^4.24.0", + "rollup": "^4.27.4", "rollup-plugin-css-only": "^4.5.2", "rollup-plugin-svelte": "^7.2.2", "svelte": "^4.2.19" } }, "node_modules/@0no-co/graphql.web": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.0.8.tgz", - "integrity": "sha512-8BG6woLtDMvXB9Ajb/uE+Zr/U7y4qJ3upXi0JQHZmsKUJa7HjF/gFvmL2f3/mSmfZoQGRr9VoY97LCX2uaFMzA==", + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.0.11.tgz", + "integrity": "sha512-xuSJ9WXwTmtngWkbdEoopMo6F8NLtjy84UNAMsAr5C3/2SgAL/dEU10TMqTIsipqPQ8HA/7WzeqQ9DEQxSvPPA==", "license": "MIT", "peerDependencies": { "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0" @@ -59,9 +59,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.25.7", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.25.7.tgz", - "integrity": "sha512-FjoyLe754PMiYsFaN5C94ttGiOmBNYTf6pLr4xXHAT5uctHb092PBszndLDR5XA/jghQvn4n7JMHl7dmTgbm9w==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz", + "integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==", "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" @@ -130,9 +130,9 @@ } 
}, "node_modules/@kurkle/color": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.2.tgz", - "integrity": "sha512-fuscdXJ9G1qb7W8VdHi+IwRqij3lBkosAm4ydQtEmbY58OzHXqQhvlxqEkoz0yssNVn38bcpRWgA9PP+OGoisw==", + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.4.tgz", + "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==", "license": "MIT" }, "node_modules/@popperjs/core": { @@ -241,14 +241,14 @@ } }, "node_modules/@rollup/pluginutils": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.2.tgz", - "integrity": "sha512-/FIdS3PyZ39bjZlwqFnWqCOVnW7o963LtKMwQOD0NhQqw22gSr2YY1afu3FxRip4ZCZNsD5jq6Aaz6QV3D/Njw==", + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.3.tgz", + "integrity": "sha512-Pnsb6f32CD2W3uCaLZIzDmeFyQ2b8UWMFI7xtwUezpcGBDVDW6y9XgAWIlARiGAo6eNF5FK5aQTr0LFyNyqq5A==", "license": "MIT", "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", - "picomatch": "^2.3.1" + "picomatch": "^4.0.2" }, "engines": { "node": ">=14.0.0" @@ -263,9 +263,9 @@ } }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.24.0.tgz", - "integrity": "sha512-Q6HJd7Y6xdB48x8ZNVDOqsbh2uByBhgK8PiQgPhwkIw/HC/YX5Ghq2mQY5sRMZWHb3VsFkWooUVOZHKr7DmDIA==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.27.4.tgz", + "integrity": "sha512-2Y3JT6f5MrQkICUyRVCw4oa0sutfAsgaSsb0Lmmy1Wi2y7X5vT9Euqw4gOsCyy0YfKURBg35nhUKZS4mDcfULw==", "cpu": [ "arm" ], @@ -277,9 +277,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.24.0.tgz", - "integrity": "sha512-ijLnS1qFId8xhKjT81uBHuuJp2lU4x2yxa4ctFPtG+MqEE6+C5f/+X/bStmxapgmwLwiL3ih122xv8kVARNAZA==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.27.4.tgz", + "integrity": "sha512-wzKRQXISyi9UdCVRqEd0H4cMpzvHYt1f/C3CoIjES6cG++RHKhrBj2+29nPF0IB5kpy9MS71vs07fvrNGAl/iA==", "cpu": [ "arm64" ], @@ -291,9 +291,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.24.0.tgz", - "integrity": "sha512-bIv+X9xeSs1XCk6DVvkO+S/z8/2AMt/2lMqdQbMrmVpgFvXlmde9mLcbQpztXm1tajC3raFDqegsH18HQPMYtA==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.27.4.tgz", + "integrity": "sha512-PlNiRQapift4LNS8DPUHuDX/IdXiLjf8mc5vdEmUR0fF/pyy2qWwzdLjB+iZquGr8LuN4LnUoSEvKRwjSVYz3Q==", "cpu": [ "arm64" ], @@ -305,9 +305,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.24.0.tgz", - "integrity": "sha512-X6/nOwoFN7RT2svEQWUsW/5C/fYMBe4fnLK9DQk4SX4mgVBiTA9h64kjUYPvGQ0F/9xwJ5U5UfTbl6BEjaQdBQ==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.27.4.tgz", + "integrity": "sha512-o9bH2dbdgBDJaXWJCDTNDYa171ACUdzpxSZt+u/AAeQ20Nk5x+IhA+zsGmrQtpkLiumRJEYef68gcpn2ooXhSQ==", "cpu": [ "x64" ], @@ -318,10 +318,38 @@ "darwin" ] }, + 
"node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.27.4.tgz", + "integrity": "sha512-NBI2/i2hT9Q+HySSHTBh52da7isru4aAAo6qC3I7QFVsuhxi2gM8t/EI9EVcILiHLj1vfi+VGGPaLOUENn7pmw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.27.4.tgz", + "integrity": "sha512-wYcC5ycW2zvqtDYrE7deary2P2UFmSh85PUpAx+dwTCO9uw3sgzD6Gv9n5X4vLaQKsrfTSZZ7Z7uynQozPVvWA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.24.0.tgz", - "integrity": "sha512-0KXvIJQMOImLCVCz9uvvdPgfyWo93aHHp8ui3FrtOP57svqrF/roSSR5pjqL2hcMp0ljeGlU4q9o/rQaAQ3AYA==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.27.4.tgz", + "integrity": "sha512-9OwUnK/xKw6DyRlgx8UizeqRFOfi9mf5TYCw1uolDaJSbUmBxP85DE6T4ouCMoN6pXw8ZoTeZCSEfSaYo+/s1w==", "cpu": [ "arm" ], @@ -333,9 +361,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.24.0.tgz", - "integrity": "sha512-it2BW6kKFVh8xk/BnHfakEeoLPv8STIISekpoF+nBgWM4d55CZKc7T4Dx1pEbTnYm/xEKMgy1MNtYuoA8RFIWw==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.27.4.tgz", + "integrity": "sha512-Vgdo4fpuphS9V24WOV+KwkCVJ72u7idTgQaBoLRD0UxBAWTF9GWurJO9YD9yh00BzbkhpeXtm6na+MvJU7Z73A==", "cpu": [ "arm" ], @@ -347,9 +375,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.24.0.tgz", - "integrity": "sha512-i0xTLXjqap2eRfulFVlSnM5dEbTVque/3Pi4g2y7cxrs7+a9De42z4XxKLYJ7+OhE3IgxvfQM7vQc43bwTgPwA==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.27.4.tgz", + "integrity": "sha512-pleyNgyd1kkBkw2kOqlBx+0atfIIkkExOTiifoODo6qKDSpnc6WzUY5RhHdmTdIJXBdSnh6JknnYTtmQyobrVg==", "cpu": [ "arm64" ], @@ -361,9 +389,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.24.0.tgz", - "integrity": "sha512-9E6MKUJhDuDh604Qco5yP/3qn3y7SLXYuiC0Rpr89aMScS2UAmK1wHP2b7KAa1nSjWJc/f/Lc0Wl1L47qjiyQw==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.27.4.tgz", + "integrity": "sha512-caluiUXvUuVyCHr5DxL8ohaaFFzPGmgmMvwmqAITMpV/Q+tPoaHZ/PWa3t8B2WyoRcIIuu1hkaW5KkeTDNSnMA==", "cpu": [ "arm64" ], @@ -375,9 +403,9 @@ ] }, "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.24.0.tgz", - "integrity": "sha512-2XFFPJ2XMEiF5Zi2EBf4h73oR1V/lycirxZxHZNc93SqDN/IWhYYSYj8I9381ikUFXZrz2v7r2tOVk2NBwxrWw==", + "version": "4.27.4", + 
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.27.4.tgz", + "integrity": "sha512-FScrpHrO60hARyHh7s1zHE97u0KlT/RECzCKAdmI+LEoC1eDh/RDji9JgFqyO+wPDb86Oa/sXkily1+oi4FzJQ==", "cpu": [ "ppc64" ], @@ -389,9 +417,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.24.0.tgz", - "integrity": "sha512-M3Dg4hlwuntUCdzU7KjYqbbd+BLq3JMAOhCKdBE3TcMGMZbKkDdJ5ivNdehOssMCIokNHFOsv7DO4rlEOfyKpg==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.27.4.tgz", + "integrity": "sha512-qyyprhyGb7+RBfMPeww9FlHwKkCXdKHeGgSqmIXw9VSUtvyFZ6WZRtnxgbuz76FK7LyoN8t/eINRbPUcvXB5fw==", "cpu": [ "riscv64" ], @@ -403,9 +431,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.24.0.tgz", - "integrity": "sha512-mjBaoo4ocxJppTorZVKWFpy1bfFj9FeCMJqzlMQGjpNPY9JwQi7OuS1axzNIk0nMX6jSgy6ZURDZ2w0QW6D56g==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.27.4.tgz", + "integrity": "sha512-PFz+y2kb6tbh7m3A7nA9++eInGcDVZUACulf/KzDtovvdTizHpZaJty7Gp0lFwSQcrnebHOqxF1MaKZd7psVRg==", "cpu": [ "s390x" ], @@ -417,9 +445,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.24.0.tgz", - "integrity": "sha512-ZXFk7M72R0YYFN5q13niV0B7G8/5dcQ9JDp8keJSfr3GoZeXEoMHP/HlvqROA3OMbMdfr19IjCeNAnPUG93b6A==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.27.4.tgz", + "integrity": "sha512-Ni8mMtfo+o/G7DVtweXXV/Ol2TFf63KYjTtoZ5f078AUgJTmaIJnj4JFU7TK/9SVWTaSJGxPi5zMDgK4w+Ez7Q==", "cpu": [ "x64" ], @@ -431,9 +459,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.24.0.tgz", - "integrity": "sha512-w1i+L7kAXZNdYl+vFvzSZy8Y1arS7vMgIy8wusXJzRrPyof5LAb02KGr1PD2EkRcl73kHulIID0M501lN+vobQ==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.27.4.tgz", + "integrity": "sha512-5AeeAF1PB9TUzD+3cROzFTnAJAcVUGLuR8ng0E0WXGkYhp6RD6L+6szYVX+64Rs0r72019KHZS1ka1q+zU/wUw==", "cpu": [ "x64" ], @@ -445,9 +473,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.24.0.tgz", - "integrity": "sha512-VXBrnPWgBpVDCVY6XF3LEW0pOU51KbaHhccHw6AS6vBWIC60eqsH19DAeeObl+g8nKAz04QFdl/Cefta0xQtUQ==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.27.4.tgz", + "integrity": "sha512-yOpVsA4K5qVwu2CaS3hHxluWIK5HQTjNV4tWjQXluMiiiu4pJj4BN98CvxohNCpcjMeTXk/ZMJBRbgRg8HBB6A==", "cpu": [ "arm64" ], @@ -459,9 +487,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.24.0.tgz", - "integrity": "sha512-xrNcGDU0OxVcPTH/8n/ShH4UevZxKIO6HJFK0e15XItZP2UcaiLFd5kiX7hJnqCbSztUF8Qot+JWBC/QXRPYWQ==", + "version": "4.27.4", + 
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.27.4.tgz", + "integrity": "sha512-KtwEJOaHAVJlxV92rNYiG9JQwQAdhBlrjNRp7P9L8Cb4Rer3in+0A+IPhJC9y68WAi9H0sX4AiG2NTsVlmqJeQ==", "cpu": [ "ia32" ], @@ -473,9 +501,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.24.0.tgz", - "integrity": "sha512-fbMkAF7fufku0N2dE5TBXcNlg0pt0cJue4xBRE2Qc5Vqikxr4VCgKj/ht6SMdFcOacVA9rqF70APJ8RN/4vMJw==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.27.4.tgz", + "integrity": "sha512-3j4jx1TppORdTAoBJRd+/wJRGCPC0ETWkXOecJ6PPZLj6SptXkrXcNqdj0oclbKML6FkQltdz7bBA3rUSirZug==", "cpu": [ "x64" ], @@ -519,9 +547,9 @@ "license": "MIT" }, "node_modules/@urql/core": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/@urql/core/-/core-5.0.6.tgz", - "integrity": "sha512-38rgSDqVNihFDauw1Pm9V7XLWIKuK8V9CKgrUF7/xEKinze8ENKP1ZeBhkG+dxWzJan7CHK+SLl46kAdvZwIlA==", + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/@urql/core/-/core-5.0.8.tgz", + "integrity": "sha512-1GOnUw7/a9bzkcM0+U8U5MmxW2A7FE5YquuEmcJzTtW5tIs2EoS4F2ITpuKBjRBbyRjZgO860nWFPo1m4JImGA==", "license": "MIT", "dependencies": { "@0no-co/graphql.web": "^1.0.5", @@ -529,9 +557,9 @@ } }, "node_modules/@urql/svelte": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.2.1.tgz", - "integrity": "sha512-tzjt5qElu6EF4ns+AWLUFvvGFH+bDGEgLStHQTBu76puQcMCW374MrjxWM9lKA6lfA7iUyu1KXkIRhxNy09l4Q==", + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.2.2.tgz", + "integrity": "sha512-6ntLGsWcnNtaMZVmFpePfFTSpYxYpznCAqnuvLDjt7Oa7YqHcFiyPnz7IIsiPD9VE6hZSi0+RwmRk5BMba/teQ==", "license": "MIT", "dependencies": { "@urql/core": "^5.0.0", @@ -543,9 +571,9 @@ } }, "node_modules/acorn": { - "version": "8.13.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.13.0.tgz", - "integrity": "sha512-8zSiw54Oxrdym50NlZ9sUusyO1Z1ZchgRLWRaK6c86XJFClyCgFKetdowBg5bKxyp/u+CDBJG4Mpp0m3HLZl9w==", + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "license": "MIT", "bin": { "acorn": "bin/acorn" @@ -597,9 +625,9 @@ "license": "MIT" }, "node_modules/chart.js": { - "version": "4.4.5", - "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.5.tgz", - "integrity": "sha512-CVVjg1RYTJV9OCC8WeJPMx8gsV8K6WIyIEQUE3ui4AR9Hfgls9URri6Ja3hyMVBbTF8Q2KFa19PE815gWcWhng==", + "version": "4.4.6", + "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.6.tgz", + "integrity": "sha512-8Y406zevUPbbIBA/HRk33khEmQPk5+cxeflWE/2rx1NJsjVWMPw/9mSP9rxHP5eqi6LNoPBVMfZHxbwLSgldYA==", "license": "MIT", "dependencies": { "@kurkle/color": "^0.3.0" @@ -645,9 +673,9 @@ "license": "MIT" }, "node_modules/complex.js": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/complex.js/-/complex.js-2.3.0.tgz", - "integrity": "sha512-wWHzifVdUPbPBhh+ObvpVGIzrAQjTvmnnEJKBfLW5YbyAB6OXQ0r+Q92fByMIrSSlxUuCujqxriJSR6R/kVxPA==", + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/complex.js/-/complex.js-2.4.2.tgz", + "integrity": "sha512-qtx7HRhPGSCBtGiST4/WGHuW+zeaND/6Ld+db6PbrulIB1i2Ev/2UPiqcmpQNPSyfBKraC0EOvOKCB5dGZKt3g==", "license": "MIT", "engines": { "node": "*" @@ -867,9 +895,9 @@ "license": "MIT" 
}, "node_modules/magic-string": { - "version": "0.30.12", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.12.tgz", - "integrity": "sha512-Ea8I3sQMVXr8JhN4z+H/d8zwo+tYDgHE9+5G4Wnrwhs0gaK9fXTKx0Tw5Xwsd/bCPTTZNRAdpyzvoeORe9LYpw==", + "version": "0.30.14", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.14.tgz", + "integrity": "sha512-5c99P1WKTed11ZC0HMJOj6CDIue6F8ySu+bJL+85q1zBEIY8IklrJ1eiKC2NDRh3Ct3FcvmJPyQHb9erXMTJNw==", "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0" @@ -955,21 +983,21 @@ } }, "node_modules/periscopic/node_modules/is-reference": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", - "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz", + "integrity": "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==", "license": "MIT", "dependencies": { - "@types/estree": "*" + "@types/estree": "^1.0.6" } }, "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", "license": "MIT", "engines": { - "node": ">=8.6" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/jonschlinkert" @@ -1020,9 +1048,9 @@ } }, "node_modules/rollup": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.24.0.tgz", - "integrity": "sha512-DOmrlGSXNk1DM0ljiQA+i+o0rSLhtii1je5wgk60j49d1jHT5YYttBv1iWOnYSTG+fZZESUOSNiAl89SIet+Cg==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.27.4.tgz", + "integrity": "sha512-RLKxqHEMjh/RGLsDxAEsaLO3mWgyoU6x9w6n1ikAzet4B3gI2/3yP6PWY2p9QzRTh6MfEIXB3MwsOY0Iv3vNrw==", "devOptional": true, "license": "MIT", "dependencies": { @@ -1036,22 +1064,24 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.24.0", - "@rollup/rollup-android-arm64": "4.24.0", - "@rollup/rollup-darwin-arm64": "4.24.0", - "@rollup/rollup-darwin-x64": "4.24.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.24.0", - "@rollup/rollup-linux-arm-musleabihf": "4.24.0", - "@rollup/rollup-linux-arm64-gnu": "4.24.0", - "@rollup/rollup-linux-arm64-musl": "4.24.0", - "@rollup/rollup-linux-powerpc64le-gnu": "4.24.0", - "@rollup/rollup-linux-riscv64-gnu": "4.24.0", - "@rollup/rollup-linux-s390x-gnu": "4.24.0", - "@rollup/rollup-linux-x64-gnu": "4.24.0", - "@rollup/rollup-linux-x64-musl": "4.24.0", - "@rollup/rollup-win32-arm64-msvc": "4.24.0", - "@rollup/rollup-win32-ia32-msvc": "4.24.0", - "@rollup/rollup-win32-x64-msvc": "4.24.0", + "@rollup/rollup-android-arm-eabi": "4.27.4", + "@rollup/rollup-android-arm64": "4.27.4", + "@rollup/rollup-darwin-arm64": "4.27.4", + "@rollup/rollup-darwin-x64": "4.27.4", + "@rollup/rollup-freebsd-arm64": "4.27.4", + "@rollup/rollup-freebsd-x64": "4.27.4", + "@rollup/rollup-linux-arm-gnueabihf": "4.27.4", + "@rollup/rollup-linux-arm-musleabihf": "4.27.4", + "@rollup/rollup-linux-arm64-gnu": "4.27.4", + "@rollup/rollup-linux-arm64-musl": "4.27.4", + 
"@rollup/rollup-linux-powerpc64le-gnu": "4.27.4", + "@rollup/rollup-linux-riscv64-gnu": "4.27.4", + "@rollup/rollup-linux-s390x-gnu": "4.27.4", + "@rollup/rollup-linux-x64-gnu": "4.27.4", + "@rollup/rollup-linux-x64-musl": "4.27.4", + "@rollup/rollup-win32-arm64-msvc": "4.27.4", + "@rollup/rollup-win32-ia32-msvc": "4.27.4", + "@rollup/rollup-win32-x64-msvc": "4.27.4", "fsevents": "~2.3.2" } }, @@ -1103,6 +1133,19 @@ "node": ">= 8.0.0" } }, + "node_modules/rollup-plugin-svelte/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -1235,12 +1278,12 @@ } }, "node_modules/svelte/node_modules/is-reference": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", - "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.3.tgz", + "integrity": "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==", "license": "MIT", "dependencies": { - "@types/estree": "*" + "@types/estree": "^1.0.6" } }, "node_modules/terser": { diff --git a/web/frontend/package.json b/web/frontend/package.json index 1079440..389ffe6 100644 --- a/web/frontend/package.json +++ b/web/frontend/package.json @@ -11,7 +11,7 @@ "@rollup/plugin-node-resolve": "^15.3.0", "@rollup/plugin-terser": "^0.4.4", "@timohausmann/quadtree-js": "^1.2.6", - "rollup": "^4.24.0", + "rollup": "^4.27.4", "rollup-plugin-css-only": "^4.5.2", "rollup-plugin-svelte": "^7.2.2", "svelte": "^4.2.19" @@ -19,8 +19,8 @@ "dependencies": { "@rollup/plugin-replace": "^5.0.7", "@sveltestrap/sveltestrap": "^6.2.7", - "@urql/svelte": "^4.2.1", - "chart.js": "^4.4.5", + "@urql/svelte": "^4.2.2", + "chart.js": "^4.4.6", "date-fns": "^2.30.0", "graphql": "^16.9.0", "mathjs": "^12.4.3", From 93377f53fc1dc6a9dd2305b7556a585371da2e8b Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 29 Nov 2024 14:15:15 +0100 Subject: [PATCH 270/443] add lastThreshold to jobListRow --- web/frontend/src/generic/joblist/JobListRow.svelte | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 0900730..82cf2ed 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -37,6 +37,7 @@ : ["node"]; let selectedResolution = resampleDefault; let zoomStates = {}; + let thresholdStates = {}; const cluster = getContext("clusters").find((c) => c.name == job.cluster); const client = getContextClient(); @@ -80,6 +81,13 @@ zoomStates[metric] = {...detail.lastZoomState} } + if ( // States have to differ, causes deathloop if just set + detail?.lastThreshold && + thresholdStates[metric] !== detail.lastThreshold + ) { // Handle to correctly reset on summed metric scope change + thresholdStates[metric] = detail.lastThreshold; + } + if (detail?.newRes) { // Triggers GQL selectedResolution = detail.newRes } @@ -191,6 +199,7 @@ 
From 9de5879786cc1a9bd66dfd464ee78616172bc75e Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Mon, 2 Dec 2024 12:49:43 +0100
Subject: [PATCH 271/443] fix: fix job list render for continuous mode on
 filter or sort changes

---
 web/frontend/src/generic/JobList.svelte | 33 ++++++++++++++++++++++---
 1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/web/frontend/src/generic/JobList.svelte b/web/frontend/src/generic/JobList.svelte
index 7915699..89b8fad 100644
--- a/web/frontend/src/generic/JobList.svelte
+++ b/web/frontend/src/generic/JobList.svelte
@@ -30,6 +30,10 @@
     initialized = getContext("initialized"),
     globalMetrics = getContext("globalMetrics");

+  const equalsCheck = (a, b) => {
+    return JSON.stringify(a) === JSON.stringify(b);
+  }
+
   export let sorting = { field: "startTime", type: "col", order: "DESC" };
   export let matchedJobs = 0;
   export let metrics = ccconfig.plot_list_selectedMetrics;
@@ -40,6 +44,8 @@
   let page = 1;
   let paging = { itemsPerPage, page };
   let filter = [];
+  let lastFilter = [];
+  let lastSorting = null;
   let triggerMetricRefresh = false;

   function getUnit(m) {
@@ -105,12 +111,33 @@
     variables: { paging, sorting, filter },
   });

+  $: if (!usePaging && sorting) {
+    // console.log('Reset Paging ...')
+    paging = { itemsPerPage: 10, page: 1 }
+  };
+
-  let jobs = []
+  let jobs = [];
   $: if ($initialized && $jobsStore.data) {
     if (usePaging) {
       jobs = [...$jobsStore.data.jobs.items]
-    } else { // Prevents jump to table head: Extends existing list instead of rendering new list
-      jobs = jobs.concat([...$jobsStore.data.jobs.items])
+    } else { // Prevents jump to table head in continuous mode, only if no change in sort or filter
+      if (equalsCheck(filter, lastFilter) && equalsCheck(sorting, lastSorting)) {
+        // console.log('Both Equal: Continuous Addition ... Set None')
+        jobs = jobs.concat([...$jobsStore.data.jobs.items])
+      } else if (equalsCheck(filter, lastFilter)) {
+        // console.log('Filter Equal: Continuous Reset ... Set lastSorting')
+        lastSorting = { ...sorting }
+        jobs = [...$jobsStore.data.jobs.items]
+      } else if (equalsCheck(sorting, lastSorting)) {
+        // console.log('Sorting Equal: Continuous Reset ... Set lastFilter')
+        lastFilter = [ ...filter ]
+        jobs = [...$jobsStore.data.jobs.items]
+      } else {
+        // console.log('None Equal: Continuous Reset ...
Set lastBoth') + lastSorting = { ...sorting } + lastFilter = [ ...filter ] + jobs = [...$jobsStore.data.jobs.items] + } } } From 1a3cf7edd6d8d91365a8df671c0ae70ca44b4fad Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Dec 2024 17:02:04 +0100 Subject: [PATCH 272/443] fix wrong var insert --- web/frontend/src/Analysis.root.svelte | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index d287cf3..2e977e8 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -422,14 +422,14 @@ {#if groupSelection.key == "user"} {te.id} {:else} {te.id} From 8a10b697162abccf7bff1dede079afa46fb6def9 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Dec 2024 17:27:41 +0100 Subject: [PATCH 273/443] review findThresholds logic in metricPlot --- .../src/generic/plots/MetricPlot.svelte | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 536739d..f08c86d 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -84,19 +84,20 @@ if (metricConfig?.aggregation == "sum") { - let divisor = 1 + let divisor; if (isShared == true) { // Shared if (numaccs > 0) divisor = subClusterTopology.accelerators.length / numaccs; - else if (numhwthreads > 0) divisor = subClusterTopology.node.length / numhwthreads; + else if (numhwthreads > 0) divisor = subClusterTopology.core.length / numhwthreads; } - else if (scope == 'socket') divisor = subClusterTopology.socket.length; - else if (scope == "core") divisor = subClusterTopology.core.length; - else if (scope == "accelerator") - divisor = subClusterTopology.accelerators.length; - else if (scope == "hwthread") divisor = subClusterTopology.node.length; + else if (scope == 'node') divisor = 1; // Use as configured for nodes + else if (scope == 'socket') divisor = subClusterTopology.socket.length; + else if (scope == "memoryDomain") divisor = subClusterTopology.memoryDomain.length; + else if (scope == "core") divisor = subClusterTopology.core.length; + else if (scope == "hwthread") divisor = subClusterTopology.core.length; // alt. 
name for core + else if (scope == "accelerator") divisor = subClusterTopology.accelerators.length; else { - // console.log('TODO: how to calc thresholds for ', scope) - return null; + console.log('Unknown scope, return default thresholds ', scope) + divisor = 1; } return { From 907e80a01c5a78c996ca038f32059d49a0845717 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Dec 2024 07:26:36 +0100 Subject: [PATCH 274/443] Update config json schema Fixes #256 --- pkg/schema/config.go | 2 +- pkg/schema/schemas/config.schema.json | 893 ++++++++++++++------------ 2 files changed, 471 insertions(+), 424 deletions(-) diff --git a/pkg/schema/config.go b/pkg/schema/config.go index a7abefe..0a30d86 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -91,7 +91,7 @@ type ResampleConfig struct { type CronFrequency struct { // Duration Update Worker [Defaults to '5m'] DurationWorker string `json:"duration-worker"` - // Metric- and Energy Footprint Update Worker [Defaults to '10m'] + // Metric-Footprint Update Worker [Defaults to '10m'] FootprintWorker string `json:"footprint-worker"` } diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json index cc6c553..a9d7c47 100644 --- a/pkg/schema/schemas/config.schema.json +++ b/pkg/schema/schemas/config.schema.json @@ -1,454 +1,501 @@ { - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://config.schema.json", - "title": "cc-backend configuration file schema", - "type": "object", - "properties": { - "addr": { - "description": "Address where the http (or https) server will listen on (for example: 'localhost:80').", - "type": "string" + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$id": "embedfs://config.schema.json", + "title": "cc-backend configuration file schema", + "type": "object", + "properties": { + "addr": { + "description": "Address where the http (or https) server will listen on (for example: 'localhost:80').", + "type": "string" + }, + "apiAllowedIPs": { + "description": "Addresses from which secured API endpoints can be reached", + "type": "string" + }, + "user": { + "description": "Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port.", + "type": "string" + }, + "group": { + "description": "Drop root permissions once .env was read and the port was taken. 
Only applicable if using privileged port.", + "type": "string" + }, + "disable-authentication": { + "description": "Disable authentication (for everything: API, Web-UI, ...).", + "type": "boolean" + }, + "embed-static-files": { + "description": "If all files in `web/frontend/public` should be served from within the binary itself (they are embedded) or not.", + "type": "boolean" + }, + "static-files": { + "description": "Folder where static assets can be found, if embed-static-files is false.", + "type": "string" + }, + "db-driver": { + "description": "sqlite3 or mysql (mysql will work for mariadb as well).", + "type": "string", + "enum": [ + "sqlite3", + "mysql" + ] + }, + "db": { + "description": "For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).", + "type": "string" + }, + "archive": { + "description": "Configuration keys for job-archive", + "type": "object", + "properties": { + "kind": { + "description": "Backend type for job-archive", + "type": "string", + "enum": [ + "file", + "s3" + ] }, - "user": { - "description": "Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port.", - "type": "string" + "path": { + "description": "Path to job archive for file backend", + "type": "string" }, - "group": { - "description": "Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port.", - "type": "string" + "compression": { + "description": "Setup automatic compression for jobs older than number of days", + "type": "integer" }, - "disable-authentication": { - "description": "Disable authentication (for everything: API, Web-UI, ...).", - "type": "boolean" - }, - "embed-static-files": { - "description": "If all files in `web/frontend/public` should be served from within the binary itself (they are embedded) or not.", - "type": "boolean" - }, - "static-files": { - "description": "Folder where static assets can be found, if embed-static-files is false.", - "type": "string" - }, - "db-driver": { - "description": "sqlite3 or mysql (mysql will work for mariadb as well).", - "type": "string", - "enum": [ - "sqlite3", - "mysql" - ] - }, - "db": { - "description": "For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).", - "type": "string" - }, - "job-archive": { - "description": "Configuration keys for job-archive", - "type": "object", - "properties": { - "kind": { - "description": "Backend type for job-archive", - "type": "string", - "enum": [ - "file", - "s3" - ] - }, - "path": { - "description": "Path to job archive for file backend", - "type": "string" - }, - "compression": { - "description": "Setup automatic compression for jobs older than number of days", - "type": "integer" - }, - "retention": { - "description": "Configuration keys for retention", - "type": "object", - "properties": { - "policy": { - "description": "Retention policy", - "type": "string", - "enum": [ - "none", - "delete", - "move" - ] - }, - "includeDB": { - "description": "Also remove jobs from database", - "type": "boolean" - }, - "age": { - "description": "Act on jobs with startTime older than age (in days)", - "type": "integer" - }, - "location": { - "description": "The target directory for retention. 
Only applicable for retention move.", - "type": "string" - } - }, - "required": [ - "policy" - ] - } + "retention": { + "description": "Configuration keys for retention", + "type": "object", + "properties": { + "policy": { + "description": "Retention policy", + "type": "string", + "enum": [ + "none", + "delete", + "move" + ] }, - "required": [ - "kind" - ] + "includeDB": { + "description": "Also remove jobs from database", + "type": "boolean" + }, + "age": { + "description": "Act on jobs with startTime older than age (in days)", + "type": "integer" + }, + "location": { + "description": "The target directory for retention. Only applicable for retention move.", + "type": "string" + } + }, + "required": [ + "policy" + ] + } + }, + "required": [ + "kind" + ] + }, + "disable-archive": { + "description": "Keep all metric data in the metric data repositories, do not write to the job-archive.", + "type": "boolean" + }, + "validate": { + "description": "Validate all input json documents against json schema.", + "type": "boolean" + }, + "session-max-age": { + "description": "Specifies for how long a session shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire!", + "type": "string" + }, + "https-cert-file": { + "description": "Filepath to SSL certificate. If also https-key-file is set use HTTPS using those certificates.", + "type": "string" + }, + "https-key-file": { + "description": "Filepath to SSL key file. If also https-cert-file is set use HTTPS using those certificates.", + "type": "string" + }, + "redirect-http-to": { + "description": "If not the empty string and addr does not end in :80, redirect every request incoming at port 80 to that url.", + "type": "string" + }, + "stop-jobs-exceeding-walltime": { + "description": "If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job.", + "type": "integer" + }, + "short-running-jobs-duration": { + "description": "Do not show running jobs shorter than X seconds.", + "type": "integer" + }, + "emission-constant": { + "description": ".", + "type": "integer" + }, + "cron-frequency": { + "description": "Frequency of cron job workers.", + "type": "object", + "properties": { + "duration-worker": { + "description": "Duration Update Worker [Defaults to '5m']", + "type": "string" }, - "disable-archive": { - "description": "Keep all metric data in the metric data repositories, do not write to the job-archive.", - "type": "boolean" + "footprint-worker": { + "description": "Metric-Footprint Update Worker [Defaults to '10m']", + "type": "string" + } + } + }, + "enable-resampling": { + "description": "Enable dynamic zoom in frontend metric plots.", + "type": "object", + "properties": { + "trigger": { + "description": "Trigger next zoom level at less than this many visible datapoints.", + "type": "integer" }, - "validate": { - "description": "Validate all input json documents against json schema.", - "type": "boolean" - }, - "session-max-age": { - "description": "Specifies for how long a session shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire!", - "type": "string" - }, - "https-cert-file": { - "description": "Filepath to SSL certificate. If also https-key-file is set use HTTPS using those certificates.", - "type": "string" - }, - "https-key-file": { - "description": "Filepath to SSL key file. 
If also https-cert-file is set use HTTPS using those certificates.", - "type": "string" - }, - "redirect-http-to": { - "description": "If not the empty string and addr does not end in :80, redirect every request incoming at port 80 to that url.", - "type": "string" - }, - "stop-jobs-exceeding-walltime": { - "description": "If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job.", + "resolutions": { + "description": "Array of resampling target resolutions, in seconds.", + "type": "array", + "items": { "type": "integer" + } + } + }, + "required": [ + "trigger", + "resolutions" + ] + }, + "jwts": { + "description": "For JWT token authentication.", + "type": "object", + "properties": { + "max-age": { + "description": "Configure how long a token is valid. As string parsable by time.ParseDuration()", + "type": "string" }, - "short-running-jobs-duration": { - "description": "Do not show running jobs shorter than X seconds.", - "type": "integer" + "cookieName": { + "description": "Cookie that should be checked for a JWT token.", + "type": "string" }, - "jwts": { - "description": "For JWT token authentication.", + "validateUser": { + "description": "Deny login for users not in database (but defined in JWT). Overwrite roles in JWT with database roles.", + "type": "boolean" + }, + "trustedIssuer": { + "description": "Issuer that should be accepted when validating external JWTs ", + "type": "string" + }, + "syncUserOnLogin": { + "description": "Add non-existent user to DB at login attempt with values provided in JWT.", + "type": "boolean" + } + }, + "required": [ + "max-age" + ] + }, + "oidc": { + "provider": { + "description": "", + "type": "string" + }, + "syncUserOnLogin": { + "description": "", + "type": "boolean" + }, + "updateUserOnLogin": { + "description": "", + "type": "boolean" + }, + "required": [ + "provider" + ] + }, + "ldap": { + "description": "For LDAP Authentication and user synchronisation.", + "type": "object", + "properties": { + "url": { + "description": "URL of LDAP directory server.", + "type": "string" + }, + "user_base": { + "description": "Base DN of user tree root.", + "type": "string" + }, + "search_dn": { + "description": "DN for authenticating LDAP admin account with general read rights.", + "type": "string" + }, + "user_bind": { + "description": "Expression used to authenticate users via LDAP bind. Must contain uid={username}.", + "type": "string" + }, + "user_filter": { + "description": "Filter to extract users for syncing.", + "type": "string" + }, + "username_attr": { + "description": "Attribute with full username. Default: gecos", + "type": "string" + }, + "sync_interval": { + "description": "Interval used for syncing local user table with LDAP directory. 
Parsed using time.ParseDuration.", + "type": "string" + }, + "sync_del_old_users": { + "description": "Delete obsolete users in database.", + "type": "boolean" + }, + "syncUserOnLogin": { + "description": "Add non-existent user to DB at login attempt if user exists in Ldap directory", + "type": "boolean" + } + }, + "required": [ + "url", + "user_base", + "search_dn", + "user_bind", + "user_filter" + ] + }, + "clusters": { + "description": "Configuration for the clusters to be displayed.", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "description": "The name of the cluster.", + "type": "string" + }, + "metricDataRepository": { + "description": "Type of the metric data repository for this cluster", "type": "object", "properties": { - "max-age": { - "description": "Configure how long a token is valid. As string parsable by time.ParseDuration()", - "type": "string" - }, - "cookieName": { - "description": "Cookie that should be checked for a JWT token.", - "type": "string" - }, - "validateUser": { - "description": "Deny login for users not in database (but defined in JWT). Overwrite roles in JWT with database roles.", - "type": "boolean" - }, - "trustedIssuer": { - "description": "Issuer that should be accepted when validating external JWTs ", - "type": "string" - }, - "syncUserOnLogin": { - "description": "Add non-existent user to DB at login attempt with values provided in JWT.", - "type": "boolean" - } + "kind": { + "type": "string", + "enum": [ + "influxdb", + "prometheus", + "cc-metric-store", + "test" + ] + }, + "url": { + "type": "string" + }, + "token": { + "type": "string" + } }, "required": [ - "max-age" + "kind", + "url" ] - }, - "ldap": { - "description": "For LDAP Authentication and user synchronisation.", + }, + "filterRanges": { + "description": "This option controls the slider ranges for the UI controls of numNodes, duration, and startTime.", "type": "object", "properties": { - "url": { - "description": "URL of LDAP directory server.", - "type": "string" - }, - "user_base": { - "description": "Base DN of user tree root.", - "type": "string" - }, - "search_dn": { - "description": "DN for authenticating LDAP admin account with general read rights.", - "type": "string" - }, - "user_bind": { - "description": "Expression used to authenticate users via LDAP bind. Must contain uid={username}.", - "type": "string" - }, - "user_filter": { - "description": "Filter to extract users for syncing.", - "type": "string" - }, - "username_attr": { - "description": "Attribute with full username. Default: gecos", - "type": "string" - }, - "sync_interval": { - "description": "Interval used for syncing local user table with LDAP directory. 
Parsed using time.ParseDuration.", - "type": "string" - }, - "sync_del_old_users": { - "description": "Delete obsolete users in database.", - "type": "boolean" - }, - "syncUserOnLogin": { - "description": "Add non-existent user to DB at login attempt if user exists in Ldap directory", - "type": "boolean" - } - }, - "required": [ - "url", - "user_base", - "search_dn", - "user_bind", - "user_filter" - ] - }, - "clusters": { - "description": "Configuration for the clusters to be displayed.", - "type": "array", - "items": { + "numNodes": { + "description": "UI slider range for number of nodes", "type": "object", "properties": { - "name": { - "description": "The name of the cluster.", - "type": "string" - }, - "metricDataRepository": { - "description": "Type of the metric data repository for this cluster", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": [ - "influxdb", - "prometheus", - "cc-metric-store", - "test" - ] - }, - "url": { - "type": "string" - }, - "token": { - "type": "string" - } - }, - "required": [ - "kind", - "url" - ] - }, - "filterRanges": { - "description": "This option controls the slider ranges for the UI controls of numNodes, duration, and startTime.", - "type": "object", - "properties": { - "numNodes": { - "description": "UI slider range for number of nodes", - "type": "object", - "properties": { - "from": { - "type": "integer" - }, - "to": { - "type": "integer" - } - }, - "required": [ - "from", - "to" - ] - }, - "duration": { - "description": "UI slider range for duration", - "type": "object", - "properties": { - "from": { - "type": "integer" - }, - "to": { - "type": "integer" - } - }, - "required": [ - "from", - "to" - ] - }, - "startTime": { - "description": "UI slider range for start time", - "type": "object", - "properties": { - "from": { - "type": "string", - "format": "date-time" - }, - "to": { - "type": "null" - } - }, - "required": [ - "from", - "to" - ] - } - }, - "required": [ - "numNodes", - "duration", - "startTime" - ] - } + "from": { + "type": "integer" + }, + "to": { + "type": "integer" + } }, "required": [ - "name", - "metricDataRepository", - "filterRanges" - ], - "minItems": 1 - } - }, - "ui-defaults": { - "description": "Default configuration for web UI", - "type": "object", - "properties": { - "plot_general_colorBackground": { - "description": "Color plot background according to job average threshold limits", - "type": "boolean" - }, - "plot_general_lineWidth": { - "description": "Initial linewidth", + "from", + "to" + ] + }, + "duration": { + "description": "UI slider range for duration", + "type": "object", + "properties": { + "from": { "type": "integer" - }, - "plot_list_jobsPerPage": { - "description": "Jobs shown per page in job lists", + }, + "to": { "type": "integer" + } }, - "plot_view_plotsPerRow": { - "description": "Number of plots per row in single job view", - "type": "integer" + "required": [ + "from", + "to" + ] + }, + "startTime": { + "description": "UI slider range for start time", + "type": "object", + "properties": { + "from": { + "type": "string", + "format": "date-time" + }, + "to": { + "type": "null" + } }, - "plot_view_showPolarplot": { - "description": "Option to toggle polar plot in single job view", - "type": "boolean" - }, - "plot_view_showRoofline": { - "description": "Option to toggle roofline plot in single job view", - "type": "boolean" - }, - "plot_view_showStatTable": { - "description": "Option to toggle the node statistic table in single job view", - "type": "boolean" - }, - 
"system_view_selectedMetric": { - "description": "Initial metric shown in system view", - "type": "string" - }, - "analysis_view_histogramMetrics": { - "description": "Metrics to show as job count histograms in analysis view", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "analysis_view_scatterPlotMetrics": { - "description": "Initial scatter plto configuration in analysis view", - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string", - "minItems": 2, - "maxItems": 2 - }, - "minItems": 1 - } - }, - "job_view_nodestats_selectedMetrics": { - "description": "Initial metrics shown in node statistics table of single job view", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "job_view_polarPlotMetrics": { - "description": "Metrics shown in polar plot of single job view", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "job_view_selectedMetrics": { - "description": "", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "plot_general_colorscheme": { - "description": "Initial color scheme", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, - "plot_list_selectedMetrics": { - "description": "Initial metric plots shown in jobs lists", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - } + "required": [ + "from", + "to" + ] + } }, "required": [ - "plot_general_colorBackground", - "plot_general_lineWidth", - "plot_list_jobsPerPage", - "plot_view_plotsPerRow", - "plot_view_showPolarplot", - "plot_view_showRoofline", - "plot_view_showStatTable", - "system_view_selectedMetric", - "analysis_view_histogramMetrics", - "analysis_view_scatterPlotMetrics", - "job_view_nodestats_selectedMetrics", - "job_view_polarPlotMetrics", - "job_view_selectedMetrics", - "plot_general_colorscheme", - "plot_list_selectedMetrics" + "numNodes", + "duration", + "startTime" ] + } }, - "enable-resampling": { - "description": "Enable dynamic zoom in frontend metric plots.", - "type": "object", - "properties": { - "trigger": { - "description": "Trigger next zoom level at less than this many visible datapoints.", - "type": "integer" - }, - "resolutions": { - "description": "Array of resampling target resolutions, in seconds.", - "type": "array", - "items": { - "type": "integer" - } - } - }, - "required": [ - "trigger", - "resolutions" - ] - } + "required": [ + "name", + "metricDataRepository", + "filterRanges" + ], + "minItems": 1 + } }, - "required": [ - "jwts", - "clusters" - ] + "ui-defaults": { + "description": "Default configuration for web UI", + "type": "object", + "properties": { + "plot_general_colorBackground": { + "description": "Color plot background according to job average threshold limits", + "type": "boolean" + }, + "plot_general_lineWidth": { + "description": "Initial linewidth", + "type": "integer" + }, + "plot_list_jobsPerPage": { + "description": "Jobs shown per page in job lists", + "type": "integer" + }, + "plot_view_plotsPerRow": { + "description": "Number of plots per row in single job view", + "type": "integer" + }, + "plot_view_showPolarplot": { + "description": "Option to toggle polar plot in single job view", + "type": "boolean" + }, + "plot_view_showRoofline": { + "description": "Option to toggle roofline plot in single job view", + "type": "boolean" + }, + "plot_view_showStatTable": { + "description": "Option to toggle the node statistic table in single job view", + "type": "boolean" + }, + 
"system_view_selectedMetric": { + "description": "Initial metric shown in system view", + "type": "string" + }, + "job_view_showFootprint": { + "description": "Option to toggle footprint ui in single job view", + "type": "boolean" + }, + "job_list_usePaging": { + "description": "Option to switch from continous scroll to paging", + "type": "boolean" + }, + "analysis_view_histogramMetrics": { + "description": "Metrics to show as job count histograms in analysis view", + "type": "array", + "items": { + "type": "string", + "minItems": 1 + } + }, + "analysis_view_scatterPlotMetrics": { + "description": "Initial scatter plto configuration in analysis view", + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string", + "minItems": 2, + "maxItems": 2 + }, + "minItems": 1 + } + }, + "job_view_nodestats_selectedMetrics": { + "description": "Initial metrics shown in node statistics table of single job view", + "type": "array", + "items": { + "type": "string", + "minItems": 1 + } + }, + "job_view_polarPlotMetrics": { + "description": "Metrics shown in polar plot of single job view", + "type": "array", + "items": { + "type": "string", + "minItems": 1 + } + }, + "job_view_selectedMetrics": { + "description": "", + "type": "array", + "items": { + "type": "string", + "minItems": 1 + } + }, + "plot_general_colorscheme": { + "description": "Initial color scheme", + "type": "array", + "items": { + "type": "string", + "minItems": 1 + } + }, + "plot_list_selectedMetrics": { + "description": "Initial metric plots shown in jobs lists", + "type": "array", + "items": { + "type": "string", + "minItems": 1 + } + } + }, + "required": [ + "plot_general_colorBackground", + "plot_general_lineWidth", + "plot_list_jobsPerPage", + "plot_view_plotsPerRow", + "plot_view_showPolarplot", + "plot_view_showRoofline", + "plot_view_showStatTable", + "system_view_selectedMetric", + "analysis_view_histogramMetrics", + "analysis_view_scatterPlotMetrics", + "job_view_nodestats_selectedMetrics", + "job_view_polarPlotMetrics", + "job_view_selectedMetrics", + "plot_general_colorscheme", + "plot_list_selectedMetrics" + ] + }, + }, + "required": [ + "jwts", + "clusters" + ] } From 1aae1c59d020bc7a4255913fa8dbb5723f4ea630 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Dec 2024 07:27:10 +0100 Subject: [PATCH 275/443] Make continous scroll the default --- internal/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/config.go b/internal/config/config.go index 0217d85..1ba49cf 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -32,7 +32,7 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{ "job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used"}, "job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"}, "job_view_showFootprint": true, - "job_list_usePaging": true, + "job_list_usePaging": false, "plot_general_colorBackground": true, "plot_general_colorscheme": []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"}, "plot_general_lineWidth": 3, From 3ac341517852e979ec2c32a5fc280a98a7148e60 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 3 Dec 2024 07:41:23 +0100 Subject: [PATCH 276/443] Mark new ui options as required --- pkg/schema/schemas/config.schema.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json index a9d7c47..e1b7dc8 100644 --- 
From 3ac341517852e979ec2c32a5fc280a98a7148e60 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Tue, 3 Dec 2024 07:41:23 +0100
Subject: [PATCH 276/443] Mark new ui options as required

---
 pkg/schema/schemas/config.schema.json | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json
index a9d7c47..e1b7dc8 100644
--- a/pkg/schema/schemas/config.schema.json
+++ b/pkg/schema/schemas/config.schema.json
@@ -484,6 +484,8 @@
         "plot_view_showRoofline",
         "plot_view_showStatTable",
         "system_view_selectedMetric",
+        "job_view_showFootprint",
+        "job_list_usePaging",
         "analysis_view_histogramMetrics",
@@ -492,7 +494,7 @@
         "plot_general_colorscheme",
         "plot_list_selectedMetrics"
       ]
-    },
+    }
   },
   "required": [
     "jwts",

From 9de5879786cc1a9bd66dfd464ee78616172bc75e Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Tue, 3 Dec 2024 09:01:21 +0100
Subject: [PATCH 277/443] Prepare release v1.4.0

---
 .goreleaser.yaml |  4 ++--
 ReleaseNotes.md  | 34 ++++++++++++++++++++++++++++++----
 2 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/.goreleaser.yaml b/.goreleaser.yaml
index 19d29cf..7eedfeb 100644
--- a/.goreleaser.yaml
+++ b/.goreleaser.yaml
@@ -70,7 +70,7 @@ archives:
       {{- else }}{{ .Arch }}{{ end }}
       {{- if .Arm }}v{{ .Arm }}{{ end }}
 checksum:
-  name_template: 'checksums.txt'
+  name_template: "checksums.txt"
 snapshot:
   name_template: "{{ incpatch .Version }}-next"
 changelog:
@@ -100,7 +100,7 @@ changelog:
 release:
   draft: false
   footer: |
-    Supports job archive version 1 and database version 6.
+    Supports job archive version 2 and database version 8.
     Please check out the [Release Notes](https://github.com/ClusterCockpit/cc-backend/blob/master/ReleaseNotes.md) for further details on breaking changes.

 # vim: set ts=2 sw=2 tw=0 fo=cnqoj
diff --git a/ReleaseNotes.md b/ReleaseNotes.md
index 547a1f4..35cff69 100644
--- a/ReleaseNotes.md
+++ b/ReleaseNotes.md
@@ -1,11 +1,37 @@
-# `cc-backend` version 1.3.1
+# `cc-backend` version 1.4.0

-Supports job archive version 1 and database version 7.
+Supports job archive version 2 and database version 8.

-This is a bugfix release of `cc-backend`, the API backend and frontend
+This is a minor release of `cc-backend`, the API backend and frontend
 implementation of ClusterCockpit.
 For release specific notes visit the
 [ClusterCockpit Documentation](https://clusterockpit.org/docs/release/).

 ## Breaking changes

-None
+- You need to perform a database migration. Depending on your database size the
+  migration might require several hours!
+- You need to adapt the `cluster.json` configuration files in the job-archive,
+  add the new required attributes to the metric list, and afterwards set
+  `./job-archive/version.txt` to version 2.
+- Continuous scrolling is now the default in all job lists. You can switch this
+  back to paging globally; in addition, every user can individually choose
+  between paging and continuous scrolling.
+- Tags have a scope now. Existing tags will get global scope in the database
+  migration.
+
+## New features
+
+- Tags have a scope now. Tags created by a basic user are only visible to that
+  user. Tags created by an admin/support role can be configured to be visible
+  to all users (global scope) or only to the admin/support roles.
+- Re-sampling support for running (requires a recent `cc-metric-store`) and
+  archived jobs. This greatly speeds up loading of large or very long jobs. You
+  need to add the new configuration key `enable-resampling` to the
+  `config.json` file.
+- For finished jobs, a total job energy is shown in the job view.
+- Continuous scrolling in job lists is now the default.
+- All database queries (especially for sqlite) were optimized, resulting in
+  dramatically faster load times.
+- A performance and energy footprint can be freely configured on a
+  per-subcluster basis. One can filter for footprint statistics for running
+  and finished jobs.
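The `enable-resampling` key called out in the release notes is the object defined in the schema earlier in this series: a `trigger` threshold in visible datapoints plus a `resolutions` array in seconds. A minimal `config.json` sketch, with illustrative values rather than recommendations:

    "enable-resampling": {
      "trigger": 30,
      "resolutions": [600, 300, 120, 60]
    }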
From 763c9dfa6b79559cc78f44a9967f5a3b53a6bae9 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Tue, 3 Dec 2024 15:22:34 +0100
Subject: [PATCH 278/443] fix schema definition of apiAllowedIPs

---
 pkg/schema/schemas/config.schema.json | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json
index e1b7dc8..c04dd74 100644
--- a/pkg/schema/schemas/config.schema.json
+++ b/pkg/schema/schemas/config.schema.json
@@ -10,7 +10,10 @@
     },
     "apiAllowedIPs": {
       "description": "Addresses from which secured API endpoints can be reached",
-      "type": "string"
+      "type": "array",
+      "items": {
+        "type": "string"
+      }
     },
     "user": {
       "description": "Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port.",

From 4fecbe820d0e6491598df42156373e8935adeb3b Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Tue, 3 Dec 2024 17:11:32 +0100
Subject: [PATCH 279/443] change order to match docs

---
 pkg/schema/config.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/schema/config.go b/pkg/schema/config.go
index 0a30d86..f9116cf 100644
--- a/pkg/schema/config.go
+++ b/pkg/schema/config.go
@@ -164,13 +164,13 @@ type ProgramConfig struct {
 	// Defines time X in seconds in which jobs are considered to be "short" and will be filtered in specific views.
 	ShortRunningJobsDuration int `json:"short-running-jobs-duration"`

-	// Array of Clusters
-	Clusters []*ClusterConfig `json:"clusters"`
-
 	// Energy Mix CO2 Emission Constant [g/kWh]
 	// If entered, displays estimated CO2 emission for job based on jobs totalEnergy
 	EmissionConstant int `json:"emission-constant"`

 	// Frequency of cron job workers
 	CronFrequency *CronFrequency `json:"cron-frequency"`
+
+	// Array of Clusters
+	Clusters []*ClusterConfig `json:"clusters"`
 }
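With the schema fix above, `apiAllowedIPs` is declared as an array of address strings instead of a single string. A hedged `config.json` fragment (the addresses are placeholders, not defaults):

    "apiAllowedIPs": [
      "127.0.0.1",
      "192.168.0.10"
    ]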
stat.to : stat.range.to}`) + .map((stat) => `${stat.field}: ${stat.from} - ${stat.to}`) .join(", ")} {/if} From ab07c7928f4b640fb9f751b51e472d74b2132654 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 4 Dec 2024 13:56:00 +0100 Subject: [PATCH 281/443] fix: fix footprint logic, do not scale thresholds on multi node jobs --- .../src/generic/helper/JobFootprint.svelte | 58 +++++++++++++------ web/frontend/src/job/JobSummary.svelte | 58 +++++++++++++------ 2 files changed, 78 insertions(+), 38 deletions(-) diff --git a/web/frontend/src/generic/helper/JobFootprint.svelte b/web/frontend/src/generic/helper/JobFootprint.svelte index b6087a7..dd56a95 100644 --- a/web/frontend/src/generic/helper/JobFootprint.svelte +++ b/web/frontend/src/generic/helper/JobFootprint.svelte @@ -23,26 +23,46 @@ alert: metricConfig.alert }; - // Job_Exclusivity does not matter, only aggregation - if (metricConfig.aggregation === "avg") { - return defaultThresholds; - } else if (metricConfig.aggregation === "sum") { + /* + NEW: Footprints should be comparable: Always use Unchanged Single Node Thresholds, except for shared jobs. + HW Clocks, HW Temperatures and File/Net IO Thresholds will be scaled down too, even if they are independent. + 'jf.stats' is one of: avg, min, max -> Always relative to one nodes' thresholds as configured. + */ + if (job.exclusive === 1) { + return defaultThresholds + } else { const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster) const jobFraction = job.numHWThreads / topol.node.length; - return { peak: round(defaultThresholds.peak * jobFraction, 0), normal: round(defaultThresholds.normal * jobFraction, 0), caution: round(defaultThresholds.caution * jobFraction, 0), alert: round(defaultThresholds.alert * jobFraction, 0), }; - } else { - console.warn( - "Missing or unkown aggregation mode (sum/avg) for metric:", - metricConfig, - ); - return defaultThresholds; } + + /* OLD: Based on Metric Aggregation Setting + // Job_Exclusivity does not matter, only aggregation + if (metricConfig.aggregation === "avg") { + return defaultThresholds; + } else if (metricConfig.aggregation === "sum") { + const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster) + const jobFraction = job.numHWThreads / topol.node.length; + + return { + peak: round(defaultThresholds.peak * jobFraction, 0), + normal: round(defaultThresholds.normal * jobFraction, 0), + caution: round(defaultThresholds.caution * jobFraction, 0), + alert: round(defaultThresholds.alert * jobFraction, 0), + }; + } else { + console.warn( + "Missing or unkown aggregation mode (sum/avg) for metric:", + metricConfig, + ); + return defaultThresholds; + } + */ } @@ -136,25 +156,25 @@ return a.impact - b.impact || ((a.name > b.name) ? 1 : ((b.name > a.name) ? 
From ab07c7928f4b640fb9f751b51e472d74b2132654 Mon Sep 17 00:00:00 2001
From: Christoph Kluge
Date: Wed, 4 Dec 2024 13:56:00 +0100
Subject: [PATCH 281/443] fix: fix footprint logic, do not scale thresholds on
 multi node jobs

---
 .../src/generic/helper/JobFootprint.svelte    | 58 +++++++++++++------
 web/frontend/src/job/JobSummary.svelte        | 58 +++++++++++++------
 2 files changed, 78 insertions(+), 38 deletions(-)

diff --git a/web/frontend/src/generic/helper/JobFootprint.svelte b/web/frontend/src/generic/helper/JobFootprint.svelte
index b6087a7..dd56a95 100644
--- a/web/frontend/src/generic/helper/JobFootprint.svelte
+++ b/web/frontend/src/generic/helper/JobFootprint.svelte
@@ -23,26 +23,46 @@
         alert: metricConfig.alert
       };

-      // Job_Exclusivity does not matter, only aggregation
-      if (metricConfig.aggregation === "avg") {
-        return defaultThresholds;
-      } else if (metricConfig.aggregation === "sum") {
+      /*
+        NEW: Footprints should be comparable: Always use unchanged single node thresholds, except for shared jobs.
+        HW Clocks, HW Temperatures and File/Net IO thresholds will be scaled down too, even if they are independent.
+        'jf.stats' is one of: avg, min, max -> Always relative to one node's thresholds as configured.
+      */
+      if (job.exclusive === 1) {
+        return defaultThresholds
+      } else {
         const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster)
         const jobFraction = job.numHWThreads / topol.node.length;
-
         return {
           peak: round(defaultThresholds.peak * jobFraction, 0),
           normal: round(defaultThresholds.normal * jobFraction, 0),
           caution: round(defaultThresholds.caution * jobFraction, 0),
           alert: round(defaultThresholds.alert * jobFraction, 0),
         };
-      } else {
-        console.warn(
-          "Missing or unkown aggregation mode (sum/avg) for metric:",
-          metricConfig,
-        );
-        return defaultThresholds;
       }
+
+      /* OLD: Based on Metric Aggregation Setting
+      // Job_Exclusivity does not matter, only aggregation
+      if (metricConfig.aggregation === "avg") {
+        return defaultThresholds;
+      } else if (metricConfig.aggregation === "sum") {
+        const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster)
+        const jobFraction = job.numHWThreads / topol.node.length;
+
+        return {
+          peak: round(defaultThresholds.peak * jobFraction, 0),
+          normal: round(defaultThresholds.normal * jobFraction, 0),
+          caution: round(defaultThresholds.caution * jobFraction, 0),
+          alert: round(defaultThresholds.alert * jobFraction, 0),
+        };
+      } else {
+        console.warn(
+          "Missing or unknown aggregation mode (sum/avg) for metric:",
+          metricConfig,
+        );
+        return defaultThresholds;
+      }
+      */
     }
@@ -136,25 +156,25 @@
     return a.impact - b.impact || ((a.name > b.name) ? 1 : ((b.name > a.name) ? -1 : 0)); });;

-  function evalFootprint(mean, thresholds, lowerIsBetter, level) {
+  function evalFootprint(value, thresholds, lowerIsBetter, level) {
     // Handle Metrics in which less value is better
     switch (level) {
       case "peak":
         if (lowerIsBetter) return false; // metric over peak -> return false to trigger impact -1
-        else return mean <= thresholds.peak && mean > thresholds.normal;
+        else return value <= thresholds.peak && value > thresholds.normal;
       case "alert":
         if (lowerIsBetter)
-          return mean <= thresholds.peak && mean >= thresholds.alert;
-        else return mean <= thresholds.alert && mean >= 0;
+          return value <= thresholds.peak && value >= thresholds.alert;
+        else return value <= thresholds.alert && value >= 0;
       case "caution":
         if (lowerIsBetter)
-          return mean < thresholds.alert && mean >= thresholds.caution;
-        else return mean <= thresholds.caution && mean > thresholds.alert;
+          return value < thresholds.alert && value >= thresholds.caution;
+        else return value <= thresholds.caution && value > thresholds.alert;
       case "normal":
         if (lowerIsBetter)
-          return mean < thresholds.caution && mean >= 0;
-        else return mean <= thresholds.normal && mean > thresholds.caution;
+          return value < thresholds.caution && value >= 0;
+        else return value <= thresholds.normal && value > thresholds.caution;
       default:
         return false;
     }
diff --git a/web/frontend/src/job/JobSummary.svelte b/web/frontend/src/job/JobSummary.svelte
index f2295f5..303782f 100644
--- a/web/frontend/src/job/JobSummary.svelte
+++ b/web/frontend/src/job/JobSummary.svelte
@@ -23,26 +23,46 @@
         alert: metricConfig.alert
      };

-      // Job_Exclusivity does not matter, only aggregation
-      if (metricConfig.aggregation === "avg") {
-        return defaultThresholds;
-      } else if (metricConfig.aggregation === "sum") {
+      /*
+        NEW: Footprints should be comparable: Always use unchanged single node thresholds, except for shared jobs.
+        HW Clocks, HW Temperatures and File/Net IO thresholds will be scaled down too, even if they are independent.
+        'jf.stats' is one of: avg, min, max -> Always relative to one node's thresholds as configured.
+      */
+      if (job.exclusive === 1) {
+        return defaultThresholds
+      } else {
         const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster)
         const jobFraction = job.numHWThreads / topol.node.length;
-
         return {
           peak: round(defaultThresholds.peak * jobFraction, 0),
           normal: round(defaultThresholds.normal * jobFraction, 0),
           caution: round(defaultThresholds.caution * jobFraction, 0),
           alert: round(defaultThresholds.alert * jobFraction, 0),
         };
-      } else {
-        console.warn(
-          "Missing or unkown aggregation mode (sum/avg) for metric:",
-          metricConfig,
-        );
-        return defaultThresholds;
       }
+
+      /* OLD: Based on Metric Aggregation Setting
+      // Job_Exclusivity does not matter, only aggregation
+      if (metricConfig.aggregation === "avg") {
+        return defaultThresholds;
+      } else if (metricConfig.aggregation === "sum") {
+        const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster)
+        const jobFraction = job.numHWThreads / topol.node.length;
+
+        return {
+          peak: round(defaultThresholds.peak * jobFraction, 0),
+          normal: round(defaultThresholds.normal * jobFraction, 0),
+          caution: round(defaultThresholds.caution * jobFraction, 0),
+          alert: round(defaultThresholds.alert * jobFraction, 0),
+        };
+      } else {
+        console.warn(
+          "Missing or unknown aggregation mode (sum/avg) for metric:",
+          metricConfig,
+        );
+        return defaultThresholds;
+      }
+      */
     }
@@ -142,25 +162,25 @@
     return a.impact - b.impact || ((a.name > b.name) ? 1 : ((b.name > a.name) ?
-1 : 0)); });; - function evalFootprint(mean, thresholds, lowerIsBetter, level) { + function evalFootprint(value, thresholds, lowerIsBetter, level) { // Handle Metrics in which less value is better switch (level) { case "peak": if (lowerIsBetter) return false; // metric over peak -> return false to trigger impact -1 - else return mean <= thresholds.peak && mean > thresholds.normal; + else return value <= thresholds.peak && value > thresholds.normal; case "alert": if (lowerIsBetter) - return mean <= thresholds.peak && mean >= thresholds.alert; - else return mean <= thresholds.alert && mean >= 0; + return value <= thresholds.peak && value >= thresholds.alert; + else return value <= thresholds.alert && value >= 0; case "caution": if (lowerIsBetter) - return mean < thresholds.alert && mean >= thresholds.caution; - else return mean <= thresholds.caution && mean > thresholds.alert; + return value < thresholds.alert && value >= thresholds.caution; + else return value <= thresholds.caution && value > thresholds.alert; case "normal": if (lowerIsBetter) - return mean < thresholds.caution && mean >= 0; - else return mean <= thresholds.normal && mean > thresholds.caution; + return value < thresholds.caution && value >= 0; + else return value <= thresholds.normal && value > thresholds.caution; default: return false; } From a7395ed45bb3ea623550be4bbebf221757eb8cc3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 4 Dec 2024 13:57:05 +0100 Subject: [PATCH 282/443] remove config for polarPlotMetrics --- internal/config/config.go | 1 - pkg/schema/schemas/config.schema.json | 9 --------- web/frontend/src/Job.root.svelte | 3 --- 3 files changed, 13 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 1ba49cf..4f1a8c3 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -29,7 +29,6 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{ "analysis_view_histogramMetrics": []string{"flops_any", "mem_bw", "mem_used"}, "analysis_view_scatterPlotMetrics": [][]string{{"flops_any", "mem_bw"}, {"flops_any", "cpu_load"}, {"cpu_load", "mem_bw"}}, "job_view_nodestats_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"}, - "job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used"}, "job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"}, "job_view_showFootprint": true, "job_list_usePaging": false, diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json index c04dd74..0a3905a 100644 --- a/pkg/schema/schemas/config.schema.json +++ b/pkg/schema/schemas/config.schema.json @@ -445,14 +445,6 @@ "minItems": 1 } }, - "job_view_polarPlotMetrics": { - "description": "Metrics shown in polar plot of single job view", - "type": "array", - "items": { - "type": "string", - "minItems": 1 - } - }, "job_view_selectedMetrics": { "description": "", "type": "array", @@ -492,7 +484,6 @@ "analysis_view_histogramMetrics", "analysis_view_scatterPlotMetrics", "job_view_nodestats_selectedMetrics", - "job_view_polarPlotMetrics", "job_view_selectedMetrics", "plot_general_colorscheme", "plot_list_selectedMetrics" diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index bb48479..ad9a0c7 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -139,9 +139,6 @@ return names; }, []) ), - ...(ccconfig[`job_view_polarPlotMetrics:${job.cluster}`] || - ccconfig[`job_view_polarPlotMetrics`] - ), ...(ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] || 
ccconfig[`job_view_nodestats_selectedMetrics`] ), From 3b769c30596bf8298960a982659c918cbef0f15a Mon Sep 17 00:00:00 2001 From: Aditya Ujeniya Date: Wed, 4 Dec 2024 14:19:56 +0100 Subject: [PATCH 283/443] fix: Update to resampler handling different resolutions --- pkg/resampler/resampler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/resampler/resampler.go b/pkg/resampler/resampler.go index 26cead0..192e56f 100644 --- a/pkg/resampler/resampler.go +++ b/pkg/resampler/resampler.go @@ -9,7 +9,7 @@ import ( ) func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, error) { - if old_frequency == 0 || new_frequency == 0 { + if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency { return nil, errors.New("either old or new frequency is set to 0") } @@ -37,7 +37,7 @@ func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int // Adapted from https://github.com/haoel/downsampling/blob/master/core/lttb.go func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_frequency int) ([]schema.Float, int, error) { - if old_frequency == 0 || new_frequency == 0 { + if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency { return data, old_frequency, nil } From 257250714d9a4835eca5316d5716a843a53a7527 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 4 Dec 2024 15:22:19 +0100 Subject: [PATCH 284/443] review polar plot component, adds min dataset --- web/frontend/src/generic/plots/Polar.svelte | 52 +++++++++++++++------ 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/web/frontend/src/generic/plots/Polar.svelte b/web/frontend/src/generic/plots/Polar.svelte index b55e4f0..7e6957c 100644 --- a/web/frontend/src/generic/plots/Polar.svelte +++ b/web/frontend/src/generic/plots/Polar.svelte @@ -45,7 +45,7 @@ if (footprintData) { return footprintData.filter(fpd => { if (!jobMetrics.find(m => m.name == fpd.name && m.scope == "node" || fpd.impact == 4)) { - console.warn(`PolarPlot: No metric data (or config) for '${fpd.name}'`) + console.warn(`PolarPlot: No metric data for '${fpd.name}'`) return false } return true @@ -72,6 +72,7 @@ const getMetricConfig = getContext("getMetricConfig"); const getValuesForStatGeneric = (getStat) => labels.map(name => { + // TODO: Requires Scaling if Shared Job const peak = getMetricConfig(cluster, subCluster, name).peak const metric = jobMetrics.find(m => m.name == name && m.scope == "node") const value = getStat(metric.metric) / peak @@ -79,6 +80,7 @@ }) const getValuesForStatFootprint = (getStat) => labels.map(name => { + // FootprintData 'Peak' is pre-scaled for Shared Jobs in JobSummary Component const peak = footprintData.find(fpd => fpd.name === name).peak const metric = jobMetrics.find(m => m.name == name && m.scope == "node") const value = getStat(metric.metric) / peak @@ -86,14 +88,21 @@ }) function getMax(metric) { - let max = 0 + let max = metric.series[0].statistics.max; for (let series of metric.series) max = Math.max(max, series.statistics.max) return max } + function getMin(metric) { + let min = metric.series[0].statistics.min; + for (let series of metric.series) + min = Math.min(min, series.statistics.min) + return min + } + function getAvg(metric) { - let avg = 0 + let avg = 0; for (let series of metric.series) avg += series.statistics.avg return avg / metric.series.length @@ -104,6 +113,8 @@ return getValuesForStatGeneric(getAvg) } else if (type === 'max') { return 
getValuesForStatGeneric(getMax) + } else if (type === 'min') { + return getValuesForStatGeneric(getMin) } console.log('Unknown Type For Polar Data') return [] @@ -114,6 +125,8 @@ return getValuesForStatFootprint(getAvg) } else if (type === 'max') { return getValuesForStatFootprint(getMax) + } else if (type === 'min') { + return getValuesForStatFootprint(getMin) } console.log('Unknown Type For Polar Data') return [] @@ -124,25 +137,36 @@ datasets: [ { label: 'Max', - data: footprintData ? loadDataForFootprint('max') : loadDataGeneric('max'), // + data: footprintData ? loadDataForFootprint('max') : loadDataGeneric('max'), // Node Scope Only fill: 1, - backgroundColor: 'rgba(0, 102, 255, 0.25)', - borderColor: 'rgb(0, 102, 255)', - pointBackgroundColor: 'rgb(0, 102, 255)', + backgroundColor: 'rgba(0, 0, 255, 0.25)', + borderColor: 'rgb(0, 0, 255)', + pointBackgroundColor: 'rgb(0, 0, 255)', pointBorderColor: '#fff', pointHoverBackgroundColor: '#fff', - pointHoverBorderColor: 'rgb(0, 102, 255)' + pointHoverBorderColor: 'rgb(0, 0, 255)' }, { label: 'Avg', - data: footprintData ? loadDataForFootprint('avg') : loadDataGeneric('avg'), // getValuesForStat(getAvg) - fill: true, - backgroundColor: 'rgba(255, 153, 0, 0.25)', - borderColor: 'rgb(255, 153, 0)', - pointBackgroundColor: 'rgb(255, 153, 0)', + data: footprintData ? loadDataForFootprint('avg') : loadDataGeneric('avg'), // Node Scope Only + fill: 2, + backgroundColor: 'rgba(255, 210, 0, 0.25)', + borderColor: 'rgb(255, 210, 0)', + pointBackgroundColor: 'rgb(255, 210, 0)', pointBorderColor: '#fff', pointHoverBackgroundColor: '#fff', - pointHoverBorderColor: 'rgb(255, 153, 0)' + pointHoverBorderColor: 'rgb(255, 210, 0)' + }, + { + label: 'Min', + data: footprintData ? loadDataForFootprint('min') : loadDataGeneric('min'), // Node Scope Only + fill: true, + backgroundColor: 'rgba(255, 0, 0, 0.25)', + borderColor: 'rgb(255, 0, 0)', + pointBackgroundColor: 'rgb(255, 0, 0)', + pointBorderColor: '#fff', + pointHoverBackgroundColor: '#fff', + pointHoverBorderColor: 'rgb(255, 0, 0)' } ] } From 01c06728ebf6c4dc1957251dd1fc59d6b25ca79f Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 4 Dec 2024 16:09:06 +0100 Subject: [PATCH 285/443] review footprint iconography and messages --- .../src/generic/helper/JobFootprint.svelte | 20 +++++++++++-------- web/frontend/src/job/JobSummary.svelte | 18 ++++++++++------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/web/frontend/src/generic/helper/JobFootprint.svelte b/web/frontend/src/generic/helper/JobFootprint.svelte index dd56a95..187eff9 100644 --- a/web/frontend/src/generic/helper/JobFootprint.svelte +++ b/web/frontend/src/generic/helper/JobFootprint.svelte @@ -109,21 +109,21 @@ return { ...fmBase, color: "danger", - message: `Metric average way ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, + message: `Footprint value way ${fmc.lowerIsBetter ? "above" : "below"} expected normal threshold.`, impact: 3 }; } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "caution")) { return { ...fmBase, color: "warning", - message: `Metric average ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, + message: `Footprint value ${fmc.lowerIsBetter ? 
"above" : "below"} expected normal threshold.`, impact: 2, }; } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "normal")) { return { ...fmBase, color: "success", - message: "Metric average within expected thresholds.", + message: "Footprint value within expected thresholds.", impact: 1, }; } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "peak")) { @@ -131,7 +131,7 @@ ...fmBase, color: "info", message: - "Metric average above expected normal thresholds: Check for artifacts recommended.", + "Footprint value above expected normal threshold: Check for artifacts recommended.", impact: 0, }; } else { @@ -139,7 +139,7 @@ ...fmBase, color: "secondary", message: - "Metric average above expected peak threshold: Check for artifacts!", + "Footprint value above expected peak threshold: Check for artifacts!", impact: -1, }; } @@ -201,10 +201,14 @@ >
- {#if fpd.impact === 3 || fpd.impact === -1} - + {#if fpd.impact === 3} + {:else if fpd.impact === 2} + {:else if fpd.impact === 0} + + {:else if fpd.impact === -1} + {/if} {#if fpd.impact === 3} @@ -214,7 +218,7 @@ {:else if fpd.impact === 1} {:else if fpd.impact === 0} - + {:else if fpd.impact === -1} {/if} diff --git a/web/frontend/src/job/JobSummary.svelte b/web/frontend/src/job/JobSummary.svelte index 303782f..f7772c7 100644 --- a/web/frontend/src/job/JobSummary.svelte +++ b/web/frontend/src/job/JobSummary.svelte @@ -114,21 +114,21 @@ return { ...fmBase, color: "danger", - message: `Metric average way ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, + message: `Footprint value way ${fmc.lowerIsBetter ? "above" : "below"} expected normal threshold.`, impact: 3 }; } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "caution")) { return { ...fmBase, color: "warning", - message: `Metric average ${fmc.lowerIsBetter ? "above" : "below"} expected normal thresholds.`, + message: `Footprint value ${fmc.lowerIsBetter ? "above" : "below"} expected normal threshold.`, impact: 2, }; } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "normal")) { return { ...fmBase, color: "success", - message: "Metric average within expected thresholds.", + message: "Footprint value within expected thresholds.", impact: 1, }; } else if (evalFootprint(jf.value, fmt, fmc.lowerIsBetter, "peak")) { @@ -136,7 +136,7 @@ ...fmBase, color: "info", message: - "Metric average above expected normal thresholds: Check for artifacts recommended.", + "Footprint value above expected normal threshold: Check for artifacts recommended.", impact: 0, }; } else { @@ -144,7 +144,7 @@ ...fmBase, color: "secondary", message: - "Metric average above expected peak threshold: Check for artifacts!", + "Footprint value above expected peak threshold: Check for artifacts!", impact: -1, }; } @@ -264,10 +264,14 @@ id={`footprint-${job.jobId}-${index}`} >
- {#if fpd.impact === 3 || fpd.impact === -1} + {#if fpd.impact === 3} {:else if fpd.impact === 2} + {:else if fpd.impact === 0} + + {:else if fpd.impact === -1} + {/if} {#if fpd.impact === 3} @@ -276,7 +280,7 @@ {:else if fpd.impact === 1} {:else if fpd.impact === 0} - + {:else if fpd.impact === -1} {/if} From 85dc0362c12c0ebe5c8304c1e1b40e85daf3cb6a Mon Sep 17 00:00:00 2001 From: Aditya Ujeniya Date: Wed, 4 Dec 2024 17:54:54 +0100 Subject: [PATCH 286/443] fix: SimpleResampler fixed --- pkg/resampler/resampler.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/resampler/resampler.go b/pkg/resampler/resampler.go index 192e56f..ebc7e88 100644 --- a/pkg/resampler/resampler.go +++ b/pkg/resampler/resampler.go @@ -8,20 +8,20 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/schema" ) -func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, error) { +func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, int64, error) { if old_frequency == 0 || new_frequency == 0 || new_frequency <= old_frequency { - return nil, errors.New("either old or new frequency is set to 0") + return data, old_frequency, nil } if new_frequency%old_frequency != 0 { - return nil, errors.New("new sampling frequency should be multiple of the old frequency") + return nil, 0, errors.New("new sampling frequency should be multiple of the old frequency") } var step int = int(new_frequency / old_frequency) var new_data_length = len(data) / step if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) { - return data, nil + return data, old_frequency, nil } new_data := make([]schema.Float, new_data_length) @@ -30,7 +30,7 @@ func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int new_data[i] = data[i*step] } - return new_data, nil + return new_data, new_frequency, nil } // Inspired by one of the algorithms from https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf From 49a94170d2adba4af74d9abcba8d2b7ce035a885 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 5 Dec 2024 07:49:52 +0100 Subject: [PATCH 287/443] Add Fixme note for Energy calculation --- internal/repository/job.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/repository/job.go b/internal/repository/job.go index 592997e..5f73bad 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -604,9 +604,10 @@ func (r *JobRepository) UpdateEnergy( for _, fp := range sc.EnergyFootprint { if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh - // Energy: Power (in Watts) * Time (in Seconds) - if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) + if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh) + // FIXME: Needs sum as stats type } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) + // Energy: Power (in Watts) * Time (in Seconds) // Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits energy = math.Round(((LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600/1000)*100) / 100 } From 010c903c746da719bed544f4085c5a792eb893d0 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 5 Dec 2024 08:35:10 +0100 Subject: [PATCH 288/443] Add known issues section to release notes --- ReleaseNotes.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git 
a/ReleaseNotes.md b/ReleaseNotes.md index 35cff69..cb8e2db 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -35,3 +35,11 @@ For release specific notes visit the [ClusterCockpit Documentation](https://clus - A performance and energy footprint can be freely configured on a per subcluster base. One can filter for footprint statistics for running and finished jobs. + +## Known issues + +- Currently energy footprint metrics of type energy are ignored for calculating + total energy. +- Resampling for running jobs only works with cc-metric-store. +- With energy footprint metrics of type power the unit is ignored and it is + assumed the metric has the unit Watt. From 0bbedd160073f569bf51d7c8bed5df97bf28ee3e Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 5 Dec 2024 10:41:54 +0100 Subject: [PATCH 289/443] Remove obsolete archive migration from build list --- .goreleaser.yaml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 7eedfeb..3edcb7d 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -34,19 +34,6 @@ builds: main: ./tools/archive-manager tags: - static_build - - env: - - CGO_ENABLED=0 - goos: - - linux - goarch: - - amd64 - goamd64: - - v3 - id: "archive-migration" - binary: archive-migration - main: ./tools/archive-migration - tags: - - static_build - env: - CGO_ENABLED=0 goos: From 9248ee88683e47cce0884a739b879e83d04ec3da Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 9 Dec 2024 11:06:12 +0100 Subject: [PATCH 290/443] fix: fix renamed column reference in searchbar workflow --- cmd/cc-backend/cli.go | 2 +- internal/repository/job.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/cc-backend/cli.go b/cmd/cc-backend/cli.go index f828a24..8bc6681 100644 --- a/cmd/cc-backend/cli.go +++ b/cmd/cc-backend/cli.go @@ -14,7 +14,7 @@ var ( func cliInit() { flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize sqlite database file, config.json and .env") flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)") - flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap") + flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'hpc_user' table with ldap") flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling") flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)") flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI") diff --git a/internal/repository/job.go b/internal/repository/job.go index 5f73bad..cc44ca9 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -308,17 +308,17 @@ func (r *JobRepository) FindUserOrProjectOrJobname(user *schema.User, searchterm return searchterm, "", "", "" } else { // Has to have letters and logged-in user for other guesses if user != nil { - // Find username by username in job table (match) - uresult, _ := r.FindColumnValue(user, searchterm, "job", "hpc_user", "hpc_user", false) if uresult != "" { return "", uresult, "", "" } - // Find username by real name in hpc_user table (like) nresult, _ := r.FindColumnValue(user, searchterm, "hpc_user", "username", "name", true) if
nresult != "" { return "", nresult, "", "" } - // Find projectId in jobs (match) + // Find projectId by projectId in job table (match) presult, _ := r.FindColumnValue(user, searchterm, "job", "project", "project", false) if presult != "" { return "", "", presult, "" From 484992828875d13b14375b1bdf4dc92773486ef7 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 10 Dec 2024 16:35:43 +0100 Subject: [PATCH 291/443] Rename old column name for user Fixes #314 --- internal/repository/user.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/user.go b/internal/repository/user.go index 9b7e94e..9beca26 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -73,7 +73,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) { func (r *UserRepository) GetLdapUsernames() ([]string, error) { var users []string - rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE user.ldap = 1`) + rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`) if err != nil { log.Warn("Error while querying usernames") return nil, err From 212c45e07056d5ee9c4278cd8ddc100f60dbfbf6 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Tue, 10 Dec 2024 16:45:05 +0100 Subject: [PATCH 292/443] Prepare bug fix release 1.4.1 --- Makefile | 2 +- ReleaseNotes.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index b673e79..48da4e0 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ TARGET = ./cc-backend VAR = ./var CFG = config.json .env FRONTEND = ./web/frontend -VERSION = 1.4.0 +VERSION = 1.4.1 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development') CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S") LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}' diff --git a/ReleaseNotes.md b/ReleaseNotes.md index cb8e2db..bb25b5d 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -1,8 +1,8 @@ -# `cc-backend` version 1.4.0 +# `cc-backend` version 1.4.1 Supports job archive version 2 and database version 8. -This is a minor release of `cc-backend`, the API backend and frontend +This is a small bug fix release of `cc-backend`, the API backend and frontend implementation of ClusterCockpit. For release specific notes visit the [ClusterCockpit Documentation](https://clusterockpit.org/docs/release/). 
From ee2c5b58d78a1334e36a7351de9709e9f126271a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 17 Dec 2024 15:14:24 +0100 Subject: [PATCH 293/443] fix: add missing sorting parameter to REST API call and test --- internal/api/rest.go | 2 +- internal/repository/repository_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index db747ce..41b8d5a 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -341,7 +341,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { withMetadata := false filter := &model.JobFilter{} page := &model.PageRequest{ItemsPerPage: 25, Page: 1} - order := &model.OrderByInput{Field: "startTime", Order: model.SortDirectionEnumDesc} + order := &model.OrderByInput{Field: "startTime", Type: "col", Order: model.SortDirectionEnumDesc} for key, vals := range r.URL.Query() { switch key { diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 6d1fbfc..1ca9ec5 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -111,7 +111,7 @@ func BenchmarkDB_QueryJobs(b *testing.B) { user := "mppi133h" filter.User = &model.StringInput{Eq: &user} page := &model.PageRequest{ItemsPerPage: 50, Page: 1} - order := &model.OrderByInput{Field: "startTime", Order: model.SortDirectionEnumDesc} + order := &model.OrderByInput{Field: "startTime", Type: "col", Order: model.SortDirectionEnumDesc} b.Run("QueryJobs", func(b *testing.B) { db := setup(b) From fcbfa451f2f3c46078639276ec729abd95d572a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 06:22:10 +0000 Subject: [PATCH 294/443] Bump golang.org/x/crypto from 0.29.0 to 0.31.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.29.0 to 0.31.0. - [Commits](https://github.com/golang/crypto/compare/v0.29.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 27a703c..681cc98 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/swaggo/http-swagger v1.3.4 github.com/swaggo/swag v1.16.4 github.com/vektah/gqlparser/v2 v2.5.20 - golang.org/x/crypto v0.29.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20240707233637-46b078467d37 golang.org/x/oauth2 v0.21.0 ) @@ -79,9 +79,9 @@ require ( go.uber.org/atomic v1.11.0 // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.31.0 // indirect - golang.org/x/sync v0.9.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.27.0 // indirect google.golang.org/protobuf v1.35.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 6506264..be6272e 100644 --- a/go.sum +++ b/go.sum @@ -238,8 +238,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -262,8 +262,8 @@ golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -273,8 +273,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -287,8 +287,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= From bc890259246057947bf2586f037801b8828c1077 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Wed, 18 Dec 2024 11:45:56 +0100 Subject: [PATCH 295/443] Revert to blocking startJob REST api Fixes #316 --- cmd/cc-backend/server.go | 4 -- internal/api/api_test.go | 3 - internal/api/rest.go | 46 +++++++++------ internal/repository/dbConnection.go | 2 - internal/repository/jobStartWorker.go | 83 --------------------------- 5 files changed, 28 insertions(+), 110 deletions(-) delete mode 100644 internal/repository/jobStartWorker.go diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index 083b9e5..0770e81 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -25,7 +25,6 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph" "github.com/ClusterCockpit/cc-backend/internal/graph/generated" - "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/routerConfig" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv" @@ -314,9 +313,6 @@ func serverShutdown() { // First shut down the server gracefully (waiting for all ongoing requests) server.Shutdown(context.Background()) - // Then, wait for any async jobStarts still pending... - repository.WaitForJobStart() - // Then, wait for any async archivings still pending... 
archiver.WaitForArchiving() } diff --git a/internal/api/api_test.go b/internal/api/api_test.go index bcabd5f..c47bd4d 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -249,9 +249,6 @@ func TestRestApi(t *testing.T) { if response.StatusCode != http.StatusCreated { t.Fatal(response.Status, recorder.Body.String()) } - - time.Sleep(1 * time.Second) - resolver := graph.GetResolverInstance() job, err := restapi.JobRepository.Find(&TestJobId, &TestClusterName, &TestStartTime) if err != nil { diff --git a/internal/api/rest.go b/internal/api/rest.go index 41b8d5a..4e52701 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -123,18 +123,8 @@ func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) { } } -// StartJobApiResponse model -type StartJobApiResponse struct { - Message string `json:"msg"` -} - -// DeleteJobApiResponse model -type DeleteJobApiResponse struct { - Message string `json:"msg"` -} - -// UpdateUserApiResponse model -type UpdateUserApiResponse struct { +// DefaultApiResponse model +type DefaultJobApiResponse struct { Message string `json:"msg"` } @@ -790,6 +780,11 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { return } + // acquire lock to avoid race condition between API calls + var unlockOnce sync.Once + api.RepositoryMutex.Lock() + defer unlockOnce.Do(api.RepositoryMutex.Unlock) + // Check if combination of (job_id, cluster_id, start_time) already exists: jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil) if err != nil && err != sql.ErrNoRows { @@ -804,12 +799,27 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { } } - repository.TriggerJobStart(repository.JobWithUser{Job: &req, User: repository.GetUserFromContext(r.Context())}) + id, err := api.JobRepository.Start(&req) + if err != nil { + handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw) + return + } + // unlock here, adding Tags can be async + unlockOnce.Do(api.RepositoryMutex.Unlock) + for _, tag := range req.Tags { + if _, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), id, tag.Type, tag.Name, tag.Scope); err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw) + return + } + } + + log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime) rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusCreated) - json.NewEncoder(rw).Encode(StartJobApiResponse{ - Message: fmt.Sprintf("Successfully triggered job start"), + json.NewEncoder(rw).Encode(DefaultJobApiResponse{ + Message: "success", }) } @@ -892,7 +902,7 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { } rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) - json.NewEncoder(rw).Encode(DeleteJobApiResponse{ + json.NewEncoder(rw).Encode(DefaultJobApiResponse{ Message: fmt.Sprintf("Successfully deleted job %s", id), }) } @@ -943,7 +953,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) - json.NewEncoder(rw).Encode(DeleteJobApiResponse{ + json.NewEncoder(rw).Encode(DefaultJobApiResponse{ Message: fmt.Sprintf("Successfully deleted job %d", job.ID), }) } @@ -987,7 +997,7 @@ func (api *RestApi)
deleteJobBefore(rw http.ResponseWriter, r *http.Request) { rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) - json.NewEncoder(rw).Encode(DeleteJobApiResponse{ + json.NewEncoder(rw).Encode(DefaultJobApiResponse{ Message: fmt.Sprintf("Successfully deleted %d jobs", cnt), }) } diff --git a/internal/repository/dbConnection.go b/internal/repository/dbConnection.go index d062052..418eef9 100644 --- a/internal/repository/dbConnection.go +++ b/internal/repository/dbConnection.go @@ -82,8 +82,6 @@ func Connect(driver string, db string) { if err != nil { log.Fatal(err) } - - startJobStartWorker() }) } diff --git a/internal/repository/jobStartWorker.go b/internal/repository/jobStartWorker.go deleted file mode 100644 index 18d2be7..0000000 --- a/internal/repository/jobStartWorker.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. -// All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. -package repository - -import ( - "sync" - "time" - - "github.com/ClusterCockpit/cc-backend/pkg/log" - "github.com/ClusterCockpit/cc-backend/pkg/schema" -) - -type JobWithUser struct { - Job *schema.JobMeta - User *schema.User -} - -var ( - jobStartPending sync.WaitGroup - jobStartChannel chan JobWithUser -) - -func startJobStartWorker() { - jobStartChannel = make(chan JobWithUser, 128) - - go jobStartWorker() -} - -// Archiving worker thread -func jobStartWorker() { - for { - select { - case req, ok := <-jobStartChannel: - if !ok { - break - } - jobRepo := GetJobRepository() - var id int64 - - for i := 0; i < 5; i++ { - var err error - - id, err = jobRepo.Start(req.Job) - if err != nil { - log.Errorf("Attempt %d: insert into database failed: %v", i, err) - } else { - break - } - time.Sleep(1 * time.Second) - } - - for _, tag := range req.Job.Tags { - if _, err := jobRepo.AddTagOrCreate(req.User, id, - tag.Type, tag.Name, tag.Scope); err != nil { - log.Errorf("adding tag to new job %d failed: %v", id, err) - } - } - - log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", - id, req.Job.Cluster, req.Job.JobID, req.Job.User, req.Job.StartTime) - - jobStartPending.Done() - } - } -} - -// Trigger async archiving -func TriggerJobStart(req JobWithUser) { - if jobStartChannel == nil { - log.Fatal("Cannot start Job without jobStart channel. Did you Start the worker?") - } - - jobStartPending.Add(1) - jobStartChannel <- req -} - -// Wait for background thread to finish pending archiving operations -func WaitForJobStart() { - // close channel and wait for worker to process remaining jobs - jobStartPending.Wait() -} From 7b91a819be25cf95562075916e8fad433e493f7c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 18 Dec 2024 16:40:49 +0100 Subject: [PATCH 296/443] add workaround for clipboard button --- .../src/generic/joblist/JobInfo.svelte | 43 ++++++++++++++----- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/web/frontend/src/generic/joblist/JobInfo.svelte b/web/frontend/src/generic/joblist/JobInfo.svelte index adacd4f..8917653 100644 --- a/web/frontend/src/generic/joblist/JobInfo.svelte +++ b/web/frontend/src/generic/joblist/JobInfo.svelte @@ -7,7 +7,7 @@ --> @@ -58,13 +76,18 @@ {job.jobId} ({job.cluster}) - + + { displayCheck ? 'Copied!' 
: 'Copy Job ID to Clipboard' } {#if job.metaData?.jobName} {#if job.metaData?.jobName.length <= 25} From 0bdbcb8bab96ccaa39365307e66e8f71adf7f871 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 19 Dec 2024 05:55:31 +0100 Subject: [PATCH 297/443] Use persisted duration for running jobs Fixes #318 --- cmd/cc-backend/main.go | 2 +- internal/graph/schema.resolvers.go | 5 +---- internal/repository/job.go | 8 +------- internal/repository/jobQuery.go | 3 +-- internal/routerConfig/routes.go | 1 + 5 files changed, 5 insertions(+), 14 deletions(-) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index 436379d..33bab07 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -112,7 +112,7 @@ func main() { if flagInit { initEnv() - fmt.Print("Succesfully setup environment!\n") + fmt.Print("Successfully setup environment!\n") fmt.Print("Please review config.json and .env and adjust it to your needs.\n") fmt.Print("Add your job-archive at ./var/job-archive.\n") os.Exit(0) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 9fd7260..b529f2c 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -36,10 +36,7 @@ func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, // ConcurrentJobs is the resolver for the concurrentJobs field. func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) { - if obj.State == schema.JobStateRunning { - obj.Duration = int32(time.Now().Unix() - obj.StartTimeUnix) - } - + // FIXME: Make the hardcoded duration configurable if obj.Exclusive != 1 && obj.Duration > 600 { return r.Repo.FindConcurrentJobs(ctx, obj) } diff --git a/internal/repository/job.go b/internal/repository/job.go index cc44ca9..2c206b6 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -79,14 +79,7 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { } job.RawFootprint = nil - // if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil { - // return nil, err - // } - job.StartTime = time.Unix(job.StartTimeUnix, 0) - if job.Duration == 0 && job.State == schema.JobStateRunning { - job.Duration = int32(time.Since(job.StartTime).Seconds()) - } return job, nil } @@ -457,6 +450,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in return subclusters, nil } +// FIXME: Set duration to requested walltime? func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error { start := time.Now() res, err := sq.Update("job"). diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index 0ab2ea2..b43b569 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -170,8 +170,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = buildTimeCondition("job.start_time", filter.StartTime, query) } if filter.Duration != nil { - now := time.Now().Unix() // There does not seam to be a portable way to get the current unix timestamp accross different DBs. - query = query.Where("(CASE WHEN job.job_state = 'running' THEN (? - job.start_time) ELSE job.duration END) BETWEEN ? AND ?", now, filter.Duration.From, filter.Duration.To) + query = buildIntCondition("job.duration", filter.Duration, query) } if filter.MinRunningFor != nil { now := time.Now().Unix() // There does not seem to be a portable way to get the current unix timestamp across different DBs.
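A note on the jobQuery.go hunk above: the simpler predicate is only correct because, with this patch, `duration` is persisted and kept up to date for running jobs as well, so the filter no longer has to recompute elapsed time per row. The following standalone sketch contrasts the two generated query shapes; it assumes `buildIntCondition` expands a From/To filter into a plain BETWEEN like the sibling build*Condition helpers, and the bound and timestamp literals are made up for illustration.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	now := int64(1734600000) // illustrative "current" unix timestamp

	// Before: duration of running jobs was recomputed per row via CASE.
	before := sq.Select("job.id").From("job").
		Where("(CASE WHEN job.job_state = 'running' THEN (? - job.start_time) ELSE job.duration END) BETWEEN ? AND ?",
			now, 600, 3600)

	// After: the persisted column allows a simple range predicate that
	// the database can evaluate without a per-row CASE expression.
	after := sq.Select("job.id").From("job").
		Where("job.duration BETWEEN ? AND ?", 600, 3600)

	for _, q := range []sq.SelectBuilder{before, after} {
		s, args, _ := q.ToSql()
		fmt.Println(s, args)
	}
}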
diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 2267efb..1a3317f 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -182,6 +182,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType { return i } +// FIXME: Lots of redundant code. Needs refactoring func buildFilterPresets(query url.Values) map[string]interface{} { filterPresets := map[string]interface{}{} From f2d1a85afbf402314365a16f96432ecb67b70912 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 19 Dec 2024 06:14:35 +0100 Subject: [PATCH 298/443] Reformat json schema files --- init/clustercockpit.service | 2 +- pkg/schema/schemas/job-data.schema.json | 956 +++++++++--------- pkg/schema/schemas/job-meta.schema.json | 672 ++++++------ .../schemas/job-metric-data.schema.json | 426 ++++---- .../schemas/job-metric-statistics.schema.json | 62 +- pkg/schema/schemas/unit.schema.json | 74 +- 6 files changed, 1096 insertions(+), 1096 deletions(-) diff --git a/init/clustercockpit.service b/init/clustercockpit.service index 53fc429..0a9448d 100644 --- a/init/clustercockpit.service +++ b/init/clustercockpit.service @@ -1,5 +1,5 @@ [Unit] -Description=ClusterCockpit Web Server (Go edition) +Description=ClusterCockpit Web Server Documentation=https://github.com/ClusterCockpit/cc-backend Wants=network-online.target After=network-online.target diff --git a/pkg/schema/schemas/job-data.schema.json b/pkg/schema/schemas/job-data.schema.json index e8a5739..c0c492b 100644 --- a/pkg/schema/schemas/job-data.schema.json +++ b/pkg/schema/schemas/job-data.schema.json @@ -1,490 +1,490 @@ { - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-data.schema.json", - "title": "Job metric data list", - "description": "Collection of metric data of a HPC job", - "type": "object", - "properties": { - "mem_used": { - "description": "Memory capacity used", - "type": "object", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "flops_any": { - "description": "Total flop rate with DP flops scaled up", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "mem_bw": { - "description": "Main memory bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "net_bw": { - "description": "Total fast interconnect network bandwidth", - "type": "object", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "ipc": { - "description": "Instructions executed per cycle", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "cpu_user": { - 
"description": "CPU user active core utilization", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "cpu_load": { - "description": "CPU requested core utilization (load 1m)", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "flops_dp": { - "description": "Double precision flop rate", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "flops_sp": { - "description": "Single precision flops rate", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "vectorization_ratio": { - "description": "Fraction of arithmetic instructions using SIMD instructions", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "cpu_power": { - "description": "CPU power consumption", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "mem_power": { - "description": "Memory power consumption", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "acc_utilization": { - "description": "GPU utilization", - "properties": { - "accelerator": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "accelerator" - ] - }, - "acc_mem_used": { - "description": "GPU memory capacity used", - "properties": { - "accelerator": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "accelerator" - ] - }, - "acc_power": { - "description": "GPU power consumption", - "properties": { - "accelerator": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "accelerator" - ] - }, - "clock": { - "description": "Average core frequency", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "socket": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "memoryDomain": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "core": { - "$ref": "embedfs://job-metric-data.schema.json" - }, - "hwthread": { - "$ref": 
"embedfs://job-metric-data.schema.json" - } - }, - "minProperties": 1 - }, - "eth_read_bw": { - "description": "Ethernet read bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "eth_write_bw": { - "description": "Ethernet write bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "filesystems": { - "description": "Array of filesystems", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "nfs", - "lustre", - "gpfs", - "nvme", - "ssd", - "hdd", - "beegfs" - ] - }, - "read_bw": { - "description": "File system read bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "write_bw": { - "description": "File system write bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "read_req": { - "description": "File system read requests", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "write_req": { - "description": "File system write requests", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "inodes": { - "description": "File system write requests", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "accesses": { - "description": "File system open and close", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "fsync": { - "description": "File system fsync", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "create": { - "description": "File system create", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "open": { - "description": "File system open", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "close": { - "description": "File system close", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "seek": { - "description": "File system seek", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - } - }, - "required": [ - "name", - "type", - "read_bw", - "write_bw" - ] - }, - "minItems": 1 + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$id": "embedfs://job-data.schema.json", + "title": "Job metric data list", + "description": "Collection of metric data of a HPC job", + "type": "object", + "properties": { + "mem_used": { + "description": "Memory capacity used", + "type": "object", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" } + }, + "required": [ + "node" + ] }, - "ic_rcv_packets": { - "description": "Network interconnect read packets", + "flops_any": { + "description": "Total flop rate with DP flops scaled up", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + 
"memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "core": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "hwthread": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "mem_bw": { + "description": "Main memory bandwidth", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "net_bw": { + "description": "Total fast interconnect network bandwidth", + "type": "object", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "ipc": { + "description": "Instructions executed per cycle", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "core": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "hwthread": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "cpu_user": { + "description": "CPU user active core utilization", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "core": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "hwthread": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "cpu_load": { + "description": "CPU requested core utilization (load 1m)", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "flops_dp": { + "description": "Double precision flop rate", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "core": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "hwthread": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "flops_sp": { + "description": "Single precision flops rate", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "core": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "hwthread": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "vectorization_ratio": { + "description": "Fraction of arithmetic instructions using SIMD instructions", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "core": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "hwthread": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "cpu_power": { + "description": "CPU power consumption", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + 
"minProperties": 1 + }, + "mem_power": { + "description": "Memory power consumption", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "acc_utilization": { + "description": "GPU utilization", + "properties": { + "accelerator": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "accelerator" + ] + }, + "acc_mem_used": { + "description": "GPU memory capacity used", + "properties": { + "accelerator": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "accelerator" + ] + }, + "acc_power": { + "description": "GPU power consumption", + "properties": { + "accelerator": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "accelerator" + ] + }, + "clock": { + "description": "Average core frequency", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "socket": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "memoryDomain": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "core": { + "$ref": "embedfs://job-metric-data.schema.json" + }, + "hwthread": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "minProperties": 1 + }, + "eth_read_bw": { + "description": "Ethernet read bandwidth", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "eth_write_bw": { + "description": "Ethernet write bandwidth", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "filesystems": { + "description": "Array of filesystems", + "type": "array", + "items": { + "type": "object", "properties": { - "node": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "nfs", + "lustre", + "gpfs", + "nvme", + "ssd", + "hdd", + "beegfs" + ] + }, + "read_bw": { + "description": "File system read bandwidth", + "properties": { + "node": { "$ref": "embedfs://job-metric-data.schema.json" - } + } + }, + "required": [ + "node" + ] + }, + "write_bw": { + "description": "File system write bandwidth", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "read_req": { + "description": "File system read requests", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "write_req": { + "description": "File system write requests", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "inodes": { + "description": "File system write requests", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "accesses": { + "description": "File system open and close", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "fsync": { + "description": "File system fsync", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "create": { + "description": "File system create", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "open": { + "description": "File system open", + "properties": { + "node": { + "$ref": 
"embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "close": { + "description": "File system close", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "seek": { + "description": "File system seek", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + } }, "required": [ - "node" - ] - }, - "ic_send_packets": { - "description": "Network interconnect send packet", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "ic_read_bw": { - "description": "Network interconnect read bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" - ] - }, - "ic_write_bw": { - "description": "Network interconnect write bandwidth", - "properties": { - "node": { - "$ref": "embedfs://job-metric-data.schema.json" - } - }, - "required": [ - "node" + "name", + "type", + "read_bw", + "write_bw" ] + }, + "minItems": 1 + } + }, + "ic_rcv_packets": { + "description": "Network interconnect read packets", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } }, "required": [ - "cpu_user", - "cpu_load", - "mem_used", - "flops_any", - "mem_bw", - "net_bw", - "filesystems" + "node" ] + }, + "ic_send_packets": { + "description": "Network interconnect send packet", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "ic_read_bw": { + "description": "Network interconnect read bandwidth", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "ic_write_bw": { + "description": "Network interconnect write bandwidth", + "properties": { + "node": { + "$ref": "embedfs://job-metric-data.schema.json" + } + }, + "required": [ + "node" + ] + }, + "required": [ + "cpu_user", + "cpu_load", + "mem_used", + "flops_any", + "mem_bw", + "net_bw", + "filesystems" + ] } diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json index b907d7f..db7475c 100644 --- a/pkg/schema/schemas/job-meta.schema.json +++ b/pkg/schema/schemas/job-meta.schema.json @@ -1,351 +1,351 @@ { - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-meta.schema.json", - "title": "Job meta data", - "description": "Meta data information of a HPC job", - "type": "object", - "properties": { - "jobId": { - "description": "The unique identifier of a job", - "type": "integer" - }, - "user": { - "description": "The unique identifier of a user", + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$id": "embedfs://job-meta.schema.json", + "title": "Job meta data", + "description": "Meta data information of a HPC job", + "type": "object", + "properties": { + "jobId": { + "description": "The unique identifier of a job", + "type": "integer" + }, + "user": { + "description": "The unique identifier of a user", + "type": "string" + }, + "project": { + "description": "The unique identifier of a project", + "type": "string" + }, + "cluster": { + "description": "The unique identifier of a cluster", + "type": "string" + }, + "subCluster": { + "description": "The unique identifier of a sub cluster", + "type": "string" + }, + "partition": { + "description": "The Slurm partition to which the job was submitted", + "type": "string" + }, + 
"arrayJobId": { + "description": "The unique identifier of an array job", + "type": "integer" + }, + "numNodes": { + "description": "Number of nodes used", + "type": "integer", + "exclusiveMinimum": 0 + }, + "numHwthreads": { + "description": "Number of HWThreads used", + "type": "integer", + "exclusiveMinimum": 0 + }, + "numAcc": { + "description": "Number of accelerators used", + "type": "integer", + "exclusiveMinimum": 0 + }, + "exclusive": { + "description": "Specifies how nodes are shared. 0 - Shared among multiple jobs of multiple users, 1 - Job exclusive, 2 - Shared among multiple jobs of same user", + "type": "integer", + "minimum": 0, + "maximum": 2 + }, + "monitoringStatus": { + "description": "State of monitoring system during job run", + "type": "integer" + }, + "smt": { + "description": "SMT threads used by job", + "type": "integer" + }, + "walltime": { + "description": "Requested walltime of job in seconds", + "type": "integer", + "exclusiveMinimum": 0 + }, + "jobState": { + "description": "Final state of job", + "type": "string", + "enum": [ + "completed", + "failed", + "cancelled", + "stopped", + "out_of_memory", + "timeout" + ] + }, + "startTime": { + "description": "Start epoch time stamp in seconds", + "type": "integer", + "exclusiveMinimum": 0 + }, + "duration": { + "description": "Duration of job in seconds", + "type": "integer", + "exclusiveMinimum": 0 + }, + "resources": { + "description": "Resources used by job", + "type": "array", + "items": { + "type": "object", + "properties": { + "hostname": { "type": "string" - }, - "project": { - "description": "The unique identifier of a project", - "type": "string" - }, - "cluster": { - "description": "The unique identifier of a cluster", - "type": "string" - }, - "subCluster": { - "description": "The unique identifier of a sub cluster", - "type": "string" - }, - "partition": { - "description": "The Slurm partition to which the job was submitted", - "type": "string" - }, - "arrayJobId": { - "description": "The unique identifier of an array job", - "type": "integer" - }, - "numNodes": { - "description": "Number of nodes used", - "type": "integer", - "exclusiveMinimum": 0 - }, - "numHwthreads": { - "description": "Number of HWThreads used", - "type": "integer", - "exclusiveMinimum": 0 - }, - "numAcc": { - "description": "Number of accelerators used", - "type": "integer", - "exclusiveMinimum": 0 - }, - "exclusive": { - "description": "Specifies how nodes are shared. 
0 - Shared among multiple jobs of multiple users, 1 - Job exclusive, 2 - Shared among multiple jobs of same user", - "type": "integer", - "minimum": 0, - "maximum": 2 - }, - "monitoringStatus": { - "description": "State of monitoring system during job run", - "type": "integer" - }, - "smt": { - "description": "SMT threads used by job", - "type": "integer" - }, - "walltime": { - "description": "Requested walltime of job in seconds", - "type": "integer", - "exclusiveMinimum": 0 - }, - "jobState": { - "description": "Final state of job", + }, + "hwthreads": { + "type": "array", + "description": "List of OS processor ids", + "items": { + "type": "integer" + } + }, + "accelerators": { + "type": "array", + "description": "List of of accelerator device ids", + "items": { + "type": "string" + } + }, + "configuration": { "type": "string", - "enum": [ - "completed", - "failed", - "cancelled", - "stopped", - "out_of_memory", - "timeout" - ] + "description": "The configuration options of the node" + } }, - "startTime": { - "description": "Start epoch time stamp in seconds", - "type": "integer", - "exclusiveMinimum": 0 + "required": [ + "hostname" + ], + "minItems": 1 + } + }, + "metaData": { + "description": "Additional information about the job", + "type": "object", + "properties": { + "jobScript": { + "type": "string", + "description": "The batch script of the job" }, - "duration": { - "description": "Duration of job in seconds", - "type": "integer", - "exclusiveMinimum": 0 + "jobName": { + "type": "string", + "description": "Slurm Job name" }, - "resources": { - "description": "Resources used by job", - "type": "array", - "items": { - "type": "object", - "properties": { - "hostname": { - "type": "string" - }, - "hwthreads": { - "type": "array", - "description": "List of OS processor ids", - "items": { - "type": "integer" - } - }, - "accelerators": { - "type": "array", - "description": "List of of accelerator device ids", - "items": { - "type": "string" - } - }, - "configuration": { - "type": "string", - "description": "The configuration options of the node" - } - }, - "required": [ - "hostname" - ], - "minItems": 1 - } + "slurmInfo": { + "type": "string", + "description": "Additional slurm infos as show by scontrol show job" + } + } + }, + "tags": { + "description": "List of tags", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string" + } }, - "metaData": { - "description": "Additional information about the job", + "required": [ + "name", + "type" + ] + }, + "uniqueItems": true + }, + "statistics": { + "description": "Job statistic data", + "type": "object", + "properties": { + "mem_used": { + "description": "Memory capacity used (required)", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "cpu_load": { + "description": "CPU requested core utilization (load 1m) (required)", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "flops_any": { + "description": "Total flop rate with DP flops scaled up (required)", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "mem_bw": { + "description": "Main memory bandwidth (required)", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "net_bw": { + "description": "Total fast interconnect network bandwidth (required)", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "file_bw": { + "description": "Total file IO bandwidth (required)", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "ipc": { + 
"description": "Instructions executed per cycle", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "cpu_user": { + "description": "CPU user active core utilization", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "flops_dp": { + "description": "Double precision flop rate", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "flops_sp": { + "description": "Single precision flops rate", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "rapl_power": { + "description": "CPU power consumption", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "acc_used": { + "description": "GPU utilization", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "acc_mem_used": { + "description": "GPU memory capacity used", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "acc_power": { + "description": "GPU power consumption", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "clock": { + "description": "Average core frequency", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "eth_read_bw": { + "description": "Ethernet read bandwidth", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "eth_write_bw": { + "description": "Ethernet write bandwidth", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "ic_rcv_packets": { + "description": "Network interconnect read packets", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "ic_send_packets": { + "description": "Network interconnect send packet", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "ic_read_bw": { + "description": "Network interconnect read bandwidth", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "ic_write_bw": { + "description": "Network interconnect write bandwidth", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "filesystems": { + "description": "Array of filesystems", + "type": "array", + "items": { "type": "object", "properties": { - "jobScript": { - "type": "string", - "description": "The batch script of the job" - }, - "jobName": { - "type": "string", - "description": "Slurm Job name" - }, - "slurmInfo": { - "type": "string", - "description": "Additional slurm infos as show by scontrol show job" - } - } - }, - "tags": { - "description": "List of tags", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string" - } - }, - "required": [ - "name", - "type" + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "nfs", + "lustre", + "gpfs", + "nvme", + "ssd", + "hdd", + "beegfs" ] - }, - "uniqueItems": true - }, - "statistics": { - "description": "Job statistic data", - "type": "object", - "properties": { - "mem_used": { - "description": "Memory capacity used (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "cpu_load": { - "description": "CPU requested core utilization (load 1m) (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "flops_any": { - "description": "Total flop rate with DP flops scaled up (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "mem_bw": { - "description": "Main memory bandwidth (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "net_bw": { - "description": "Total fast interconnect network bandwidth (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "file_bw": { - "description": "Total file IO 
bandwidth (required)", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ipc": { - "description": "Instructions executed per cycle", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "cpu_user": { - "description": "CPU user active core utilization", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "flops_dp": { - "description": "Double precision flop rate", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "flops_sp": { - "description": "Single precision flops rate", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "rapl_power": { - "description": "CPU power consumption", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "acc_used": { - "description": "GPU utilization", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "acc_mem_used": { - "description": "GPU memory capacity used", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "acc_power": { - "description": "GPU power consumption", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "clock": { - "description": "Average core frequency", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "eth_read_bw": { - "description": "Ethernet read bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "eth_write_bw": { - "description": "Ethernet write bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_rcv_packets": { - "description": "Network interconnect read packets", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_send_packets": { - "description": "Network interconnect send packet", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_read_bw": { - "description": "Network interconnect read bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "ic_write_bw": { - "description": "Network interconnect write bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "filesystems": { - "description": "Array of filesystems", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "nfs", - "lustre", - "gpfs", - "nvme", - "ssd", - "hdd", - "beegfs" - ] - }, - "read_bw": { - "description": "File system read bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "write_bw": { - "description": "File system write bandwidth", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "read_req": { - "description": "File system read requests", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "write_req": { - "description": "File system write requests", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "inodes": { - "description": "File system write requests", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "accesses": { - "description": "File system open and close", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "fsync": { - "description": "File system fsync", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "create": { - "description": "File system create", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "open": { - "description": "File system open", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "close": { - "description": "File system close", - "$ref": "embedfs://job-metric-statistics.schema.json" - }, - "seek": { - "description": "File system seek", - "$ref": 
"embedfs://job-metric-statistics.schema.json" - } - }, - "required": [ - "name", - "type", - "read_bw", - "write_bw" - ] - }, - "minItems": 1 - } + }, + "read_bw": { + "description": "File system read bandwidth", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "write_bw": { + "description": "File system write bandwidth", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "read_req": { + "description": "File system read requests", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "write_req": { + "description": "File system write requests", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "inodes": { + "description": "File system write requests", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "accesses": { + "description": "File system open and close", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "fsync": { + "description": "File system fsync", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "create": { + "description": "File system create", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "open": { + "description": "File system open", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "close": { + "description": "File system close", + "$ref": "embedfs://job-metric-statistics.schema.json" + }, + "seek": { + "description": "File system seek", + "$ref": "embedfs://job-metric-statistics.schema.json" + } }, "required": [ - "cpu_user", - "cpu_load", - "mem_used", - "flops_any", - "mem_bw" + "name", + "type", + "read_bw", + "write_bw" ] + }, + "minItems": 1 } - }, - "required": [ - "jobId", - "user", - "project", - "cluster", - "subCluster", - "numNodes", - "exclusive", - "startTime", - "jobState", - "duration", - "resources", - "statistics" - ] + }, + "required": [ + "cpu_user", + "cpu_load", + "mem_used", + "flops_any", + "mem_bw" + ] + } + }, + "required": [ + "jobId", + "user", + "project", + "cluster", + "subCluster", + "numNodes", + "exclusive", + "startTime", + "jobState", + "duration", + "resources", + "statistics" + ] } diff --git a/pkg/schema/schemas/job-metric-data.schema.json b/pkg/schema/schemas/job-metric-data.schema.json index 3f2b934..ad499bf 100644 --- a/pkg/schema/schemas/job-metric-data.schema.json +++ b/pkg/schema/schemas/job-metric-data.schema.json @@ -1,216 +1,216 @@ { - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-metric-data.schema.json", - "title": "Job metric data", - "description": "Metric data of a HPC job", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "timestep": { - "description": "Measurement interval in seconds", - "type": "integer" - }, - "thresholds": { - "description": "Metric thresholds for specific system", - "type": "object", - "properties": { - "peak": { - "type": "number" - }, - "normal": { - "type": "number" - }, - "caution": { - "type": "number" - }, - "alert": { - "type": "number" - } - } - }, - "statisticsSeries": { - "type": "object", - "description": "Statistics series across topology", - "properties": { - "min": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "max": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "mean": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "percentiles": { - "type": "object", - "properties": { - "10": { - "type": "array", - "items": 
{ - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "20": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "30": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "40": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "50": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "60": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "70": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "80": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "90": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "25": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - }, - "75": { - "type": "array", - "items": { - "type": "number", - "minimum": 0 - }, - "minItems": 3 - } - } - } - } - }, - "series": { - "type": "array", - "items": { - "type": "object", - "properties": { - "hostname": { - "type": "string" - }, - "id": { - "type": "string" - }, - "statistics": { - "type": "object", - "description": "Statistics across time dimension", - "properties": { - "avg": { - "description": "Series average", - "type": "number", - "minimum": 0 - }, - "min": { - "description": "Series minimum", - "type": "number", - "minimum": 0 - }, - "max": { - "description": "Series maximum", - "type": "number", - "minimum": 0 - } - }, - "required": [ - "avg", - "min", - "max" - ] - }, - "data": { - "type": "array", - "contains": { - "type": "number", - "minimum": 0 - }, - "minItems": 1 - } - }, - "required": [ - "hostname", - "statistics", - "data" - ] - } - } + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$id": "embedfs://job-metric-data.schema.json", + "title": "Job metric data", + "description": "Metric data of a HPC job", + "type": "object", + "properties": { + "unit": { + "description": "Metric unit", + "$ref": "embedfs://unit.schema.json" }, - "required": [ - "unit", - "timestep", - "series" - ] + "timestep": { + "description": "Measurement interval in seconds", + "type": "integer" + }, + "thresholds": { + "description": "Metric thresholds for specific system", + "type": "object", + "properties": { + "peak": { + "type": "number" + }, + "normal": { + "type": "number" + }, + "caution": { + "type": "number" + }, + "alert": { + "type": "number" + } + } + }, + "statisticsSeries": { + "type": "object", + "description": "Statistics series across topology", + "properties": { + "min": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "max": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "mean": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "percentiles": { + "type": "object", + "properties": { + "10": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "20": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "30": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "40": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "50": { + "type": "array", + "items": { + "type": 
"number", + "minimum": 0 + }, + "minItems": 3 + }, + "60": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "70": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "80": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "90": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "25": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + }, + "75": { + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 3 + } + } + } + } + }, + "series": { + "type": "array", + "items": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "id": { + "type": "string" + }, + "statistics": { + "type": "object", + "description": "Statistics across time dimension", + "properties": { + "avg": { + "description": "Series average", + "type": "number", + "minimum": 0 + }, + "min": { + "description": "Series minimum", + "type": "number", + "minimum": 0 + }, + "max": { + "description": "Series maximum", + "type": "number", + "minimum": 0 + } + }, + "required": [ + "avg", + "min", + "max" + ] + }, + "data": { + "type": "array", + "contains": { + "type": "number", + "minimum": 0 + }, + "minItems": 1 + } + }, + "required": [ + "hostname", + "statistics", + "data" + ] + } + } + }, + "required": [ + "unit", + "timestep", + "series" + ] } diff --git a/pkg/schema/schemas/job-metric-statistics.schema.json b/pkg/schema/schemas/job-metric-statistics.schema.json index 3412c23..f753ed3 100644 --- a/pkg/schema/schemas/job-metric-statistics.schema.json +++ b/pkg/schema/schemas/job-metric-statistics.schema.json @@ -1,34 +1,34 @@ { - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://job-metric-statistics.schema.json", - "title": "Job statistics", - "description": "Format specification for job metric statistics", - "type": "object", - "properties": { - "unit": { - "description": "Metric unit", - "$ref": "embedfs://unit.schema.json" - }, - "avg": { - "description": "Job metric average", - "type": "number", - "minimum": 0 - }, - "min": { - "description": "Job metric minimum", - "type": "number", - "minimum": 0 - }, - "max": { - "description": "Job metric maximum", - "type": "number", - "minimum": 0 - } + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$id": "embedfs://job-metric-statistics.schema.json", + "title": "Job statistics", + "description": "Format specification for job metric statistics", + "type": "object", + "properties": { + "unit": { + "description": "Metric unit", + "$ref": "embedfs://unit.schema.json" }, - "required": [ - "unit", - "avg", - "min", - "max" - ] + "avg": { + "description": "Job metric average", + "type": "number", + "minimum": 0 + }, + "min": { + "description": "Job metric minimum", + "type": "number", + "minimum": 0 + }, + "max": { + "description": "Job metric maximum", + "type": "number", + "minimum": 0 + } + }, + "required": [ + "unit", + "avg", + "min", + "max" + ] } diff --git a/pkg/schema/schemas/unit.schema.json b/pkg/schema/schemas/unit.schema.json index 9ee781c..c0a3df3 100644 --- a/pkg/schema/schemas/unit.schema.json +++ b/pkg/schema/schemas/unit.schema.json @@ -1,40 +1,40 @@ { - "$schema": "http://json-schema.org/draft/2020-12/schema", - "$id": "embedfs://unit.schema.json", - "title": "Metric unit", - "description": "Format specification for job metric units", - "type": "object", 
- "properties": { - "base": { - "description": "Metric base unit", - "type": "string", - "enum": [ - "B", - "F", - "B/s", - "F/s", - "CPI", - "IPC", - "Hz", - "W", - "°C", - "" - ] - }, - "prefix": { - "description": "Unit prefix", - "type": "string", - "enum": [ - "K", - "M", - "G", - "T", - "P", - "E" - ] - } + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$id": "embedfs://unit.schema.json", + "title": "Metric unit", + "description": "Format specification for job metric units", + "type": "object", + "properties": { + "base": { + "description": "Metric base unit", + "type": "string", + "enum": [ + "B", + "F", + "B/s", + "F/s", + "CPI", + "IPC", + "Hz", + "W", + "°C", + "" + ] }, - "required": [ - "base" - ] + "prefix": { + "description": "Unit prefix", + "type": "string", + "enum": [ + "K", + "M", + "G", + "T", + "P", + "E" + ] + } + }, + "required": [ + "base" + ] } From fd94d85edf9606dba70cb2233fb19a51f78ff080 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 19 Dec 2024 06:24:08 +0100 Subject: [PATCH 299/443] Compute duration for running jobs on the fly --- internal/repository/job.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/repository/job.go b/internal/repository/job.go index 2c206b6..11f3b46 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -80,6 +80,10 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { job.RawFootprint = nil job.StartTime = time.Unix(job.StartTimeUnix, 0) + // Always ensure accurate duration for running jobs + if job.State == schema.JobStateRunning { + job.Duration = int32(time.Since(job.StartTime).Seconds()) + } return job, nil } From 48e95fbdb05367bb332e0b7d11258a187c5f8b01 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 19 Dec 2024 06:34:35 +0100 Subject: [PATCH 300/443] Prepare release 1.4.2 --- Makefile | 2 +- ReleaseNotes.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 48da4e0..52f0d39 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ TARGET = ./cc-backend VAR = ./var CFG = config.json .env FRONTEND = ./web/frontend -VERSION = 1.4.1 +VERSION = 1.4.2 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development') CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S") LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}' diff --git a/ReleaseNotes.md b/ReleaseNotes.md index bb25b5d..0d44ccc 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -1,4 +1,4 @@ -# `cc-backend` version 1.4.1 +# `cc-backend` version 1.4.2 Supports job archive version 2 and database version 8. From 53dfe9e4f55660a1a02a9e2c817128dc233f5286 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 19 Dec 2024 11:00:12 +0100 Subject: [PATCH 301/443] fix: footprint peak is default if footprint stat is avg --- .../src/generic/helper/JobFootprint.svelte | 40 ++++--------------- 1 file changed, 8 insertions(+), 32 deletions(-) diff --git a/web/frontend/src/generic/helper/JobFootprint.svelte b/web/frontend/src/generic/helper/JobFootprint.svelte index 187eff9..f9bc165 100644 --- a/web/frontend/src/generic/helper/JobFootprint.svelte +++ b/web/frontend/src/generic/helper/JobFootprint.svelte @@ -9,12 +9,11 @@ --> @@ -93,7 +69,7 @@ const unit = (fmc?.unit?.prefix ? fmc.unit.prefix : "") + (fmc?.unit?.base ? 
fmc.unit.base : "") // Threshold / -Differences - const fmt = findJobThresholds(job, fmc); + const fmt = findJobThresholds(job, jf.stat, fmc); if (jf.name === "flops_any") fmt.peak = round(fmt.peak * 0.85, 0); // Define basic data -> Value: Use as Provided From 9e2d981c60cf7ed79fd8a295af2e784314418ca8 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 19 Dec 2024 11:12:40 +0100 Subject: [PATCH 302/443] Add notice about footprint to ReleaseNotes --- ReleaseNotes.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 0d44ccc..2659964 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -12,7 +12,8 @@ For release specific notes visit the [ClusterCockpit Documentation](https://clus migration might require several hours! - You need to adapt the `cluster.json` configuration files in the job-archive, add new required attributes to the metric list and after that edit - `./job-archive/version.txt` to version 2. + `./job-archive/version.txt` to version 2. Only metrics that have the footprint + attribute set can be filtered and show up in the footprint UI and polar plot. - Continuous scrolling is default now in all job lists. You can change this back to paging globally, also every user can configure to use paging or continuous scrolling individually. From aa915d639d86cd4d80a01bb10f0de9f3f8e67dec Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 20 Dec 2024 13:02:21 +0100 Subject: [PATCH 303/443] feat: add deselect all button to jobStatefilter --- web/frontend/src/generic/filters/JobStates.svelte | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/web/frontend/src/generic/filters/JobStates.svelte b/web/frontend/src/generic/filters/JobStates.svelte index 9255003..d903abc 100644 --- a/web/frontend/src/generic/filters/JobStates.svelte +++ b/web/frontend/src/generic/filters/JobStates.svelte @@ -77,6 +77,13 @@ dispatch("set-filter", { states }); }}>Close & Apply + {/if} @@ -431,6 +432,7 @@ configName="job_view_selectedMetrics" bind:metrics={selectedMetrics} bind:isOpen={isMetricsSelectionOpen} + bind:allMetrics={availableMetrics} /> {/if} diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index d68d237..21d9b3b 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -18,6 +18,8 @@ InputGroup, InputGroupText, Icon, + Row, + Col } from "@sveltestrap/sveltestrap"; import { maxScope } from "../generic/utils.js"; import StatsTableEntry from "./StatsTableEntry.svelte"; @@ -26,7 +28,7 @@ export let job; export let jobMetrics; - const allMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort() + const sortedJobMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort() const scopesForMetric = (metric) => jobMetrics.filter((jm) => jm.name == metric).map((jm) => jm.scope); @@ -34,11 +36,12 @@ selectedScopes = {}, sorting = {}, isMetricSelectionOpen = false, + availableMetrics = new Set(), selectedMetrics = getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`] || getContext("cc-config")["job_view_nodestats_selectedMetrics"]; - for (let metric of allMetrics) { + for (let metric of sortedJobMetrics) { // Not Exclusive or Multi-Node: get maxScope directly (mostly: node) // -> Else: Load smallest available granularity as default as per availability const availableScopes = scopesForMetric(metric); @@ -95,15 +98,19 @@ }; + + + + + +
- + {#if groupSelection.key == "user"} {:else} diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 63a69f5..1249e0c 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -177,6 +177,7 @@ groupBy: USER ) { id + name totalJobs totalNodes totalCores @@ -518,7 +519,7 @@ From 0fe0461340aac88efb01d5ef9689856ebcdb2ab4 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 28 Feb 2025 14:00:27 +0100 Subject: [PATCH 364/443] remove conflicting variable layer in metric histo select --- web/frontend/src/Status.root.svelte | 12 ++++++------ web/frontend/src/User.root.svelte | 12 ++++++------ .../src/generic/select/HistogramSelection.svelte | 12 ++++-------- 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index 1249e0c..a310421 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -75,7 +75,7 @@ ); let isHistogramSelectionOpen = false; - $: metricsInHistograms = cluster + $: selectedHistograms = cluster ? ccconfig[`user_view_histogramMetrics:${cluster}`] || ( ccconfig['user_view_histogramMetrics'] || [] ) : ccconfig['user_view_histogramMetrics'] || []; @@ -90,7 +90,7 @@ $metrics: [String!] $from: Time! $to: Time! - $metricsInHistograms: [String!] + $selectedHistograms: [String!] ) { nodeMetrics( cluster: $cluster @@ -116,7 +116,7 @@ } } - stats: jobsStatistics(filter: $filter, metrics: $metricsInHistograms) { + stats: jobsStatistics(filter: $filter, metrics: $selectedHistograms) { histDuration { count value @@ -157,7 +157,7 @@ from: from.toISOString(), to: to.toISOString(), filter: [{ state: ["running"] }, { cluster: { eq: cluster } }], - metricsInHistograms: metricsInHistograms, + selectedHistograms: selectedHistograms, }, }); @@ -653,7 +653,7 @@ - {#if metricsInHistograms} + {#if selectedHistograms} {#key $mainQuery.data.stats[0].histMetrics} diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 77c4e01..0e6a5b8 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -68,7 +68,7 @@ let durationBinOptions = ["1m","10m","1h","6h","12h"]; let metricBinOptions = [10, 20, 50, 100]; - $: metricsInHistograms = selectedCluster + $: selectedHistograms = selectedCluster ? 
ccconfig[`user_view_histogramMetrics:${selectedCluster}`] || ( ccconfig['user_view_histogramMetrics'] || [] ) : ccconfig['user_view_histogramMetrics'] || []; @@ -76,8 +76,8 @@ $: stats = queryStore({ client: client, query: gql` - query ($jobFilters: [JobFilter!]!, $metricsInHistograms: [String!], $numDurationBins: String, $numMetricBins: Int) { - jobsStatistics(filter: $jobFilters, metrics: $metricsInHistograms, numDurationBins: $numDurationBins , numMetricBins: $numMetricBins ) { + query ($jobFilters: [JobFilter!]!, $selectedHistograms: [String!], $numDurationBins: String, $numMetricBins: Int) { + jobsStatistics(filter: $jobFilters, metrics: $selectedHistograms, numDurationBins: $numDurationBins , numMetricBins: $numMetricBins ) { totalJobs shortJobs totalWalltime @@ -104,7 +104,7 @@ } } `, - variables: { jobFilters, metricsInHistograms, numDurationBins, numMetricBins }, + variables: { jobFilters, selectedHistograms, numDurationBins, numMetricBins }, }); onMount(() => filterComponent.updateFilters()); @@ -290,7 +290,7 @@ -{#if metricsInHistograms?.length > 0} +{#if selectedHistograms?.length > 0} {#if $stats.error} @@ -357,6 +357,6 @@ diff --git a/web/frontend/src/generic/select/HistogramSelection.svelte b/web/frontend/src/generic/select/HistogramSelection.svelte index 48971b0..604fc95 100644 --- a/web/frontend/src/generic/select/HistogramSelection.svelte +++ b/web/frontend/src/generic/select/HistogramSelection.svelte @@ -3,7 +3,7 @@ Properties: - `cluster String`: Currently selected cluster - - `metricsInHistograms [String]`: The currently selected metrics to display as histogram + - `selectedHistograms [String]`: The currently selected metrics to display as histogram - ìsOpen Bool`: Is selection opened --> @@ -21,13 +21,12 @@ import { gql, getContextClient, mutationStore } from "@urql/svelte"; export let cluster; - export let metricsInHistograms; + export let selectedHistograms; export let isOpen; const client = getContextClient(); const initialized = getContext("initialized"); - function loadHistoMetrics(isInitialized, thisCluster) { if (!isInitialized) return []; @@ -43,8 +42,6 @@ } } - $: pendingMetrics = [...metricsInHistograms]; // Copy on change from above - const updateConfigurationMutation = ({ name, value }) => { return mutationStore({ client: client, @@ -69,13 +66,12 @@ } function closeAndApply() { - metricsInHistograms = [...pendingMetrics]; // Set for parent isOpen = !isOpen; updateConfiguration({ name: cluster ? 
`user_view_histogramMetrics:${cluster}` : "user_view_histogramMetrics", - value: metricsInHistograms, + value: selectedHistograms, }); } @@ -89,7 +85,7 @@ {#each availableMetrics as metric (metric)} - + {metric} {/each} From c661baf058e4de122d600b51ab7ecae7a6be13bf Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 28 Feb 2025 14:36:19 +0100 Subject: [PATCH 365/443] Load new default metrics config from working directory --- internal/config/default_metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/config/default_metrics.go b/internal/config/default_metrics.go index 83015d4..b0a0cc5 100644 --- a/internal/config/default_metrics.go +++ b/internal/config/default_metrics.go @@ -16,7 +16,7 @@ type DefaultMetricsConfig struct { } func LoadDefaultMetricsConfig() (*DefaultMetricsConfig, error) { - filePath := "configs/default_metrics.json" + filePath := "default_metrics.json" if _, err := os.Stat(filePath); os.IsNotExist(err) { return nil, nil } From b31aea7bc5492da69d58c00cc66742d04be17579 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 28 Feb 2025 14:40:27 +0100 Subject: [PATCH 366/443] revert back to using globalMetrics in jobView metric default select --- web/frontend/src/Job.root.svelte | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 43d4f10..b641a43 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -129,7 +129,12 @@ const pendingMetrics = [ ...(ccconfig[`job_view_selectedMetrics:${job.cluster}`] || - ccconfig[`job_view_selectedMetrics`] + $initq.data.globalMetrics.reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster)) { + names.push(gm.name); + } + return names; + }, []) ), ...(ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] || ccconfig[`job_view_nodestats_selectedMetrics`] From d7aefe0cf0b206a288bff8330d0814114f55d025 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 28 Feb 2025 14:55:32 +0100 Subject: [PATCH 367/443] move user names in top lists to tooltip --- web/frontend/src/Analysis.root.svelte | 12 ++++++++++-- web/frontend/src/Status.root.svelte | 12 ++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index 1617ccd..861c0ec 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -20,6 +20,7 @@ Card, Table, Icon, + Tooltip } from "@sveltestrap/sveltestrap"; import { init, @@ -425,11 +426,18 @@ {#if groupSelection.key == "user"} - + {#if te?.name} + {te.name} + {/if} {:else} - + {#if tu?.name} + {tu.name} + {/if} {/each} From 6640e93ce98884de56c965b7da66bb3f61be05aa Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 28 Feb 2025 15:12:42 +0100 Subject: [PATCH 368/443] edit new features for 1.4.3 releasenotes --- ReleaseNotes.md | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 0015727..ef1082f 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -22,20 +22,19 @@ For release specific notes visit the [ClusterCockpit Documentation](https://clus ## New features -- Tags have a scope now. Tags created by a basic user are only visible by that - user. Tags created by an admin/support role can be configured to be visible by - all users (global scope) or only be admin/support role. 
-- Re-sampling support for running (requires a recent `cc-metric-store`) and - archived jobs. This greatly speeds up loading of large or very long jobs. You - need to add the new configuration key `enable-resampling` to the `config.json` - file. -- For finished jobs a total job energy is shown in the job view. -- Continuous scrolling in job lists is default now. -- All database queries (especially for sqlite) were optimized resulting in - dramatically faster load times. -- A performance and energy footprint can be freely configured on a per - subcluster base. One can filter for footprint statistics for running and - finished jobs. +- Detailed Node List + - Adds new routes `/systems/list/$cluster` and `/systems/list/$cluster/$subcluster` + - Displays live, scoped metric data requested from the nodes independent of jobs +- Color Blind Mode + - Set on a per-user basis in options + - Applies to plot data, plot background color, statsseries colors, roofline timescale +- Histogram Bin Select in User-View + - Metric-Histograms: `10 Bins` now default, selectable options `20, 50, 100` + - Job-Duration-Histogram: `48h in 1h Bins` now default, selectable options: + - `60 minutes in 1 minute Bins` + - `12 hours in 10 minute Bins` + - `3 days in 6 hour Bins` + - `7 days in 12 hour Bins` ## Known issues
From ec895e1d9e1b789e76f08da399922e7244b215d2 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 3 Mar 2025 09:36:37 +0100 Subject: [PATCH 369/443] Add fallback case to nodeInfo --- web/frontend/src/systems/nodelist/NodeInfo.svelte | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/web/frontend/src/systems/nodelist/NodeInfo.svelte b/web/frontend/src/systems/nodelist/NodeInfo.svelte index ad6c98e..6b14656 100644 --- a/web/frontend/src/systems/nodelist/NodeInfo.svelte +++ b/web/frontend/src/systems/nodelist/NodeInfo.svelte @@ -102,6 +102,19 @@ Shared [hunk body garbled in extraction: the thirteen added lines introduce an {:else if nodeJobsData.jobs.count >= 1} fallback branch that renders a generic "Status" entry when the job items carry no shared/exclusive detail] {:else}
From c21d7cf101ad7e4a1e4d6780db519351c420b14a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 3 Mar 2025 11:21:54 +0100 Subject: [PATCH 370/443] fix and review quick starttime select handling --- web/frontend/src/generic/Filters.svelte | 26 ++++-- .../src/generic/filters/StartTime.svelte | 113 +++++++++++------- 2 files changed, 86 insertions(+), 53 deletions(-) diff --git a/web/frontend/src/generic/Filters.svelte b/web/frontend/src/generic/Filters.svelte index 481211b..4a9be3e 100644 --- a/web/frontend/src/generic/Filters.svelte +++ b/web/frontend/src/generic/Filters.svelte @@ -45,6 +45,14 @@ export let startTimeQuickSelect = false; export let matchedJobs = -2; + const startTimeSelectOptions = [ + { range: "", rangeLabel: "No Selection"}, + { range: "last6h", rangeLabel: "Last 6hrs"}, + { range: "last24h", rangeLabel: "Last 24hrs"}, + { range: "last7d", rangeLabel: "Last 7 days"}, + { range: "last30d", rangeLabel: "Last 30 days"} + ]; + let filters = { projectMatch: filterPresets.projectMatch || "contains", userMatch: filterPresets.userMatch || "contains", nodeMatch: filterPresets.nodeMatch || "eq", cluster: filterPresets.cluster || null, partition: filterPresets.partition || null, states: filterPresets.states || filterPresets.state ?
[filterPresets.state].flat() : allJobStates, - startTime: filterPresets.startTime || { from: null, to: null }, + startTime: filterPresets.startTime || { from: null, to: null, range: ""}, tags: filterPresets.tags || [], duration: filterPresets.duration || { lessThan: null, @@ -268,16 +276,17 @@ {#if startTimeQuickSelect} Start Time Quick Selection - {#each [{ text: "Last 6hrs", range: "last6h" }, { text: "Last 24hrs", range: "last24h" }, { text: "Last 7 days", range: "last7d" }, { text: "Last 30 days", range: "last30d" }] as { text, range }} + {#each startTimeSelectOptions.filter((stso) => stso.range !== "") as { rangeLabel, range }} { + filters.startTime.from = null + filters.startTime.to = null filters.startTime.range = range; - filters.startTime.text = text; updateFilters(); }} > - {text} + {rangeLabel} {/each} {/if} @@ -316,7 +325,7 @@ {#if filters.startTime.range} (isStartTimeOpen = true)}> - {filters?.startTime?.text ? filters.startTime.text : filters.startTime.range } + {startTimeSelectOptions.find((stso) => stso.range === filters.startTime.range).rangeLabel } {/if} @@ -414,11 +423,8 @@ bind:from={filters.startTime.from} bind:to={filters.startTime.to} bind:range={filters.startTime.range} - on:set-filter={() => { - delete filters.startTime["text"]; - delete filters.startTime["range"]; - updateFilters(); - }} + {startTimeSelectOptions} + on:set-filter={() => updateFilters()} /> (isOpen = !isOpen)}> @@ -92,52 +89,82 @@ {#if range !== ""}

[The body of this StartTime.svelte hunk was garbled in extraction and its Svelte markup is not recoverable verbatim. What survives of the new version: a "Current Range" block, shown only while a range is set, containing a select input that iterates {#each startTimeSelectOptions as { rangeLabel, range }}; the unchanged "From" and "To" date/time input rows; and closing controls that switch on {#if pendingRange !== ""} between applying the pending quick-select range and applying the pending from/to timestamps.]
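Editor's note: the garbled hunk above is frontend markup, but the quick-select keys it wires up ("", "last6h", "last24h", "last7d", "last30d") ultimately have to be resolved into an absolute time window somewhere. As a rough illustration — this is not code from this patch series; only the key names are taken from startTimeSelectOptions above, everything else is assumed — such a mapping could look like this minimal, self-contained Go sketch:

    package main

    import (
    	"fmt"
    	"time"
    )

    // resolveRange maps a quick-select key to a [from, to) window ending now.
    // An empty or unknown key reports ok == false, mirroring "No Selection".
    func resolveRange(key string, now time.Time) (from, to time.Time, ok bool) {
    	durations := map[string]time.Duration{
    		"last6h":  6 * time.Hour,
    		"last24h": 24 * time.Hour,
    		"last7d":  7 * 24 * time.Hour,
    		"last30d": 30 * 24 * time.Hour,
    	}
    	d, ok := durations[key]
    	if !ok {
    		return time.Time{}, time.Time{}, false
    	}
    	return now.Add(-d), now, true
    }

    func main() {
    	if from, to, ok := resolveRange("last6h", time.Now()); ok {
    		fmt.Printf("from=%s to=%s\n", from.Format(time.RFC3339), to.Format(time.RFC3339))
    	}
    }

Keeping the range key itself in the filter, rather than materializing from/to in the client (the patch above nulls from/to when a range is picked), has the advantage that a stored or shared filter stays relative to "now".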
From 3ab8973895441481c8ce5b2494b87a284d4bb7a5 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 3 Mar 2025 12:44:18 +0100 Subject: [PATCH 371/443] use extendedLegend in nodeList for all non-idle nodes - changed from "use for shared nodes only" --- web/frontend/src/systems/nodelist/NodeListRow.svelte | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte index a1e4a54..07e5556 100644 --- a/web/frontend/src/systems/nodelist/NodeListRow.svelte +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -98,8 +98,8 @@ let extendedLegendData = null; $: if ($nodeJobsData?.data) { - // Get Shared State of Node: Only Build extended Legend For Shared Nodes - if ($nodeJobsData.data.jobs.count >= 1 && !$nodeJobsData.data.jobs.items[0].exclusive) { + // Build Extended for allocated nodes [Commented: Only Build extended Legend For Shared Nodes] + if ($nodeJobsData.data.jobs.count >= 1) { // "&& !$nodeJobsData.data.jobs.items[0].exclusive)" const accSet = Array.from(new Set($nodeJobsData.data.jobs.items .map((i) => i.resources .filter((r) => r.hostname === nodeData.host) .map((r) => r.accelerators) ) )).flat(2)
From 419bc2747b9e774b5404c34f59b17d3609fe5299 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 3 Mar 2025 16:53:19 +0100 Subject: [PATCH 372/443] fix nodeInfo null error --- web/frontend/src/systems/nodelist/NodeListRow.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte index 07e5556..5e6e4ac 100644 --- a/web/frontend/src/systems/nodelist/NodeListRow.svelte +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -105,7 +105,7 @@ .filter((r) => r.hostname === nodeData.host) .map((r) => r.accelerators) ) - )).flat(2) + )).flat(2).filter(a => a) // Last filter(): Exclude Null, Undefined and empty Strings extendedLegendData = {} for (const accId of accSet) {
From 5c9d4ffa9a1f70ea4ff3ad2c3185ef9c7cc87168 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 3 Mar 2025 17:00:33 +0100 Subject: [PATCH 373/443] clarify and simplify earlier change --- web/frontend/src/systems/nodelist/NodeListRow.svelte | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte index 5e6e4ac..5202573 100644 --- a/web/frontend/src/systems/nodelist/NodeListRow.svelte +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -102,10 +102,10 @@ if ($nodeJobsData.data.jobs.count >= 1) { // "&& !$nodeJobsData.data.jobs.items[0].exclusive)" const accSet = Array.from(new Set($nodeJobsData.data.jobs.items .map((i) => i.resources - .filter((r) => r.hostname === nodeData.host) - .map((r) => r.accelerators) + .filter((r) => (r.hostname === nodeData.host) && r?.accelerators) + .map((r) => r?.accelerators) ) )).flat(2)
From fcc9e17664ecf5d05206bda34e054c6fc48abccb Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 3 Mar 2025 17:24:54 +0100 Subject: [PATCH 374/443] change: remove metrics from job view select if unavailable on subCluster --- web/frontend/src/Job.root.svelte | 3 ++- web/frontend/src/generic/select/MetricSelection.svelte | 7 ++++++- 2 files changed, 8
insertions(+), 2 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index b641a43..f2df916 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -130,7 +130,7 @@ const pendingMetrics = [ ...(ccconfig[`job_view_selectedMetrics:${job.cluster}`] || $initq.data.globalMetrics.reduce((names, gm) => { - if (gm.availability.find((av) => av.cluster === job.cluster)) { + if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { names.push(gm.name); } return names; @@ -434,6 +434,7 @@ {#if $initq.data} av.cluster === cluster)) allMetrics.add(gm.name); + if (subCluster == null) { + if (gm.availability.find((av) => av.cluster === cluster)) allMetrics.add(gm.name); + } else { + if (gm.availability.find((av) => av.cluster === cluster && av.subClusters.includes(subCluster))) allMetrics.add(gm.name); + } } } newMetricsOrder = [...allMetrics].filter((m) => !metrics.includes(m)); From e733688fd03b41ccc6bb6ffac47aa8c1abd69bad Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 3 Mar 2025 17:54:34 +0100 Subject: [PATCH 375/443] add new subCluster prop to statsTable metric select --- web/frontend/src/job/StatsTable.svelte | 1 + 1 file changed, 1 insertion(+) diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index 21d9b3b..b6b0f85 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -169,6 +169,7 @@ Date: Thu, 27 Feb 2025 15:11:07 +0100 Subject: [PATCH 376/443] allow /start_job/ with 0 second duration Apparently it is possible to get this for very short jobs. --- internal/api/rest.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index b76da0b..fd2f86d 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -1008,8 +1008,8 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo return } - if job == nil || job.StartTime.Unix() >= req.StopTime { - handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger than startTime %d", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix()), http.StatusBadRequest, rw) + if job == nil || job.StartTime.Unix() > req.StopTime { + handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger/equal than startTime %d", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix()), http.StatusBadRequest, rw) return } From 6454576417ca9048435390a6a3c30415d1a15951 Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 17:39:38 +0100 Subject: [PATCH 377/443] add node_fail job state --- api/swagger.json | 6 ++++-- api/swagger.yaml | 2 ++ internal/api/docs.go | 6 ++++-- pkg/schema/job.go | 4 +++- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/api/swagger.json b/api/swagger.json index 51b22c8..9035beb 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -1786,7 +1786,8 @@ "stopped", "timeout", "preempted", - "out_of_memory" + "out_of_memory", + "node_fail" ], "x-enum-varnames": [ "JobStateRunning", @@ -1796,7 +1797,8 @@ "JobStateStopped", "JobStateTimeout", "JobStatePreempted", - "JobStateOutOfMemory" + "JobStateOutOfMemory", + "JobStateNodeFail" ] }, "schema.JobStatistics": { diff --git a/api/swagger.yaml b/api/swagger.yaml index f5f0081..20fa031 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -395,6 +395,7 @@ definitions: - timeout - preempted - out_of_memory + - node_fail type: 
string x-enum-varnames: - JobStateRunning @@ -405,6 +406,7 @@ definitions: - JobStateTimeout - JobStatePreempted - JobStateOutOfMemory + - JobStateNodeFail schema.JobStatistics: description: Specification for job metric statistics. properties: diff --git a/internal/api/docs.go b/internal/api/docs.go index 642003f..6f034b4 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -1792,7 +1792,8 @@ const docTemplate = `{ "stopped", "timeout", "preempted", - "out_of_memory" + "out_of_memory", + "node_fail" ], "x-enum-varnames": [ "JobStateRunning", @@ -1802,7 +1803,8 @@ const docTemplate = `{ "JobStateStopped", "JobStateTimeout", "JobStatePreempted", - "JobStateOutOfMemory" + "JobStateOutOfMemory", + "JobStateNodeFail" ] }, "schema.JobStatistics": { diff --git a/pkg/schema/job.go b/pkg/schema/job.go index 5e3110b..b6ac44d 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -143,6 +143,7 @@ const ( JobStateTimeout JobState = "timeout" JobStatePreempted JobState = "preempted" JobStateOutOfMemory JobState = "out_of_memory" + JobStateNodeFail JobState = "node_fail" ) func (e *JobState) UnmarshalGQL(v interface{}) error { @@ -171,5 +172,6 @@ func (e JobState) Valid() bool { e == JobStateStopped || e == JobStateTimeout || e == JobStatePreempted || - e == JobStateOutOfMemory + e == JobStateOutOfMemory || + e == JobStateNodeFail } From 65d2698af4a104fbd5ff1faf0f462e6a50b6a466 Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 17:47:49 +0100 Subject: [PATCH 378/443] add node_fail state to database schema --- internal/repository/migrations/mysql/01_init-schema.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/migrations/mysql/01_init-schema.up.sql b/internal/repository/migrations/mysql/01_init-schema.up.sql index 3a6930c..16f7627 100644 --- a/internal/repository/migrations/mysql/01_init-schema.up.sql +++ b/internal/repository/migrations/mysql/01_init-schema.up.sql @@ -13,7 +13,7 @@ CREATE TABLE IF NOT EXISTS job ( walltime INT NOT NULL DEFAULT 0, job_state VARCHAR(255) NOT NULL CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', - 'stopped', 'timeout', 'preempted', 'out_of_memory')), + 'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')), meta_data TEXT, -- JSON resources TEXT NOT NULL, -- JSON From d4336b0dcb4e054a39033fc681634c285d08d4d8 Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 18:00:02 +0100 Subject: [PATCH 379/443] add missing node_fail to db constraints --- .../repository/migrations/sqlite3/04_add-constraints.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql index 06b1a9b..a6898c3 100644 --- a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql +++ b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql @@ -11,7 +11,7 @@ array_job_id BIGINT, duration INT NOT NULL, walltime INT NOT NULL, job_state VARCHAR(255) NOT NULL -CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')), +CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')), meta_data TEXT, -- JSON resources TEXT NOT NULL, -- JSON num_nodes INT NOT NULL, From 0a3e678329bc7162bffde549a2e85ac69b63e11b Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 18:03:01 +0100 Subject: 
[PATCH 380/443] add more missing node_fail states --- api/swagger.json | 6 ++++-- api/swagger.yaml | 2 ++ internal/api/docs.go | 6 ++++-- pkg/schema/job.go | 2 +- pkg/schema/schemas/job-meta.schema.json | 1 + web/frontend/src/generic/filters/JobStates.svelte | 1 + 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/api/swagger.json b/api/swagger.json index 9035beb..5cd4a5e 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -1512,7 +1512,8 @@ "cancelled", "stopped", "timeout", - "out_of_memory" + "out_of_memory", + "node_fail" ], "allOf": [ { @@ -1670,7 +1671,8 @@ "cancelled", "stopped", "timeout", - "out_of_memory" + "out_of_memory", + "node_fail" ], "allOf": [ { diff --git a/api/swagger.yaml b/api/swagger.yaml index 20fa031..3f188c2 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -201,6 +201,7 @@ definitions: - stopped - timeout - out_of_memory + - node_fail example: completed metaData: additionalProperties: @@ -314,6 +315,7 @@ definitions: - stopped - timeout - out_of_memory + - node_fail example: completed metaData: additionalProperties: diff --git a/internal/api/docs.go b/internal/api/docs.go index 6f034b4..99a8a14 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -1518,7 +1518,8 @@ const docTemplate = `{ "cancelled", "stopped", "timeout", - "out_of_memory" + "out_of_memory", + "node_fail" ], "allOf": [ { @@ -1676,7 +1677,8 @@ const docTemplate = `{ "cancelled", "stopped", "timeout", - "out_of_memory" + "out_of_memory", + "node_fail" ], "allOf": [ { diff --git a/pkg/schema/job.go b/pkg/schema/job.go index b6ac44d..7a2d950 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -21,7 +21,7 @@ type BaseJob struct { Partition string `json:"partition,omitempty" db:"cluster_partition" example:"main"` Project string `json:"project" db:"project" example:"abcd200"` User string `json:"user" db:"hpc_user" example:"abcd100h"` - State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` + State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory,node_fail"` Tags []*Tag `json:"tags,omitempty"` RawEnergyFootprint []byte `json:"-" db:"energy_footprint"` RawFootprint []byte `json:"-" db:"footprint"` diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json index db7475c..a12057b 100644 --- a/pkg/schema/schemas/job-meta.schema.json +++ b/pkg/schema/schemas/job-meta.schema.json @@ -76,6 +76,7 @@ "cancelled", "stopped", "out_of_memory", + "node_fail", "timeout" ] }, diff --git a/web/frontend/src/generic/filters/JobStates.svelte b/web/frontend/src/generic/filters/JobStates.svelte index d903abc..b9a747d 100644 --- a/web/frontend/src/generic/filters/JobStates.svelte +++ b/web/frontend/src/generic/filters/JobStates.svelte @@ -23,6 +23,7 @@ "timeout", "preempted", "out_of_memory", + "node_fail", ]; From a61ff915ac0517261b8ac2be3e3cc3b8e7f40e7c Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 18:15:39 +0100 Subject: [PATCH 381/443] Revert "add more missing node_fail states" This reverts commit 0a3e678329bc7162bffde549a2e85ac69b63e11b. 
--- api/swagger.json | 6 ++---- api/swagger.yaml | 2 -- internal/api/docs.go | 6 ++---- pkg/schema/job.go | 2 +- pkg/schema/schemas/job-meta.schema.json | 1 - web/frontend/src/generic/filters/JobStates.svelte | 1 - 6 files changed, 5 insertions(+), 13 deletions(-) diff --git a/api/swagger.json b/api/swagger.json index 5cd4a5e..9035beb 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -1512,8 +1512,7 @@ "cancelled", "stopped", "timeout", - "out_of_memory", - "node_fail" + "out_of_memory" ], "allOf": [ { @@ -1671,8 +1670,7 @@ "cancelled", "stopped", "timeout", - "out_of_memory", - "node_fail" + "out_of_memory" ], "allOf": [ { diff --git a/api/swagger.yaml b/api/swagger.yaml index 3f188c2..20fa031 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -201,7 +201,6 @@ definitions: - stopped - timeout - out_of_memory - - node_fail example: completed metaData: additionalProperties: @@ -315,7 +314,6 @@ definitions: - stopped - timeout - out_of_memory - - node_fail example: completed metaData: additionalProperties: diff --git a/internal/api/docs.go b/internal/api/docs.go index 99a8a14..6f034b4 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -1518,8 +1518,7 @@ const docTemplate = `{ "cancelled", "stopped", "timeout", - "out_of_memory", - "node_fail" + "out_of_memory" ], "allOf": [ { @@ -1677,8 +1676,7 @@ const docTemplate = `{ "cancelled", "stopped", "timeout", - "out_of_memory", - "node_fail" + "out_of_memory" ], "allOf": [ { diff --git a/pkg/schema/job.go b/pkg/schema/job.go index 7a2d950..b6ac44d 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -21,7 +21,7 @@ type BaseJob struct { Partition string `json:"partition,omitempty" db:"cluster_partition" example:"main"` Project string `json:"project" db:"project" example:"abcd200"` User string `json:"user" db:"hpc_user" example:"abcd100h"` - State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory,node_fail"` + State JobState `json:"jobState" db:"job_state" example:"completed" enums:"completed,failed,cancelled,stopped,timeout,out_of_memory"` Tags []*Tag `json:"tags,omitempty"` RawEnergyFootprint []byte `json:"-" db:"energy_footprint"` RawFootprint []byte `json:"-" db:"footprint"` diff --git a/pkg/schema/schemas/job-meta.schema.json b/pkg/schema/schemas/job-meta.schema.json index a12057b..db7475c 100644 --- a/pkg/schema/schemas/job-meta.schema.json +++ b/pkg/schema/schemas/job-meta.schema.json @@ -76,7 +76,6 @@ "cancelled", "stopped", "out_of_memory", - "node_fail", "timeout" ] }, diff --git a/web/frontend/src/generic/filters/JobStates.svelte b/web/frontend/src/generic/filters/JobStates.svelte index b9a747d..d903abc 100644 --- a/web/frontend/src/generic/filters/JobStates.svelte +++ b/web/frontend/src/generic/filters/JobStates.svelte @@ -23,7 +23,6 @@ "timeout", "preempted", "out_of_memory", - "node_fail", ]; From aa3fe2b8726634800a36d6dc4153ab6c7c9f93f9 Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 18:15:46 +0100 Subject: [PATCH 382/443] Revert "add missing node_fail to db constraints" This reverts commit d4336b0dcb4e054a39033fc681634c285d08d4d8. 
--- .../repository/migrations/sqlite3/04_add-constraints.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql index a6898c3..06b1a9b 100644 --- a/internal/repository/migrations/sqlite3/04_add-constraints.up.sql +++ b/internal/repository/migrations/sqlite3/04_add-constraints.up.sql @@ -11,7 +11,7 @@ array_job_id BIGINT, duration INT NOT NULL, walltime INT NOT NULL, job_state VARCHAR(255) NOT NULL -CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')), +CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')), meta_data TEXT, -- JSON resources TEXT NOT NULL, -- JSON num_nodes INT NOT NULL, From bd93b8be8efd2440d3eab6d5c43e9c4e7d4c164b Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 18:15:53 +0100 Subject: [PATCH 383/443] Revert "add node_fail state to database schema" This reverts commit 65d2698af4a104fbd5ff1faf0f462e6a50b6a466. --- internal/repository/migrations/mysql/01_init-schema.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/migrations/mysql/01_init-schema.up.sql b/internal/repository/migrations/mysql/01_init-schema.up.sql index 16f7627..3a6930c 100644 --- a/internal/repository/migrations/mysql/01_init-schema.up.sql +++ b/internal/repository/migrations/mysql/01_init-schema.up.sql @@ -13,7 +13,7 @@ CREATE TABLE IF NOT EXISTS job ( walltime INT NOT NULL DEFAULT 0, job_state VARCHAR(255) NOT NULL CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', - 'stopped', 'timeout', 'preempted', 'out_of_memory', 'node_fail')), + 'stopped', 'timeout', 'preempted', 'out_of_memory')), meta_data TEXT, -- JSON resources TEXT NOT NULL, -- JSON From 4b2d7068b334c99bca3b77cc6a34371d5cb4416e Mon Sep 17 00:00:00 2001 From: Michael Panzlaff Date: Tue, 4 Mar 2025 18:16:02 +0100 Subject: [PATCH 384/443] Revert "add node_fail job state" This reverts commit 6454576417ca9048435390a6a3c30415d1a15951. --- api/swagger.json | 6 ++---- api/swagger.yaml | 2 -- internal/api/docs.go | 6 ++---- pkg/schema/job.go | 4 +--- 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/api/swagger.json b/api/swagger.json index 9035beb..51b22c8 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -1786,8 +1786,7 @@ "stopped", "timeout", "preempted", - "out_of_memory", - "node_fail" + "out_of_memory" ], "x-enum-varnames": [ "JobStateRunning", @@ -1797,8 +1796,7 @@ "JobStateStopped", "JobStateTimeout", "JobStatePreempted", - "JobStateOutOfMemory", - "JobStateNodeFail" + "JobStateOutOfMemory" ] }, "schema.JobStatistics": { diff --git a/api/swagger.yaml b/api/swagger.yaml index 20fa031..f5f0081 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -395,7 +395,6 @@ definitions: - timeout - preempted - out_of_memory - - node_fail type: string x-enum-varnames: - JobStateRunning @@ -406,7 +405,6 @@ definitions: - JobStateTimeout - JobStatePreempted - JobStateOutOfMemory - - JobStateNodeFail schema.JobStatistics: description: Specification for job metric statistics. 
properties: diff --git a/internal/api/docs.go b/internal/api/docs.go index 6f034b4..642003f 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -1792,8 +1792,7 @@ const docTemplate = `{ "stopped", "timeout", "preempted", - "out_of_memory", - "node_fail" + "out_of_memory" ], "x-enum-varnames": [ "JobStateRunning", @@ -1803,8 +1802,7 @@ const docTemplate = `{ "JobStateStopped", "JobStateTimeout", "JobStatePreempted", - "JobStateOutOfMemory", - "JobStateNodeFail" + "JobStateOutOfMemory" ] }, "schema.JobStatistics": { diff --git a/pkg/schema/job.go b/pkg/schema/job.go index b6ac44d..5e3110b 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -143,7 +143,6 @@ const ( JobStateTimeout JobState = "timeout" JobStatePreempted JobState = "preempted" JobStateOutOfMemory JobState = "out_of_memory" - JobStateNodeFail JobState = "node_fail" ) func (e *JobState) UnmarshalGQL(v interface{}) error { @@ -172,6 +171,5 @@ func (e JobState) Valid() bool { e == JobStateStopped || e == JobStateTimeout || e == JobStatePreempted || - e == JobStateOutOfMemory || - e == JobStateNodeFail + e == JobStateOutOfMemory } From 2b56b40e6d2b69d49f666f0753e131f34a13aa83 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 6 Mar 2025 12:46:25 +0100 Subject: [PATCH 385/443] Review energyFootprint calculation, fix missing numNodes factor, add log --- internal/importer/handleImport.go | 24 ++++++++++++++++-------- internal/importer/initDB.go | 24 ++++++++++++++++-------- internal/repository/job.go | 24 +++++++++++++++--------- 3 files changed, 47 insertions(+), 25 deletions(-) diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go index 01773a5..623291c 100644 --- a/internal/importer/handleImport.go +++ b/internal/importer/handleImport.go @@ -96,27 +96,35 @@ func HandleImportFlag(flag string) error { } job.EnergyFootprint = make(map[string]float64) - var totalEnergy float64 - var energy float64 + // Total Job Energy Outside Loop + totalEnergy := 0.0 for _, fp := range sc.EnergyFootprint { + // Always Init Metric Energy Inside Loop + metricEnergy := 0.0 if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh - // Energy: Power (in Watts) * Time (in Seconds) if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) + log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, fp, job.Cluster) + // FIXME: Needs sum as stats type } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) - // Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits - energy = math.Round(((repository.LoadJobStat(&job, fp, "avg")*float64(job.Duration))/3600/1000)*100) / 100 + // Energy: Power (in Watts) * Time (in Seconds) + // Unit: (W * (s / 3600)) / 1000 = kWh + // Round 2 Digits: round(Energy * 100) / 100 + // Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000 + // Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1 + rawEnergy := ((repository.LoadJobStat(&job, fp, "avg") * float64(job.NumNodes)) * (float64(job.Duration) / 3600.0)) / 1000.0 + metricEnergy = math.Round(rawEnergy*100.0) / 100.0 } } else { log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID) } - job.EnergyFootprint[fp] = metricEnergy + totalEnergy += metricEnergy } - job.Energy = (math.Round(totalEnergy*100) / 100) + job.Energy = (math.Round(totalEnergy*100.0) / 100.0) if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil { log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID) return err diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index fa2ee6e..9a2ccdf 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -93,27 +93,35 @@ func InitDB() error { } job.EnergyFootprint = make(map[string]float64) - var totalEnergy float64 - var energy float64 + // Total Job Energy Outside Loop + totalEnergy := 0.0 for _, fp := range sc.EnergyFootprint { + // Always Init Metric Energy Inside Loop + metricEnergy := 0.0 if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh - // Energy: Power (in Watts) * Time (in Seconds) if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) + log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, fp, jobMeta.Cluster) + // FIXME: Needs sum as stats type } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) - // Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits - energy = math.Round(((repository.LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600/1000)*100) / 100 + // Energy: Power (in Watts) * Time (in Seconds) + // Unit: (W * (s / 3600)) / 1000 = kWh + // Round 2 Digits: round(Energy * 100) / 100 + // Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000 + // Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1 + rawEnergy := ((repository.LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes)) * (float64(jobMeta.Duration) / 3600.0)) / 1000.0 + metricEnergy = math.Round(rawEnergy*100.0) / 100.0 } } else { log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) } - job.EnergyFootprint[fp] = energy - totalEnergy += energy + job.EnergyFootprint[fp] = metricEnergy + totalEnergy += metricEnergy } - job.Energy = (math.Round(totalEnergy*100) / 100) + job.Energy = (math.Round(totalEnergy*100.0) / 100.0) if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil { log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) return err diff --git a/internal/repository/job.go b/internal/repository/job.go index 020c3c2..84de6f7 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -590,28 +590,34 @@ func (r *JobRepository) UpdateEnergy( return stmt, err } energyFootprint := make(map[string]float64) - var totalEnergy float64 - var energy float64 + // Total Job Energy Outside Loop + totalEnergy := 0.0 for _, fp := range sc.EnergyFootprint { + // Always Init Metric Energy Inside Loop + metricEnergy := 0.0 if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { // Note: For DB data, calculate and save as kWh if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh) + log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, fp, jobMeta.Cluster) // FIXME: Needs sum as stats type
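As a worked example of the kWh conversion used in the power branches above and in the hunk continuing below (the numbers are made up, not taken from any real job): a node-average draw of 150 W on 4 nodes over a 7200 s job gives (150 * 4) * (7200 / 3600) / 1000 = 1.2 kWh. A minimal sketch of the same arithmetic (function and parameter names are illustrative, not from the codebase):

    package main

    import (
    	"fmt"
    	"math"
    )

    // computeKWh mirrors the conversion from the patch: (node-average Watts *
    // number of nodes) * (seconds / 3600) / 1000, rounded to two digits.
    func computeKWh(nodeAvgWatts float64, numNodes, durationSec int) float64 {
    	raw := (nodeAvgWatts * float64(numNodes)) * (float64(durationSec) / 3600.0) / 1000.0
    	return math.Round(raw*100.0) / 100.0
    }

    func main() {
    	fmt.Println(computeKWh(150.0, 4, 7200)) // prints 1.2
    }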
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) // Energy: Power (in Watts) * Time (in Seconds) - // Unit: (( W * s ) / 3600) / 1000 = kWh ; Rounded to 2 nearest digits: (Energy * 100) / 100 - // Here: All-Node Metric Average * Number of Nodes * Job Runtime + // Unit: (W * (s / 3600)) / 1000 = kWh + // Round 2 Digits: round(Energy * 100) / 100 + // Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000 // Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1 - metricNodeSum := LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes) * float64(jobMeta.Duration) - energy = math.Round(((metricNodeSum/3600)/1000)*100) / 100 + rawEnergy := ((LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes)) * (float64(jobMeta.Duration) / 3600.0)) / 1000.0 + metricEnergy = math.Round(rawEnergy*100.0) / 100.0 } } else { log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) } - energyFootprint[fp] = energy - totalEnergy += energy + energyFootprint[fp] = metricEnergy + totalEnergy += metricEnergy + + // log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), metricEnergy, jobMeta.JobID, totalEnergy) } var rawFootprint []byte @@ -620,7 +626,7 @@ return stmt, err } - return stmt.Set("energy_footprint", string(rawFootprint)).Set("energy", (math.Round(totalEnergy*100) / 100)), nil + return stmt.Set("energy_footprint", string(rawFootprint)).Set("energy", (math.Round(totalEnergy*100.0) / 100.0)), nil } func (r *JobRepository) UpdateFootprint( From d0af933b350d3e50cc64c648b5fbfd2fd4d1a0cf Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 6 Mar 2025 15:39:15 +0100 Subject: [PATCH 386/443] feat: add subCluster level frontend keys for metric selections - applies to jobView and nodeList --- web/frontend/src/Job.root.svelte | 25 +++++++---- web/frontend/src/Jobs.root.svelte | 2 +- web/frontend/src/Systems.root.svelte | 10 +++-- web/frontend/src/User.root.svelte | 2 +- .../src/generic/select/MetricSelection.svelte | 42 ++++++++++++------- web/frontend/src/job/StatsTable.svelte | 7 ++-- web/frontend/src/systems/NodeList.svelte | 16 +++---- 7 files changed, 65 insertions(+), 39 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index f2df916..6980230 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -128,15 +128,24 @@ if (!job) return; const pendingMetrics = [ - ...(ccconfig[`job_view_selectedMetrics:${job.cluster}`] || - $initq.data.globalMetrics.reduce((names, gm) => { - if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { - names.push(gm.name); - } - return names; - }, []) + ...( + ( + ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || + ccconfig[`job_view_selectedMetrics:${job.cluster}`] + ) || + $initq.data.globalMetrics + .reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { + names.push(gm.name); + } + return names; + }, []) ), + ...( + ( + ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] || + ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] + ) || 
ccconfig[`job_view_nodestats_selectedMetrics`] ), ]; diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index df928d0..7faa8b8 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -137,5 +137,5 @@ bind:metrics bind:isOpen={isMetricsSelectionOpen} bind:showFootprint - footprintSelect={true} + footprintSelect /> diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index 8089bbe..1589cac 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -29,8 +29,8 @@ import Refresher from "./generic/helper/Refresher.svelte"; export let displayType; - export let cluster; - export let subCluster = ""; + export let cluster = null; + export let subCluster = null; export let from = null; export let to = null; @@ -60,7 +60,10 @@ let hostnameFilter = ""; let pendingHostnameFilter = ""; let selectedMetric = ccconfig.system_view_selectedMetric || ""; - let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric]; + let selectedMetrics = ( + ccconfig[`node_list_selectedMetrics:${cluster}:${subCluster}`] || + ccconfig[`node_list_selectedMetrics:${cluster}`] + ) || [ccconfig.system_view_selectedMetric]; let isMetricsSelectionOpen = false; /* @@ -191,6 +194,7 @@ av.cluster === cluster)) allMetrics.add(gm.name); } else { if (gm.availability.find((av) => av.cluster === cluster && av.subClusters.includes(subCluster))) allMetrics.add(gm.name); @@ -67,7 +67,7 @@ function printAvailability(metric, cluster) { const avail = globalMetrics.find((gm) => gm.name === metric)?.availability - if (cluster == null) { + if (!cluster) { return avail.map((av) => av.cluster).join(',') } else { return avail.find((av) => av.cluster === cluster).subClusters.join(',') @@ -112,10 +112,17 @@ metrics = newMetricsOrder.filter((m) => unorderedMetrics.includes(m)); isOpen = false; - showFootprint = !!pendingShowFootprint; + let configKey; + if (cluster && subCluster) { + configKey = `${configName}:${cluster}:${subCluster}`; + } else if (cluster && !subCluster) { + configKey = `${configName}:${cluster}`; + } else { + configKey = `${configName}`; + } updateConfigurationMutation({ - name: cluster == null ? configName : `${configName}:${cluster}`, + name: configKey, value: JSON.stringify(metrics), }).subscribe((res) => { if (res.fetching === false && res.error) { @@ -123,17 +130,20 @@ } }); - updateConfigurationMutation({ - name: - cluster == null - ? "plot_list_showFootprint" - : `plot_list_showFootprint:${cluster}`, - value: JSON.stringify(showFootprint), - }).subscribe((res) => { - if (res.fetching === false && res.error) { - throw res.error; - } - }); + if (footprintSelect) { + showFootprint = !!pendingShowFootprint; + updateConfigurationMutation({ + name: + !cluster + ? 
"plot_list_showFootprint" + : `plot_list_showFootprint:${cluster}`, + value: JSON.stringify(showFootprint), + }).subscribe((res) => { + if (res.fetching === false && res.error) { + throw res.error; + } + }); + }; dispatch('update-metrics', metrics); } diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index b6b0f85..c8f12f2 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -37,9 +37,10 @@ sorting = {}, isMetricSelectionOpen = false, availableMetrics = new Set(), - selectedMetrics = - getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`] || - getContext("cc-config")["job_view_nodestats_selectedMetrics"]; + selectedMetrics = ( + getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] || + getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`] + ) || getContext("cc-config")["job_view_nodestats_selectedMetrics"]; for (let metric of sortedJobMetrics) { // Not Exclusive or Multi-Node: get maxScope directly (mostly: node) diff --git a/web/frontend/src/systems/NodeList.svelte b/web/frontend/src/systems/NodeList.svelte index ad64a1f..ca22d57 100644 --- a/web/frontend/src/systems/NodeList.svelte +++ b/web/frontend/src/systems/NodeList.svelte @@ -217,13 +217,15 @@ From 16db9bd1a29382a1bc556d1f31c1ef1c4352673d Mon Sep 17 00:00:00 2001 From: exterr2f Date: Mon, 10 Mar 2025 08:15:42 +0100 Subject: [PATCH 387/443] Fix node filter: Use EXISTS with Eq for exact match and LIKE for Contains --- internal/repository/jobFind.go | 4 ++-- internal/repository/jobQuery.go | 9 ++++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/internal/repository/jobFind.go b/internal/repository/jobFind.go index 0354df0..ea5e1e9 100644 --- a/internal/repository/jobFind.go +++ b/internal/repository/jobFind.go @@ -194,11 +194,11 @@ func (r *JobRepository) FindConcurrentJobs( queryRunning := query.Where("job.job_state = ?").Where("(job.start_time BETWEEN ? AND ? OR job.start_time < ?)", "running", startTimeTail, stopTimeTail, startTime) - queryRunning = queryRunning.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%")) + queryRunning = queryRunning.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", hostname) query = query.Where("job.job_state != ?").Where("((job.start_time BETWEEN ? AND ?) OR (job.start_time + job.duration) BETWEEN ? AND ? OR (job.start_time < ?) 
AND (job.start_time + job.duration) > ?)", "running", startTimeTail, stopTimeTail, startTimeFront, stopTimeTail, startTime, stopTime) - query = query.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%")) + query = query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", hostname) rows, err := query.RunWith(r.stmtCache).Query() if err != nil { diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go index b43b569..7d1d9eb 100644 --- a/internal/repository/jobQuery.go +++ b/internal/repository/jobQuery.go @@ -194,7 +194,14 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = buildIntCondition("job.num_hwthreads", filter.NumHWThreads, query) } if filter.Node != nil { - query = buildStringCondition("job.resources", filter.Node, query) + log.Infof("Applying node filter: %v", filter.Node) + if filter.Node.Eq != nil { + query = query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", *filter.Node.Eq) + } else if filter.Node.Contains != nil { + query = query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') LIKE ?)", "%"+*filter.Node.Contains+"%") + } else { + query = buildStringCondition("job.resources", filter.Node, query) + } } if filter.Energy != nil { query = buildFloatCondition("job.energy", filter.Energy, query) From f5f36427a45d082cb92df72a79566ea8efbe75d0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 13 Mar 2025 17:33:55 +0100 Subject: [PATCH 388/443] split statsTable data from jobMetrics query, initial commit - mainly backend changes - statstable changes only for prototyping --- api/schema.graphqls | 39 +- internal/graph/generated/generated.go | 1326 +++++++++++++---- internal/graph/model/models_gen.go | 22 +- internal/graph/schema.resolvers.go | 56 +- internal/metricDataDispatcher/dataLoader.go | 34 +- internal/metricdata/cc-metric-store.go | 93 +- internal/metricdata/influxdb-v2.go | 12 + internal/metricdata/metricdata.go | 5 +- internal/metricdata/prometheus.go | 14 +- internal/metricdata/utils.go | 12 +- pkg/archive/archive.go | 20 +- pkg/archive/fsBackend.go | 46 + pkg/archive/json.go | 37 + pkg/schema/metrics.go | 7 + web/frontend/src/Job.root.svelte | 44 +- web/frontend/src/job/Metric.svelte | 5 - web/frontend/src/job/StatsTable.svelte | 95 +- web/frontend/src/job/StatsTableEntry.svelte | 18 +- .../job/jobsummary/JobFootprintPolar.svelte | 12 +- 19 files changed, 1471 insertions(+), 426 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 9385a6f..ed8843c 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -137,11 +137,6 @@ type JobMetricWithName { metric: JobMetric! } -type JobMetricStatWithName { - name: String! - stats: MetricStatistics! -} - type JobMetric { unit: Unit timestep: Int! @@ -156,6 +151,30 @@ type Series { data: [NullableFloat!]! } +type StatsSeries { + mean: [NullableFloat!]! + median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! +} + +type JobStatsWithScope { + name: String! + scope: MetricScope! + stats: [ScopedStats!]! +} + +type ScopedStats { + hostname: String! + id: String + data: MetricStatistics! +} + +type JobStats { + name: String! + stats: MetricStatistics! +} + type Unit { base: String! prefix: String @@ -167,13 +186,6 @@ type MetricStatistics { max: Float! } -type StatsSeries { - mean: [NullableFloat!]! - median: [NullableFloat!]! - min: [NullableFloat!]! 
- max: [NullableFloat!]! -} - type MetricFootprints { metric: String! data: [NullableFloat!]! @@ -247,7 +259,8 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]! + jobStats(id: ID!, metrics: [String!]): [JobStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index b4c6e19..e5c9ca2 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -156,11 +156,6 @@ type ComplexityRoot struct { Unit func(childComplexity int) int } - JobMetricStatWithName struct { - Name func(childComplexity int) int - Stats func(childComplexity int) int - } - JobMetricWithName struct { Metric func(childComplexity int) int Name func(childComplexity int) int @@ -175,6 +170,17 @@ type ComplexityRoot struct { Offset func(childComplexity int) int } + JobStats struct { + Name func(childComplexity int) int + Stats func(childComplexity int) int + } + + JobStatsWithScope struct { + Name func(childComplexity int) int + Scope func(childComplexity int) int + Stats func(childComplexity int) int + } + JobsStatistics struct { HistDuration func(childComplexity int) int HistMetrics func(childComplexity int) int @@ -268,14 +274,15 @@ type ComplexityRoot struct { Clusters func(childComplexity int) int GlobalMetrics func(childComplexity int) int Job func(childComplexity int, id string) int - JobMetricStats func(childComplexity int, id string, metrics []string) int JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope, resolution *int) int + JobStats func(childComplexity int, id string, metrics []string) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int + ScopedJobStats func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int Tags func(childComplexity int) int User func(childComplexity int, username string) int } @@ -287,6 +294,12 @@ type ComplexityRoot struct { Hostname func(childComplexity int) int } + ScopedStats struct { + Data func(childComplexity int) int + Hostname func(childComplexity int) int + ID func(childComplexity int) int + } + Series struct { Data func(childComplexity int) int Hostname func(childComplexity int) int @@ -396,7 +409,8 @@ type QueryResolver interface { AllocatedNodes(ctx 
context.Context, cluster string) ([]*model.Count, error) Job(ctx context.Context, id string) (*schema.Job, error) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) - JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) + JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) + ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) @@ -861,20 +875,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetric.Unit(childComplexity), true - case "JobMetricStatWithName.name": - if e.complexity.JobMetricStatWithName.Name == nil { - break - } - - return e.complexity.JobMetricStatWithName.Name(childComplexity), true - - case "JobMetricStatWithName.stats": - if e.complexity.JobMetricStatWithName.Stats == nil { - break - } - - return e.complexity.JobMetricStatWithName.Stats(childComplexity), true - case "JobMetricWithName.metric": if e.complexity.JobMetricWithName.Metric == nil { break @@ -931,6 +931,41 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Offset(childComplexity), true + case "JobStats.name": + if e.complexity.JobStats.Name == nil { + break + } + + return e.complexity.JobStats.Name(childComplexity), true + + case "JobStats.stats": + if e.complexity.JobStats.Stats == nil { + break + } + + return e.complexity.JobStats.Stats(childComplexity), true + + case "JobStatsWithScope.name": + if e.complexity.JobStatsWithScope.Name == nil { + break + } + + return e.complexity.JobStatsWithScope.Name(childComplexity), true + + case "JobStatsWithScope.scope": + if e.complexity.JobStatsWithScope.Scope == nil { + break + } + + return e.complexity.JobStatsWithScope.Scope(childComplexity), true + + case "JobStatsWithScope.stats": + if e.complexity.JobStatsWithScope.Stats == nil { + break + } + + return e.complexity.JobStatsWithScope.Stats(childComplexity), true + case "JobsStatistics.histDuration": if e.complexity.JobsStatistics.HistDuration == nil { break @@ -1400,18 +1435,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.Job(childComplexity, args["id"].(string)), true - case "Query.jobMetricStats": - if e.complexity.Query.JobMetricStats == nil { - break - } - - args, err := ec.field_Query_jobMetricStats_args(context.TODO(), rawArgs) - if err != nil { - return 0, false - } - - return e.complexity.Query.JobMetricStats(childComplexity, args["id"].(string), args["metrics"].([]string)), true - case "Query.jobMetrics": if e.complexity.Query.JobMetrics == nil { break @@ -1424,6 +1447,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope), 
args["resolution"].(*int)), true + case "Query.jobStats": + if e.complexity.Query.JobStats == nil { + break + } + + args, err := ec.field_Query_jobStats_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.JobStats(childComplexity, args["id"].(string), args["metrics"].([]string)), true + case "Query.jobs": if e.complexity.Query.Jobs == nil { break @@ -1496,6 +1531,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.RooflineHeatmap(childComplexity, args["filter"].([]*model.JobFilter), args["rows"].(int), args["cols"].(int), args["minX"].(float64), args["minY"].(float64), args["maxX"].(float64), args["maxY"].(float64)), true + case "Query.scopedJobStats": + if e.complexity.Query.ScopedJobStats == nil { + break + } + + args, err := ec.field_Query_scopedJobStats_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.ScopedJobStats(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true + case "Query.tags": if e.complexity.Query.Tags == nil { break @@ -1543,6 +1590,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Resource.Hostname(childComplexity), true + case "ScopedStats.data": + if e.complexity.ScopedStats.Data == nil { + break + } + + return e.complexity.ScopedStats.Data(childComplexity), true + + case "ScopedStats.hostname": + if e.complexity.ScopedStats.Hostname == nil { + break + } + + return e.complexity.ScopedStats.Hostname(childComplexity), true + + case "ScopedStats.id": + if e.complexity.ScopedStats.ID == nil { + break + } + + return e.complexity.ScopedStats.ID(childComplexity), true + case "Series.data": if e.complexity.Series.Data == nil { break @@ -2131,11 +2199,6 @@ type JobMetricWithName { metric: JobMetric! } -type JobMetricStatWithName { - name: String! - stats: MetricStatistics! -} - type JobMetric { unit: Unit timestep: Int! @@ -2150,6 +2213,30 @@ type Series { data: [NullableFloat!]! } +type StatsSeries { + mean: [NullableFloat!]! + median: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! +} + +type JobStatsWithScope { + name: String! + scope: MetricScope! + stats: [ScopedStats!]! +} + +type ScopedStats { + hostname: String! + id: String + data: MetricStatistics! +} + +type JobStats { + name: String! + stats: MetricStatistics! +} + type Unit { base: String! prefix: String @@ -2161,13 +2248,6 @@ type MetricStatistics { max: Float! } -type StatsSeries { - mean: [NullableFloat!]! - median: [NullableFloat!]! - min: [NullableFloat!]! - max: [NullableFloat!]! -} - type MetricFootprints { metric: String! data: [NullableFloat!]! @@ -2241,7 +2321,8 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]! + jobStats(id: ID!, metrics: [String!]): [JobStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! 
@@ -2694,57 +2775,6 @@ func (ec *executionContext) field_Query_allocatedNodes_argsCluster( return zeroVal, nil } -func (ec *executionContext) field_Query_jobMetricStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { - var err error - args := map[string]any{} - arg0, err := ec.field_Query_jobMetricStats_argsID(ctx, rawArgs) - if err != nil { - return nil, err - } - args["id"] = arg0 - arg1, err := ec.field_Query_jobMetricStats_argsMetrics(ctx, rawArgs) - if err != nil { - return nil, err - } - args["metrics"] = arg1 - return args, nil -} -func (ec *executionContext) field_Query_jobMetricStats_argsID( - ctx context.Context, - rawArgs map[string]any, -) (string, error) { - if _, ok := rawArgs["id"]; !ok { - var zeroVal string - return zeroVal, nil - } - - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) - if tmp, ok := rawArgs["id"]; ok { - return ec.unmarshalNID2string(ctx, tmp) - } - - var zeroVal string - return zeroVal, nil -} - -func (ec *executionContext) field_Query_jobMetricStats_argsMetrics( - ctx context.Context, - rawArgs map[string]any, -) ([]string, error) { - if _, ok := rawArgs["metrics"]; !ok { - var zeroVal []string - return zeroVal, nil - } - - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) - if tmp, ok := rawArgs["metrics"]; ok { - return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) - } - - var zeroVal []string - return zeroVal, nil -} - func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -2842,6 +2872,57 @@ func (ec *executionContext) field_Query_jobMetrics_argsResolution( return zeroVal, nil } +func (ec *executionContext) field_Query_jobStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_jobStats_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + arg1, err := ec.field_Query_jobStats_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg1 + return args, nil +} +func (ec *executionContext) field_Query_jobStats_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobStats_argsMetrics( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["metrics"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + func (ec *executionContext) field_Query_job_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -3682,6 +3763,80 @@ func (ec *executionContext) field_Query_rooflineHeatmap_argsMaxY( return zeroVal, nil } +func (ec *executionContext) field_Query_scopedJobStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_scopedJobStats_argsID(ctx, rawArgs) + if err != nil { + 
return nil, err + } + args["id"] = arg0 + arg1, err := ec.field_Query_scopedJobStats_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg1 + arg2, err := ec.field_Query_scopedJobStats_argsScopes(ctx, rawArgs) + if err != nil { + return nil, err + } + args["scopes"] = arg2 + return args, nil +} +func (ec *executionContext) field_Query_scopedJobStats_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_scopedJobStats_argsMetrics( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["metrics"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_scopedJobStats_argsScopes( + ctx context.Context, + rawArgs map[string]any, +) ([]schema.MetricScope, error) { + if _, ok := rawArgs["scopes"]; !ok { + var zeroVal []schema.MetricScope + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + if tmp, ok := rawArgs["scopes"]; ok { + return ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx, tmp) + } + + var zeroVal []schema.MetricScope + return zeroVal, nil +} + func (ec *executionContext) field_Query_user_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -6663,102 +6818,6 @@ func (ec *executionContext) fieldContext_JobMetric_statisticsSeries(_ context.Co return fc, nil } -func (ec *executionContext) _JobMetricStatWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricStatWithName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobMetricStatWithName_name(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Name, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobMetricStatWithName_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobMetricStatWithName", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") - }, - } - return fc, nil -} - -func (ec *executionContext) _JobMetricStatWithName_stats(ctx context.Context, field graphql.CollectedField, obj 
*model.JobMetricStatWithName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobMetricStatWithName_stats(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Stats, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(*schema.MetricStatistics) - fc.Result = res - return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobMetricStatWithName_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobMetricStatWithName", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "avg": - return ec.fieldContext_MetricStatistics_avg(ctx, field) - case "min": - return ec.fieldContext_MetricStatistics_min(ctx, field) - case "max": - return ec.fieldContext_MetricStatistics_max(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) - }, - } - return fc, nil -} - func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobMetricWithName_name(ctx, field) if err != nil { @@ -7163,6 +7222,242 @@ func (ec *executionContext) fieldContext_JobResultList_hasNextPage(_ context.Con return fc, nil } +func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStats_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_stats(ctx, field) + if err != nil { + 
return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stats, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStats_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "avg": + return ec.fieldContext_MetricStatistics_avg(ctx, field) + case "min": + return ec.fieldContext_MetricStatistics_min(ctx, field) + case "max": + return ec.fieldContext_MetricStatistics_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_scope(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_scope(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Scope, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if 
!graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(schema.MetricScope) + fc.Result = res + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type MetricScope does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _JobStatsWithScope_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStatsWithScope_stats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stats, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.ScopedStats) + fc.Result = res + return ec.marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStatsWithScope_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "hostname": + return ec.fieldContext_ScopedStats_hostname(ctx, field) + case "id": + return ec.fieldContext_ScopedStats_id(ctx, field) + case "data": + return ec.fieldContext_ScopedStats_data(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type ScopedStats", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobsStatistics_id(ctx, field) if err != nil { @@ -10296,8 +10591,8 @@ func (ec *executionContext) fieldContext_Query_jobMetrics(ctx context.Context, f return fc, nil } -func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query_jobMetricStats(ctx, field) +func (ec *executionContext) _Query_jobStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_jobStats(ctx, field) if err != nil { return graphql.Null } @@ -10310,7 +10605,7 @@ func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobMetricStats(rctx, 
fc.Args["id"].(string), fc.Args["metrics"].([]string)) + return ec.resolvers.Query().JobStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string)) }) if err != nil { ec.Error(ctx, err) @@ -10322,12 +10617,12 @@ func (ec *executionContext) _Query_jobMetricStats(ctx context.Context, field gra } return graphql.Null } - res := resTmp.([]*model.JobMetricStatWithName) + res := resTmp.([]*model.JobStats) fc.Result = res - return ec.marshalNJobMetricStatWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithNameᚄ(ctx, field.Selections, res) + return ec.marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_jobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, @@ -10336,11 +10631,11 @@ func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Contex Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { case "name": - return ec.fieldContext_JobMetricStatWithName_name(ctx, field) + return ec.fieldContext_JobStats_name(ctx, field) case "stats": - return ec.fieldContext_JobMetricStatWithName_stats(ctx, field) + return ec.fieldContext_JobStats_stats(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type JobMetricStatWithName", field.Name) + return nil, fmt.Errorf("no field named %q was found under type JobStats", field.Name) }, } defer func() { @@ -10350,7 +10645,70 @@ func (ec *executionContext) fieldContext_Query_jobMetricStats(ctx context.Contex } }() ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Query_jobMetricStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + if fc.Args, err = ec.field_Query_jobStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_scopedJobStats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().ScopedJobStats(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.JobStatsWithScope) + fc.Result = res + return ec.marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: 
func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "name": + return ec.fieldContext_JobStatsWithScope_name(ctx, field) + case "scope": + return ec.fieldContext_JobStatsWithScope_scope(ctx, field) + case "stats": + return ec.fieldContext_JobStatsWithScope_stats(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type JobStatsWithScope", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_scopedJobStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) return fc, err } @@ -11058,6 +11416,143 @@ func (ec *executionContext) fieldContext_Resource_configuration(_ context.Contex return fc, nil } +func (ec *executionContext) _ScopedStats_hostname(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_hostname(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Hostname, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_hostname(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _ScopedStats_id(ctx context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _ScopedStats_data(ctx 
context.Context, field graphql.CollectedField, obj *model.ScopedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ScopedStats_data(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ScopedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ScopedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "avg": + return ec.fieldContext_MetricStatistics_avg(ctx, field) + case "min": + return ec.fieldContext_MetricStatistics_min(ctx, field) + case "max": + return ec.fieldContext_MetricStatistics_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _Series_hostname(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Series_hostname(ctx, field) if err != nil { @@ -16569,50 +17064,6 @@ func (ec *executionContext) _JobMetric(ctx context.Context, sel ast.SelectionSet return out } -var jobMetricStatWithNameImplementors = []string{"JobMetricStatWithName"} - -func (ec *executionContext) _JobMetricStatWithName(ctx context.Context, sel ast.SelectionSet, obj *model.JobMetricStatWithName) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobMetricStatWithNameImplementors) - - out := graphql.NewFieldSet(fields) - deferred := make(map[string]*graphql.FieldSet) - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobMetricStatWithName") - case "name": - out.Values[i] = ec._JobMetricStatWithName_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - case "stats": - out.Values[i] = ec._JobMetricStatWithName_stats(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - default: - panic("unknown field " + strconv.Quote(field.Name)) - } - } - out.Dispatch(ctx) - if out.Invalids > 0 { - return graphql.Null - } - - atomic.AddInt32(&ec.deferred, int32(len(deferred))) - - for label, dfs := range deferred { - ec.processDeferredGroup(graphql.DeferredGroup{ - Label: label, - Path: graphql.GetPath(ctx), - FieldSet: dfs, - Context: ctx, - }) - } - - return out -} - var jobMetricWithNameImplementors = []string{"JobMetricWithName"} func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.SelectionSet, obj *model.JobMetricWithName) graphql.Marshaler { @@ -16709,6 +17160,99 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio return out } +var 
jobStatsImplementors = []string{"JobStats"} + +func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet, obj *model.JobStats) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("JobStats") + case "name": + out.Values[i] = ec._JobStats_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stats": + out.Values[i] = ec._JobStats_stats(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var jobStatsWithScopeImplementors = []string{"JobStatsWithScope"} + +func (ec *executionContext) _JobStatsWithScope(ctx context.Context, sel ast.SelectionSet, obj *model.JobStatsWithScope) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsWithScopeImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("JobStatsWithScope") + case "name": + out.Values[i] = ec._JobStatsWithScope_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "scope": + out.Values[i] = ec._JobStatsWithScope_scope(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stats": + out.Values[i] = ec._JobStatsWithScope_stats(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var jobsStatisticsImplementors = []string{"JobsStatistics"} func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.SelectionSet, obj *model.JobsStatistics) graphql.Marshaler { @@ -17513,7 +18057,7 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) - case "jobMetricStats": + case "jobStats": field := field innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { @@ -17522,7 +18066,29 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._Query_jobMetricStats(ctx, field) + res = ec._Query_jobStats(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) 
graphql.Marshaler { return rrm(innerCtx) }) + case "scopedJobStats": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_scopedJobStats(ctx, field) if res == graphql.Null { atomic.AddUint32(&fs.Invalids, 1) } @@ -17740,6 +18306,52 @@ func (ec *executionContext) _Resource(ctx context.Context, sel ast.SelectionSet, return out } +var scopedStatsImplementors = []string{"ScopedStats"} + +func (ec *executionContext) _ScopedStats(ctx context.Context, sel ast.SelectionSet, obj *model.ScopedStats) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, scopedStatsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("ScopedStats") + case "hostname": + out.Values[i] = ec._ScopedStats_hostname(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "id": + out.Values[i] = ec._ScopedStats_id(ctx, field, obj) + case "data": + out.Values[i] = ec._ScopedStats_data(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var seriesImplementors = []string{"Series"} func (ec *executionContext) _Series(ctx context.Context, sel ast.SelectionSet, obj *schema.Series) graphql.Marshaler { @@ -19346,60 +19958,6 @@ func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpit return ec._JobMetric(ctx, sel, v) } -func (ec *executionContext) marshalNJobMetricStatWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricStatWithName) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalNJobMetricStatWithName2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithName(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - go f(i) - } - - } - wg.Wait() - - for _, e := range ret { - if e == graphql.Null { - return graphql.Null - } - } - - return ret -} - -func (ec *executionContext) marshalNJobMetricStatWithName2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricStatWithName(ctx context.Context, sel ast.SelectionSet, v *model.JobMetricStatWithName) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - return graphql.Null - } - return ec._JobMetricStatWithName(ctx, sel, v) -} - func (ec *executionContext) 
marshalNJobMetricWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobMetricWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricWithName) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -19478,6 +20036,114 @@ func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋcc return v } +func (ec *executionContext) marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStats) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStats(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStats(ctx context.Context, sel ast.SelectionSet, v *model.JobStats) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._JobStats(ctx, sel, v) +} + +func (ec *executionContext) marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStatsWithScope) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx context.Context, sel ast.SelectionSet, v *model.JobStatsWithScope) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._JobStatsWithScope(ctx, sel, v) +} + func (ec *executionContext) marshalNJobsStatistics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobsStatisticsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobsStatistics) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -19891,6 +20557,60 @@ func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋ return ec._Resource(ctx, sel, v) } +func (ec 
*executionContext) marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.ScopedStats) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStats(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNScopedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStats(ctx context.Context, sel ast.SelectionSet, v *model.ScopedStats) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._ScopedStats(ctx, sel, v) +} + func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler { return ec._Series(ctx, sel, &v) } diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index d83a318..43c4e37 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -81,11 +81,6 @@ type JobLinkResultList struct { Count *int `json:"count,omitempty"` } -type JobMetricStatWithName struct { - Name string `json:"name"` - Stats *schema.MetricStatistics `json:"stats"` -} - type JobMetricWithName struct { Name string `json:"name"` Scope schema.MetricScope `json:"scope"` @@ -100,6 +95,17 @@ type JobResultList struct { HasNextPage *bool `json:"hasNextPage,omitempty"` } +type JobStats struct { + Name string `json:"name"` + Stats *schema.MetricStatistics `json:"stats"` +} + +type JobStatsWithScope struct { + Name string `json:"name"` + Scope schema.MetricScope `json:"scope"` + Stats []*ScopedStats `json:"stats"` +} + type JobsStatistics struct { ID string `json:"id"` Name string `json:"name"` @@ -173,6 +179,12 @@ type PageRequest struct { Page int `json:"page"` } +type ScopedStats struct { + Hostname string `json:"hostname"` + ID *string `json:"id,omitempty"` + Data *schema.MetricStatistics `json:"data"` +} + type StringInput struct { Eq *string `json:"eq,omitempty"` Neq *string `json:"neq,omitempty"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index ce1384b..1565c7e 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -301,24 +301,23 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str return res, err } -// JobMetricStats is the resolver for the jobMetricStats field. -func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) { - +// JobMetricStats is the resolver for the jobStats field. 
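+// JobStats backs the polar plot in the job view. A matching client query, as
+// used by JobFootprintPolar.svelte elsewhere in this series, looks like:
+//   jobStats(id: $dbid, metrics: $selectedMetrics) { name stats { min avg max } }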
+func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) { job, err := r.Query().Job(ctx, id) if err != nil { - log.Warn("Error while querying job for metrics") + log.Warnf("Error while querying job %s for metrics", id) return nil, err } - data, err := metricDataDispatcher.LoadStatData(job, metrics, ctx) + data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx) if err != nil { - log.Warn("Error while loading job stat data") + log.Warnf("Error while loading job stat data for job id %s", id) return nil, err } - res := []*model.JobMetricStatWithName{} + res := []*model.JobStats{} for name, md := range data { - res = append(res, &model.JobMetricStatWithName{ + res = append(res, &model.JobStats{ Name: name, Stats: &md, }) @@ -327,6 +326,47 @@ func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics [ return res, err } +// ScopedJobStats is the resolver for the scopedJobStats field. +func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) { + job, err := r.Query().Job(ctx, id) + if err != nil { + log.Warnf("Error while querying job %s for metrics", id) + return nil, err + } + + data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx) + if err != nil { + log.Warnf("Error while loading scoped job stat data for job id %s", id) + return nil, err + } + + res := make([]*model.JobStatsWithScope, 0) + for name, scoped := range data { + for scope, stats := range scoped { + // log.Debugf("HANDLE >>>>> %s @ %s -> First Array Value %#v", name, scope, *stats[0]) + + mdlStats := make([]*model.ScopedStats, 0) + for _, stat := range stats { + // log.Debugf("CONVERT >>>>> >>>>> %s -> %v -> %#v", stat.Hostname, stat.Id, stat.Data) + mdlStats = append(mdlStats, &model.ScopedStats{ + Hostname: stat.Hostname, + ID: stat.Id, + Data: stat.Data, + }) + } + + // log.Debugf("APPEND >>>>> >>>>> %#v", mdlStats) + res = append(res, &model.JobStatsWithScope{ + Name: name, + Scope: scope, + Stats: mdlStats, + }) + } + } + + return res, nil +} + // JobsFootprints is the resolver for the jobsFootprints field. func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) { // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column! diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index f3f60b4..c6cecd8 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -224,8 +224,34 @@ return nil } -// Used for polar plots in frontend -func LoadStatData( +// Used for statsTable in frontend: Return scoped statistics by metric.
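+// Finished jobs are answered from the job archive; running jobs, or setups
+// with archiving disabled, query the cluster's metric data repository. A call
+// sketch (metric and scope values illustrative):
+//   stats, err := metricDataDispatcher.LoadScopedJobStats(job,
+//       []string{"cpu_load"}, []schema.MetricScope{schema.MetricScopeNode}, ctx)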
+func LoadScopedJobStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, +) (schema.ScopedJobStats, error) { + + if job.State != schema.JobStateRunning && !config.Keys.DisableArchive { + return archive.LoadScopedStatsFromArchive(job, metrics, scopes) + } + + repo, err := metricdata.GetMetricDataRepo(job.Cluster) + if err != nil { + return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) + } + + scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx) + if err != nil { + log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) + return nil, err + } + + return scopedStats, nil +} + +// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric. +func LoadJobStats( job *schema.Job, metrics []string, ctx context.Context, @@ -237,12 +263,12 @@ func LoadStatData( data := make(map[string]schema.MetricStatistics, len(metrics)) repo, err := metricdata.GetMetricDataRepo(job.Cluster) if err != nil { - return data, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster) + return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster) } stats, err := repo.LoadStats(job, metrics, ctx) if err != nil { - log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) + log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project) return data, err } diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 2b92fbb..6635299 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -618,7 +618,98 @@ func (ccms *CCMetricStore) LoadStats( return stats, nil } -// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known! +// Scoped Stats: Basically Load Data without resolution and data query flag? 
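+// I.e. the same queries as a LoadData call, but built with resolution 0 and
+// sent with WithStats=true / WithData=false, so cc-metric-store computes only
+// the per-series min/avg/max and transfers no raw timeseries.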
+func (ccms *CCMetricStore) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context, +) (schema.ScopedJobStats, error) { + queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0) + if err != nil { + log.Warn("Error while building queries") + return nil, err + } + + req := ApiQueryRequest{ + Cluster: job.Cluster, + From: job.StartTime.Unix(), + To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(), + Queries: queries, + WithStats: true, + WithData: false, + } + + resBody, err := ccms.doRequest(ctx, &req) + if err != nil { + log.Error("Error while performing request") + return nil, err + } + + var errors []string + scopedJobStats := make(schema.ScopedJobStats) + + for i, row := range resBody.Results { + query := req.Queries[i] + metric := ccms.toLocalName(query.Metric) + scope := assignedScope[i] + + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for ndx, res := range row { + if res.Error != nil { + /* Build list for "partial errors", if any */ + errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) + continue + } + + id := (*string)(nil) + if query.Type != nil { + id = new(string) + *id = query.TypeIds[ndx] + } + + if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { + // "schema.Float()" because regular float64 can not be JSONed when NaN. + res.Avg = schema.Float(0) + res.Min = schema.Float(0) + res.Max = schema.Float(0) + } + + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: query.Hostname, + Id: id, + Data: &schema.MetricStatistics{ + Avg: float64(res.Avg), + Min: float64(res.Min), + Max: float64(res.Max), + }, + }) + } + + // So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty + if len(scopedJobStats[metric][scope]) == 0 { + delete(scopedJobStats[metric], scope) + if len(scopedJobStats[metric]) == 0 { + delete(scopedJobStats, metric) + } + } + } + + if len(errors) != 0 { + /* Returns list for "partial errors" */ + return scopedJobStats, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) + } + return scopedJobStats, nil +} + +// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known! - Todo Outdated with NodeListData? 
func (ccms *CCMetricStore) LoadNodeData( cluster string, metrics, nodes []string, diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index 79c2d4a..2a943b6 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -301,6 +301,18 @@ func (idb *InfluxDBv2DataRepository) LoadStats( return stats, nil } +func (idb *InfluxDBv2DataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { + + // TODO : Implement to be used in JobView Stats Table + log.Infof("LoadScopedStats unimplemented for InfluxDBv2DataRepository, Args: Job-ID %d, metrics %v, scopes %v", job.JobID, metrics, scopes) + + return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") +} + func (idb *InfluxDBv2DataRepository) LoadNodeData( cluster string, metrics, nodes []string, diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 0fe94d1..f30d837 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -24,9 +24,12 @@ type MetricDataRepository interface { // Return the JobData for the given job, only with the requested metrics. LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) - // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. + // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope only. LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) + // Return a map of metrics to a map of scopes to the scoped metric statistics of the job. + LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error) + // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node. 
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index cd849ce..fe829c0 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -448,6 +448,18 @@ func (pdb *PrometheusDataRepository) LoadNodeData( return data, nil } +func (pdb *PrometheusDataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { + + // TODO : Implement to be used in Job-View StatsTable + log.Infof("LoadScopedStats unimplemented for PrometheusDataRepository, Args: job-id %v, metrics %v, scopes %v", job.JobID, metrics, scopes) + + return nil, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository") +} + func (pdb *PrometheusDataRepository) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -463,5 +475,5 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( // TODO : Implement to be used in NodeList-View log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) - return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository") + return nil, totalNodes, hasNextPage, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository") } diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index 48dd237..aa7bde1 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -36,7 +36,17 @@ func (tmdr *TestMetricDataRepository) LoadData( func (tmdr *TestMetricDataRepository) LoadStats( job *schema.Job, - metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + metrics []string, + ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + + panic("TODO") +} + +func (tmdr *TestMetricDataRepository) LoadScopedStats( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, + ctx context.Context) (schema.ScopedJobStats, error) { panic("TODO") } diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 2eabb52..002fd5e 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -27,6 +27,8 @@ type ArchiveBackend interface { LoadJobData(job *schema.Job) (schema.JobData, error) + LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) + LoadClusterCfg(name string) (*schema.Cluster, error) StoreJobMeta(jobMeta *schema.JobMeta) error @@ -125,7 +127,7 @@ func LoadAveragesFromArchive( return nil } -// Helper to metricdataloader.LoadStatData(). +// Helper to metricdataloader.LoadJobStats(). func LoadStatsFromArchive( job *schema.Job, metrics []string, @@ -154,6 +156,22 @@ func LoadStatsFromArchive( return data, nil } +// Helper to metricdataloader.LoadScopedJobStats(). 
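+// Reads the archived data.json(.gz) via the new ArchiveBackend.LoadJobStats
+// and reuses the per-series statistics already stored there, so no metric
+// data backend is contacted for completed jobs. Note: the metrics and scopes
+// arguments are not used for filtering here; the full archived map is returned.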
+func LoadScopedStatsFromArchive( + job *schema.Job, + metrics []string, + scopes []schema.MetricScope, +) (schema.ScopedJobStats, error) { + + data, err := ar.LoadJobStats(job) + if err != nil { + log.Warn("Error while loading job metadata from archiveBackend") + return nil, err + } + + return data, nil +} + func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) { metaFile, err := ar.LoadJobMeta(job) if err != nil { diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go index 8a43748..711b1f5 100644 --- a/pkg/archive/fsBackend.go +++ b/pkg/archive/fsBackend.go @@ -115,6 +115,40 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) { } } +func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) { + f, err := os.Open(filename) + + if err != nil { + log.Errorf("fsBackend LoadJobStats()- %v", err) + return nil, err + } + defer f.Close() + + if isCompressed { + r, err := gzip.NewReader(f) + if err != nil { + log.Errorf(" %v", err) + return nil, err + } + defer r.Close() + + if config.Keys.Validate { + if err := schema.Validate(schema.Data, r); err != nil { + return nil, fmt.Errorf("validate job data: %v", err) + } + } + + return DecodeJobStats(r, filename) + } else { + if config.Keys.Validate { + if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil { + return nil, fmt.Errorf("validate job data: %v", err) + } + } + return DecodeJobStats(bufio.NewReader(f), filename) + } +} + func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) { var config FsArchiveConfig @@ -389,6 +423,18 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) { return loadJobData(filename, isCompressed) } +func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) { + var isCompressed bool = true + filename := getPath(job, fsa.path, "data.json.gz") + + if !util.CheckFileExists(filename) { + filename = getPath(job, fsa.path, "data.json") + isCompressed = false + } + + return loadJobStats(filename, isCompressed) +} + func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) { filename := getPath(job, fsa.path, "meta.json") return loadJobMeta(filename) diff --git a/pkg/archive/json.go b/pkg/archive/json.go index 1219658..5201b74 100644 --- a/pkg/archive/json.go +++ b/pkg/archive/json.go @@ -32,6 +32,43 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) { return data.(schema.JobData), nil } +func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) { + jobData, err := DecodeJobData(r, k) + // Convert schema.JobData to schema.ScopedJobStats + if jobData != nil { + scopedJobStats := make(schema.ScopedJobStats) + for metric, metricData := range jobData { + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + for scope, jobMetric := range metricData { + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for _, series := range jobMetric.Series { + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: series.Hostname, + Id: series.Id, + Data: &series.Statistics, + }) + } + + // So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty + if len(scopedJobStats[metric][scope]) == 0 { + delete(scopedJobStats[metric], scope) + if len(scopedJobStats[metric]) == 0 { + delete(scopedJobStats, metric) + } + } 
+ } + } + return scopedJobStats, nil + } + return nil, err +} + func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) { var d schema.JobMeta if err := json.NewDecoder(r).Decode(&d); err != nil { diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go index ffac21b..fbb85e4 100644 --- a/pkg/schema/metrics.go +++ b/pkg/schema/metrics.go @@ -15,6 +15,7 @@ import ( ) type JobData map[string]map[MetricScope]*JobMetric +type ScopedJobStats map[string]map[MetricScope][]*ScopedStats type JobMetric struct { StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"` @@ -30,6 +31,12 @@ type Series struct { Statistics MetricStatistics `json:"statistics"` } +type ScopedStats struct { + Hostname string `json:"hostname"` + Id *string `json:"id,omitempty"` + Data *MetricStatistics `json:"data"` +} + type MetricStatistics struct { Avg float64 `json:"avg"` Min float64 `json:"min"` diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 6980230..2fe5bc4 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -127,28 +127,17 @@ let job = $initq.data.job; if (!job) return; - const pendingMetrics = [ - ...( - ( - ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || - ccconfig[`job_view_selectedMetrics:${job.cluster}`] - ) || - $initq.data.globalMetrics - .reduce((names, gm) => { - if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { - names.push(gm.name); - } - return names; - }, []) - ), - ...( - ( - ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] || - ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] - ) || - ccconfig[`job_view_nodestats_selectedMetrics`] - ), - ]; + const pendingMetrics = ( + ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || + ccconfig[`job_view_selectedMetrics:${job.cluster}`] + ) || + $initq.data.globalMetrics + .reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { + names.push(gm.name); + } + return names; + }, []) // Select default Scopes to load: Check before if any metric has accelerator scope by default const accScopeDefault = [...pendingMetrics].some(function (m) { @@ -343,7 +332,6 @@ {#if item.data} statsTable.moreLoaded(detail)} job={$initq.data.job} metricName={item.metric} metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit} @@ -404,15 +392,7 @@ class="overflow-x-auto" active={!somethingMissing} > - {#if $jobMetrics?.data?.jobMetrics} - {#key $jobMetrics.data.jobMetrics} - - {/key} - {/if} +
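The new nested map type is what ties the backend and frontend changes together: metric name -> scope -> per-series statistics. Below is a minimal Go sketch of consuming such a schema.ScopedJobStats value, e.g. as returned by metricDataDispatcher.LoadScopedJobStats (import path as in this tree; metric names, scope keys, and numbers are illustrative):

    package main

    import (
        "fmt"

        "github.com/ClusterCockpit/cc-backend/pkg/schema"
    )

    // printScopedStats walks metric -> scope -> series and prints one line per series.
    func printScopedStats(data schema.ScopedJobStats) {
        for metric, byScope := range data {
            for scope, series := range byScope {
                for _, s := range series {
                    id := "-" // core/thread/accelerator id; nil at node scope
                    if s.Id != nil {
                        id = *s.Id
                    }
                    fmt.Printf("%s @ %s: host=%s id=%s min=%g avg=%g max=%g\n",
                        metric, scope, s.Hostname, id, s.Data.Min, s.Data.Avg, s.Data.Max)
                }
            }
        }
    }

    func main() {
        coreId := "0"
        data := schema.ScopedJobStats{
            "cpu_load": {
                schema.MetricScopeNode: {
                    {Hostname: "node001", Data: &schema.MetricStatistics{Avg: 47.3, Min: 42.1, Max: 48.0}},
                },
                "core": { // MetricScope is a string type, so a literal key works
                    {Hostname: "node001", Id: &coreId, Data: &schema.MetricStatistics{Avg: 1.9, Min: 0.2, Max: 2.0}},
                },
            },
        }
        printScopedStats(data)
    }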
diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index bcfa4fd..b68ef47 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -150,11 +150,6 @@ // On additional scope request if (selectedScope == "load-all") { - // Push scope to statsTable (Needs to be in this case, else newly selected 'Metric.svelte' renders cause statsTable race condition) - const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node") - if (statsTableData.length > 0) { - dispatch("more-loaded", statsTableData); - } // Set selected scope to min of returned scopes selectedScope = minScope(scopes) nodeOnly = (selectedScope == "node") // "node" still only scope after load-all diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index c8f12f2..159d24b 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -3,13 +3,14 @@ Properties: - `job Object`: The job object - - `jobMetrics [Object]`: The jobs metricdata - - Exported: - - `moreLoaded`: Adds additional scopes requested from Metric.svelte in Job-View --> diff --git a/web/frontend/src/job/StatsTableEntry.svelte b/web/frontend/src/job/StatsTableEntry.svelte index 9504a63..dc2f628 100644 --- a/web/frontend/src/job/StatsTableEntry.svelte +++ b/web/frontend/src/job/StatsTableEntry.svelte @@ -37,8 +37,8 @@ return s.dir != "up" ? a[field] - b[field] : b[field] - a[field]; } else { return s.dir != "up" - ? a.statistics[field] - b.statistics[field] - : b.statistics[field] - a.statistics[field]; + ? a.data[field] - b.data[field] + : b.data[field] - a.data[field]; } }); } @@ -52,7 +52,7 @@ $: series = jobMetrics .find((jm) => jm.name == metric && jm.scope == scope) - ?.metric.series.filter((s) => s.hostname == host && s.statistics != null) + ?.stats.filter((s) => s.hostname == host && s.data != null) ?.sort(compareNumbers); @@ -60,13 +60,13 @@
{:else if series.length == 1 && scope == "node"} {:else} - - - + + + {/each}
- - {#each selectedMetrics as metric} @@ -163,7 +170,7 @@ From 38569f55c740fa92019cfb772f902cea073d653f Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 28 Feb 2025 13:09:04 +0100 Subject: [PATCH 361/443] add title to roofline plot - Clarify that roofline is CPU only --- web/frontend/src/generic/plots/Roofline.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/generic/plots/Roofline.svelte b/web/frontend/src/generic/plots/Roofline.svelte index 558d8e8..2941ecb 100644 --- a/web/frontend/src/generic/plots/Roofline.svelte +++ b/web/frontend/src/generic/plots/Roofline.svelte @@ -179,7 +179,7 @@ function render(plotData) { if (plotData) { const opts = { - title: "", + title: "CPU Roofline Diagram", mode: 2, width: width, height: height, From 42135fd26ceb2c30b02800618723026ff2426064 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 28 Feb 2025 13:37:28 +0100 Subject: [PATCH 362/443] if disableClusterSelection is set, display info in cluster filter - instead of undocumented unresponsive cluster name select --- .../src/generic/filters/Cluster.svelte | 37 +++++++++++-------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/web/frontend/src/generic/filters/Cluster.svelte b/web/frontend/src/generic/filters/Cluster.svelte index 8606247..f886582 100644 --- a/web/frontend/src/generic/filters/Cluster.svelte +++ b/web/frontend/src/generic/filters/Cluster.svelte @@ -43,26 +43,31 @@ {#if $initialized}

Cluster

- - ((pendingCluster = null), (pendingPartition = null))} - > - Any Cluster - - {#each clusters as cluster} + {#if disableClusterSelection} + + + {:else} + ( - (pendingCluster = cluster.name), (pendingPartition = null) - )} + active={pendingCluster == null} + on:click={() => ((pendingCluster = null), (pendingPartition = null))} > - {cluster.name} + Any Cluster - {/each} - + {#each clusters as cluster} + ( + (pendingCluster = cluster.name), (pendingPartition = null) + )} + > + {cluster.name} + + {/each} + + {/if} {/if} {#if $initialized && pendingCluster != null}
From d5394c9e92de8d04a186ac18db05838c1c732c70 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 28 Feb 2025 13:37:59 +0100 Subject: [PATCH 363/443] fix: analysis view top links fixed, add full name to topusers --- web/frontend/src/Analysis.root.svelte | 9 ++++++--- web/frontend/src/Status.root.svelte | 3 ++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index 40757d3..1617ccd 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -70,6 +70,8 @@ ...new Set([...metricsInHistograms, ...metricsInScatterplots.flat()]), ]; + $: clusterName = cluster?.name ? cluster.name : cluster; + const sortOptions = [ { key: "totalWalltime", label: "Walltime" }, { key: "totalNodeHours", label: "Node Hours" }, @@ -159,6 +161,7 @@ groupBy: $groupBy ) { id + name totalWalltime totalNodeHours totalCoreHours @@ -423,14 +426,14 @@
{te.id}{te.id} {te?.name ? `(${te.name})` : ''}{te.id}{tu.id}{tu.id} {tu?.name ? `(${tu.name})` : ''} {tu[topUserSelection.key]}
{te.id} {te?.name ? `(${te.name})` : ''}{te.id} {tu.id} {tu?.name ? `(${tu.name})` : ''}{tu.id}{tu[topUserSelection.key]}
-

- Loading nodes {nodes.length + 1} to - { matchedNodes - ? `${(nodes.length + paging.itemsPerPage) > matchedNodes ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total` - : (nodes.length + paging.itemsPerPage) - } -

+ {#if !usePaging} +

+ Loading nodes {nodes.length + 1} to + { matchedNodes + ? `${(nodes.length + paging.itemsPerPage) > matchedNodes ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total` + : (nodes.length + paging.itemsPerPage) + } +

+ {/if}
No data - {series[0].statistics.min} + {series[0].data.min} - {series[0].statistics.avg} + {series[0].data.avg} - {series[0].statistics.max} + {series[0].data.max} @@ -86,9 +86,9 @@ {#each series as s, i}
{s.id ?? i}{s.statistics.min}{s.statistics.avg}{s.statistics.max}{s.data.min}{s.data.avg}{s.data.max}
diff --git a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte index cf90408..fe6693b 100644 --- a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte +++ b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte @@ -40,14 +40,14 @@ const client = getContextClient(); const polarQuery = gql` query ($dbid: ID!, $selectedMetrics: [String!]!) { - jobMetricStats(id: $dbid, metrics: $selectedMetrics) { + jobStats(id: $dbid, metrics: $selectedMetrics) { name stats { - min - avg - max - } + min + avg + max } + } } `; @@ -66,7 +66,7 @@ {:else} {/if} \ No newline at end of file From 6a7546c43b7e833ed32054b51390a94607deb356 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 14 Mar 2025 10:03:53 +0100 Subject: [PATCH 389/443] Clarify header for breaking changes --- ReleaseNotes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index ef1082f..f047abf 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -6,7 +6,7 @@ This is a bug fix release of `cc-backend`, the API backend and frontend implementation of ClusterCockpit. For release specific notes visit the [ClusterCockpit Documentation](https://clusterockpit.org/docs/release/). -## Breaking changes +## Breaking changes for minor release 1.4.x - You need to perform a database migration. Depending on your database size the migration might require several hours! From 25aaf55b93a13d853c6ef57d08ccc22488ddaf84 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 14 Mar 2025 10:06:25 +0100 Subject: [PATCH 390/443] Add feature to Releasenotes --- ReleaseNotes.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index f047abf..beb8ee1 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -28,6 +28,8 @@ For release specific notes visit the [ClusterCockpit Documentation](https://clus - Color Blind Mode - Set on a per-user basis in options - Applies to plot data, plot background color, statsseries colors, roofline timescale +- Job-View metric selection is now persisted based on the jobs subcluster. +Helpful for heterogeneous subcluster configurations. 
- Histogram Bin Select in User-View - Metric-Histograms: `10 Bins` now default, selectable options `20, 50, 100` - Job-Duration-Histogram: `48h in 1h Bins` now default, selectable options: From 33c6cdb9feaa3bca13571c58bedc541550f1ff28 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Fri, 14 Mar 2025 10:52:27 +0100 Subject: [PATCH 391/443] Update test workflow --- .github/workflows/Release.yml | 331 ---------------------------------- .github/workflows/test.yml | 2 +- 2 files changed, 1 insertion(+), 332 deletions(-) delete mode 100644 .github/workflows/Release.yml diff --git a/.github/workflows/Release.yml b/.github/workflows/Release.yml deleted file mode 100644 index 8fc8755..0000000 --- a/.github/workflows/Release.yml +++ /dev/null @@ -1,331 +0,0 @@ -# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions - -# Workflow name -name: Release - -# Run on tag push -on: - push: - tags: - - '**' - -jobs: - - # - # Build on AlmaLinux 8.5 using golang-1.18.2 - # - AlmaLinux-RPM-build: - runs-on: ubuntu-latest - # See: https://hub.docker.com/_/almalinux - container: almalinux:8.5 - # The job outputs link to the outputs of the 'rpmrename' step - # Only job outputs can be used in child jobs - outputs: - rpm : ${{steps.rpmrename.outputs.RPM}} - srpm : ${{steps.rpmrename.outputs.SRPM}} - steps: - - # Use dnf to install development packages - - name: Install development packages - run: | - dnf --assumeyes group install "Development Tools" "RPM Development Tools" - dnf --assumeyes install wget openssl-devel diffutils delve which npm - dnf --assumeyes install 'dnf-command(builddep)' - - # Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - - # Use dnf to install build dependencies - - name: Install build dependencies - run: | - wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm - rpm -i go*.rpm - npm install --global yarn rollup svelte rollup-plugin-svelte - #dnf --assumeyes builddep build/package/cc-backend.spec - - - name: RPM build ClusterCockpit - id: rpmbuild - run: make RPM - - # AlmaLinux 8.5 is a derivate of RedHat Enterprise Linux 8 (UBI8), - # so the created RPM both contain the substring 'el8' in the RPM file names - # This step replaces the substring 'el8' to 'alma85'. It uses the move operation - # because it is unclear whether the default AlmaLinux 8.5 container contains the - # 'rename' command. This way we also get the new names for output. 
- - name: Rename RPMs (s/el8/alma85/) - id: rpmrename - run: | - OLD_RPM="${{steps.rpmbuild.outputs.RPM}}" - OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}" - NEW_RPM="${OLD_RPM/el8/alma85}" - NEW_SRPM=${OLD_SRPM/el8/alma85} - mv "${OLD_RPM}" "${NEW_RPM}" - mv "${OLD_SRPM}" "${NEW_SRPM}" - echo "::set-output name=SRPM::${NEW_SRPM}" - echo "::set-output name=RPM::${NEW_RPM}" - - # See: https://github.com/actions/upload-artifact - - name: Save RPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend RPM for AlmaLinux 8.5 - path: ${{ steps.rpmrename.outputs.RPM }} - - name: Save SRPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend SRPM for AlmaLinux 8.5 - path: ${{ steps.rpmrename.outputs.SRPM }} - - # - # Build on UBI 8 using golang-1.18.2 - # - UBI-8-RPM-build: - runs-on: ubuntu-latest - # See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti - container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065 - # The job outputs link to the outputs of the 'rpmbuild' step - outputs: - rpm : ${{steps.rpmbuild.outputs.RPM}} - srpm : ${{steps.rpmbuild.outputs.SRPM}} - steps: - - # Use dnf to install development packages - - name: Install development packages - run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which - - # Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - - # Use dnf to install build dependencies - - name: Install build dependencies - run: | - wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \ - http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm - rpm -i go*.rpm - dnf --assumeyes --disableplugin=subscription-manager install npm - npm install --global yarn rollup svelte rollup-plugin-svelte - #dnf --assumeyes builddep build/package/cc-backend.spec - - - name: RPM build ClusterCockpit - id: rpmbuild - run: make RPM - - # See: https://github.com/actions/upload-artifact - - name: Save RPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend RPM for UBI 8 - path: ${{ steps.rpmbuild.outputs.RPM }} - - name: Save SRPM as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend SRPM for UBI 8 - path: ${{ steps.rpmbuild.outputs.SRPM }} - - # - # Build on Ubuntu 20.04 using official go 1.19.1 package - # - Ubuntu-focal-build: - runs-on: ubuntu-latest - container: ubuntu:20.04 - # The job outputs link to the outputs of the 'debrename' step - # Only job outputs can be used in child jobs - outputs: - deb : ${{steps.debrename.outputs.DEB}} - steps: - # Use apt to install development packages - - name: Install development packages - run: | - apt update && apt --assume-yes upgrade - apt --assume-yes install build-essential sed git wget bash - apt --assume-yes install npm - npm install --global yarn rollup svelte rollup-plugin-svelte - # 
Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - # Use official golang package - - name: Install Golang - run: | - wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz - tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - go version - - name: DEB build ClusterCockpit - id: dpkg-build - run: | - ls -la - pwd - env - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - git config --global --add safe.directory $(pwd) - make DEB - - name: Rename DEB (add '_ubuntu20.04') - id: debrename - run: | - OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev) - NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu20.04.deb" - mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}" - echo "::set-output name=DEB::${NEW_DEB_FILE}" - # See: https://github.com/actions/upload-artifact - - name: Save DEB as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 20.04 - path: ${{ steps.debrename.outputs.DEB }} - - # - # Build on Ubuntu 20.04 using official go 1.19.1 package - # - Ubuntu-jammy-build: - runs-on: ubuntu-latest - container: ubuntu:22.04 - # The job outputs link to the outputs of the 'debrename' step - # Only job outputs can be used in child jobs - outputs: - deb : ${{steps.debrename.outputs.DEB}} - steps: - # Use apt to install development packages - - name: Install development packages - run: | - apt update && apt --assume-yes upgrade - apt --assume-yes install build-essential sed git wget bash npm - npm install --global yarn rollup svelte rollup-plugin-svelte - # Checkout git repository and submodules - # fetch-depth must be 0 to use git describe - # See: https://github.com/marketplace/actions/checkout - - name: Checkout - uses: actions/checkout@v2 - with: - submodules: recursive - fetch-depth: 0 - # Use official golang package - - name: Install Golang - run: | - wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz - tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - go version - - name: DEB build ClusterCockpit - id: dpkg-build - run: | - ls -la - pwd - env - export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH - git config --global --add safe.directory $(pwd) - make DEB - - name: Rename DEB (add '_ubuntu22.04') - id: debrename - run: | - OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' 
-f 2- | rev) - NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb" - mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}" - echo "::set-output name=DEB::${NEW_DEB_FILE}" - # See: https://github.com/actions/upload-artifact - - name: Save DEB as artifact - uses: actions/upload-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 22.04 - path: ${{ steps.debrename.outputs.DEB }} - - # - # Create release with fresh RPMs - # - Release: - runs-on: ubuntu-latest - # We need the RPMs, so add dependency - needs: [AlmaLinux-RPM-build, UBI-8-RPM-build, Ubuntu-focal-build, Ubuntu-jammy-build] - - steps: - # See: https://github.com/actions/download-artifact - - name: Download AlmaLinux 8.5 RPM - uses: actions/download-artifact@v2 - with: - name: cc-backend RPM for AlmaLinux 8.5 - - name: Download AlmaLinux 8.5 SRPM - uses: actions/download-artifact@v2 - with: - name: cc-backend SRPM for AlmaLinux 8.5 - - - name: Download UBI 8 RPM - uses: actions/download-artifact@v2 - with: - name: cc-backend RPM for UBI 8 - - name: Download UBI 8 SRPM - uses: actions/download-artifact@v2 - with: - name: cc-backend SRPM for UBI 8 - - - name: Download Ubuntu 20.04 DEB - uses: actions/download-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 20.04 - - - name: Download Ubuntu 22.04 DEB - uses: actions/download-artifact@v2 - with: - name: cc-backend DEB for Ubuntu 22.04 - - # The download actions do not publish the name of the downloaded file, - # so we re-use the job outputs of the parent jobs. The files are all - # downloaded to the current folder. - # The gh-release action afterwards does not accept file lists but all - # files have to be listed at 'files'. The step creates one output per - # RPM package (2 per distro) - - name: Set RPM variables - id: files - run: | - ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}") - ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}") - UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}") - UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}") - U_2004_DEB=$(basename "${{ needs.Ubuntu-focal-build.outputs.deb}}") - U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}") - echo "ALMA_85_RPM::${ALMA_85_RPM}" - echo "ALMA_85_SRPM::${ALMA_85_SRPM}" - echo "UBI_8_RPM::${UBI_8_RPM}" - echo "UBI_8_SRPM::${UBI_8_SRPM}" - echo "U_2004_DEB::${U_2004_DEB}" - echo "U_2204_DEB::${U_2204_DEB}" - echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}" - echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}" - echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}" - echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}" - echo "::set-output name=U_2004_DEB::${U_2004_DEB}" - echo "::set-output name=U_2204_DEB::${U_2204_DEB}" - - # See: https://github.com/softprops/action-gh-release - - name: Release - uses: softprops/action-gh-release@v1 - if: startsWith(github.ref, 'refs/tags/') - with: - name: cc-backend-${{github.ref_name}} - files: | - ${{ steps.files.outputs.ALMA_85_RPM }} - ${{ steps.files.outputs.ALMA_85_SRPM }} - ${{ steps.files.outputs.UBI_8_RPM }} - ${{ steps.files.outputs.UBI_8_SRPM }} - ${{ steps.files.outputs.U_2004_DEB }} - ${{ steps.files.outputs.U_2204_DEB }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e4aa02b..a8a7429 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,7 +7,7 @@ jobs: - name: Install Go uses: actions/setup-go@v4 with: - go-version: 1.22.x + go-version: 1.24.x - name: Checkout code uses: actions/checkout@v3 - name: Build, Vet & Test From 
8da2fc30c39d2ce4b3c7c2b532b480851f431d89 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 14 Mar 2025 16:36:31 +0100 Subject: [PATCH 392/443] split statsTable data from jobMetrics query, frontend refactor --- internal/graph/schema.resolvers.go | 3 - web/frontend/src/Job.root.svelte | 18 +- web/frontend/src/job/StatsTab.svelte | 145 +++++++++++++ web/frontend/src/job/StatsTable.svelte | 201 ------------------ .../src/job/statstab/StatsTable.svelte | 139 ++++++++++++ .../job/{ => statstab}/StatsTableEntry.svelte | 50 ++--- 6 files changed, 314 insertions(+), 242 deletions(-) create mode 100644 web/frontend/src/job/StatsTab.svelte delete mode 100644 web/frontend/src/job/StatsTable.svelte create mode 100644 web/frontend/src/job/statstab/StatsTable.svelte rename web/frontend/src/job/{ => statstab}/StatsTableEntry.svelte (67%) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 1565c7e..a470807 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -343,11 +343,9 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [ res := make([]*model.JobStatsWithScope, 0) for name, scoped := range data { for scope, stats := range scoped { - // log.Debugf("HANDLE >>>>> %s @ %s -> First Array Value %#v", name, scope, *stats[0]) mdlStats := make([]*model.ScopedStats, 0) for _, stat := range stats { - // log.Debugf("CONVERT >>>>> >>>>> %s -> %v -> %#v", stat.Hostname, stat.Id, stat.Data) mdlStats = append(mdlStats, &model.ScopedStats{ Hostname: stat.Hostname, ID: stat.Id, @@ -355,7 +353,6 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [ }) } - // log.Debugf("APPEND >>>>> >>>>> %#v", mdlStats) res = append(res, &model.JobStatsWithScope{ Name: name, Scope: scope, diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 2fe5bc4..c2748e6 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -40,7 +40,7 @@ import JobRoofline from "./job/JobRoofline.svelte"; import EnergySummary from "./job/EnergySummary.svelte"; import PlotGrid from "./generic/PlotGrid.svelte"; - import StatsTable from "./job/StatsTable.svelte"; + import StatsTab from "./job/StatsTab.svelte"; export let dbid; export let username; @@ -53,10 +53,8 @@ let isMetricsSelectionOpen = false, selectedMetrics = [], - selectedScopes = []; - - let plots = {}, - statsTable + selectedScopes = [], + plots = {}; let availableMetrics = new Set(), missingMetrics = [], @@ -386,14 +384,8 @@
{/if} - - - + +
{#if $initq.data.job.metaData?.jobScript} diff --git a/web/frontend/src/job/StatsTab.svelte b/web/frontend/src/job/StatsTab.svelte new file mode 100644 index 0000000..b7647b5 --- /dev/null +++ b/web/frontend/src/job/StatsTab.svelte @@ -0,0 +1,145 @@ + + + + + + + + + {#if job.numNodes > 1} + + {/if} + + +
+ + {#if $scopedStats.fetching} + + + + + + {:else if $scopedStats.error} + + + {$scopedStats.error.message} + + + {:else} + r.hostname).sort()} + data={$scopedStats?.data?.scopedJobStats} + {selectedMetrics} + /> + {/if} +
+ + diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte deleted file mode 100644 index 159d24b..0000000 --- a/web/frontend/src/job/StatsTable.svelte +++ /dev/null @@ -1,201 +0,0 @@ - - - - - - - - - -
- - - - - - {/each} - - - - - {#each selectedMetrics as metric} - {#if selectedScopes[metric] != "node"} - - {/if} - {#each ["min", "avg", "max"] as stat} - - {/each} - {/each} - - - - {#each hosts as host (host)} - - - {#each selectedMetrics as metric (metric)} - - {/each} - - {/each} - -
- {#each selectedMetrics as metric} - - - - - {metric} - - - {#each scopesForMetric(metric, jobMetrics) as scope} - - {/each} - - -
NodeId sortBy(metric, stat)}> - {stat} - {#if selectedScopes[metric] == "node"} - - {/if} -
{host}
- - diff --git a/web/frontend/src/job/statstab/StatsTable.svelte b/web/frontend/src/job/statstab/StatsTable.svelte new file mode 100644 index 0000000..4adb4bd --- /dev/null +++ b/web/frontend/src/job/statstab/StatsTable.svelte @@ -0,0 +1,139 @@ + + + + + + + + + + {/each} + + + + + {#each selectedMetrics as metric} + {#if selectedScopes[metric] != "node"} + + {/if} + {#each ["min", "avg", "max"] as stat} + + {/each} + {/each} + + + + {#each hosts as host (host)} + + + {#each selectedMetrics as metric (metric)} + + {/each} + + {/each} + +
+ {#each selectedMetrics as metric} + + + + + {metric} + + + {#each (availableScopes[metric] || []) as scope} + + {/each} + + +
NodeId sortBy(metric, stat)}> + {stat} + {#if selectedScopes[metric] == "node"} + + {/if} +
{host}
\ No newline at end of file diff --git a/web/frontend/src/job/StatsTableEntry.svelte b/web/frontend/src/job/statstab/StatsTableEntry.svelte similarity index 67% rename from web/frontend/src/job/StatsTableEntry.svelte rename to web/frontend/src/job/statstab/StatsTableEntry.svelte index dc2f628..b39eacb 100644 --- a/web/frontend/src/job/StatsTableEntry.svelte +++ b/web/frontend/src/job/statstab/StatsTableEntry.svelte @@ -1,11 +1,11 @@ -{#if series == null || series.length == 0} +{#if stats == null || stats.length == 0} No data -{:else if series.length == 1 && scope == "node"} +{:else if stats.length == 1 && scope == "node"} - {series[0].data.min} + {stats[0].data.min} - {series[0].data.avg} + {stats[0].data.avg} - {series[0].data.max} + {stats[0].data.max} {:else} @@ -76,14 +76,14 @@ sortByField(field)}> Sort {/each} - {#each series as s, i} + {#each stats as s, i} {s.id ?? i} {s.data.min} From 0144ad43f57c6e3c903a7a9f7152cd8617920e59 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 17 Mar 2025 11:03:51 +0100 Subject: [PATCH 393/443] Implement NodeListData and ScopedStats for Prometheus Backend --- internal/metricdata/prometheus.go | 167 ++++++++++++++++++++++++++++-- 1 file changed, 161 insertions(+), 6 deletions(-) diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index fe829c0..d16501e 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -448,18 +448,51 @@ func (pdb *PrometheusDataRepository) LoadNodeData( return data, nil } +// Implemented by NHR@FAU; Used in Job-View StatsTable func (pdb *PrometheusDataRepository) LoadScopedStats( job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error) { - // TODO : Implement to be used in Job-View StatsTable - log.Infof("LoadScopedStats unimplemented for PrometheusDataRepository, Args: job-id %v, metrics %v, scopes %v", job.JobID, metrics, scopes) + // Assumption: pdb.loadData() only returns series node-scope - use node scope for statsTable + scopedJobStats := make(schema.ScopedJobStats) + data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) + if err != nil { + log.Warn("Error while loading job for scopedJobStats") + return nil, err + } - return nil, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository") + for metric, metricData := range data { + for _, scope := range scopes { + if scope != schema.MetricScopeNode { + logOnce.Do(func() { + log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + }) + continue + } + + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for _, series := range metricData[scope].Series { + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: series.Hostname, + Data: &series.Statistics, + }) + } + } + } + + return scopedJobStats, nil } +// Implemented by NHR@FAU; Used in NodeList-View func (pdb *PrometheusDataRepository) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -470,10 +503,132 @@ func (pdb *PrometheusDataRepository) LoadNodeListData( ctx context.Context, ) (map[string]schema.JobData, int, bool, error) { + // Assumption: pdb.loadData() only returns series node-scope - use node 
scope for NodeList + + // 0) Init additional vars var totalNodes int = 0 var hasNextPage bool = false - // TODO : Implement to be used in NodeList-View - log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) - return nil, totalNodes, hasNextPage, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository") + // 1) Get list of all nodes + var nodes []string + if subCluster != "" { + scNodes := archive.NodeLists[cluster][subCluster] + nodes = scNodes.PrintList() + } else { + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + nodes = append(nodes, nodeList.PrintList()...) + } + } + + // 2) Filter nodes + if nodeFilter != "" { + filteredNodes := []string{} + for _, node := range nodes { + if strings.Contains(node, nodeFilter) { + filteredNodes = append(filteredNodes, node) + } + } + nodes = filteredNodes + } + + // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ... + totalNodes = len(nodes) + sort.Strings(nodes) + + // 3) Apply paging + if len(nodes) > page.ItemsPerPage { + start := (page.Page - 1) * page.ItemsPerPage + end := start + page.ItemsPerPage + if end > len(nodes) { + end = len(nodes) + hasNextPage = false + } else { + hasNextPage = true + } + nodes = nodes[start:end] + } + + // 4) Fetch Data, based on pdb.LoadNodeData() + + t0 := time.Now() + // Map of hosts of jobData + data := make(map[string]schema.JobData) + + // query db for each metric + // TODO: scopes seems to be always empty + if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { + scopes = append(scopes, schema.MetricScopeNode) + } + + for _, scope := range scopes { + if scope != schema.MetricScopeNode { + logOnce.Do(func() { + log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + }) + continue + } + + for _, metric := range metrics { + metricConfig := archive.GetMetricConfig(cluster, metric) + if metricConfig == nil { + log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster) + return nil, totalNodes, hasNextPage, errors.New("Prometheus config error") + } + query, err := pdb.FormatQuery(metric, scope, nodes, cluster) + if err != nil { + log.Warn("Error while formatting prometheus query") + return nil, totalNodes, hasNextPage, err + } + + // ranged query over all nodes + r := promv1.Range{ + Start: from, + End: to, + Step: time.Duration(metricConfig.Timestep * 1e9), + } + result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) + if err != nil { + log.Errorf("Prometheus query error in LoadNodeData: %v\n", err) + return nil, totalNodes, hasNextPage, errors.New("Prometheus query error") + } + if len(warnings) > 0 { + log.Warnf("Warnings: %v\n", warnings) + } + + step := int64(metricConfig.Timestep) + steps := int64(to.Sub(from).Seconds()) / step + + // iter rows of host, metric, values + for _, row := range result.(promm.Matrix) { + hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix) + + hostdata, ok := data[hostname] + if !ok { + hostdata = make(schema.JobData) + data[hostname] = hostdata + } + + metricdata, ok := hostdata[metric] + if !ok { + metricdata = make(map[schema.MetricScope]*schema.JobMetric) + data[hostname][metric] = metricdata + } + + // output per host, metric and scope + scopeData, ok := metricdata[scope] + if !ok { + scopeData = &schema.JobMetric{ + Unit: 
metricConfig.Unit, + Timestep: metricConfig.Timestep, + Series: []schema.Series{pdb.RowToSeries(from, step, steps, row)}, + } + data[hostname][metric][scope] = scopeData + } + } + } + } + t1 := time.Since(t0) + log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1) + return data, totalNodes, hasNextPage, nil } From 93040d46296eb16f4a9c02bef220e33f3abefdd0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 17 Mar 2025 15:25:33 +0100 Subject: [PATCH 394/443] IMplement LoadNode Data, LoadNodeListData, LoadScopedStats for influxDB2 backend - Untested - Only Node Scope --- internal/metricdata/influxdb-v2.go | 250 +++++++++++++++++++++++++++-- 1 file changed, 240 insertions(+), 10 deletions(-) diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index 2a943b6..c53dad3 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -10,6 +10,8 @@ import ( "encoding/json" "errors" "fmt" + "math" + "sort" "strings" "time" @@ -64,6 +66,8 @@ func (idb *InfluxDBv2DataRepository) LoadData( ctx context.Context, resolution int) (schema.JobData, error) { + log.Infof("InfluxDB 2 Backend: Resolution Scaling not Implemented, will return default timestep. Requested Resolution %d", resolution) + measurementsConds := make([]string, 0, len(metrics)) for _, m := range metrics { measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m)) @@ -86,7 +90,7 @@ func (idb *InfluxDBv2DataRepository) LoadData( query := "" switch scope { case "node": - // Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows + // Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows <-- Resolution could be added here? // log.Info("Scope 'node' requested. ") query = fmt.Sprintf(` from(bucket: "%s") @@ -116,6 +120,12 @@ func (idb *InfluxDBv2DataRepository) LoadData( // idb.bucket, // idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )), // measurementsCond, hostsCond) + case "hwthread": + log.Info(" Scope 'hwthread' requested, but not yet supported: Will return 'node' scope only. ") + continue + case "accelerator": + log.Info(" Scope 'accelerator' requested, but not yet supported: Will return 'node' scope only. 
") + continue default: log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope) continue @@ -173,6 +183,11 @@ func (idb *InfluxDBv2DataRepository) LoadData( } case "socket": continue + case "accelerator": + continue + case "hwthread": + // See below @ core + continue case "core": continue // Include Series.Id in hostSeries @@ -301,18 +316,53 @@ func (idb *InfluxDBv2DataRepository) LoadStats( return stats, nil } +// Used in Job-View StatsTable +// UNTESTED func (idb *InfluxDBv2DataRepository) LoadScopedStats( job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error) { - // TODO : Implement to be used in JobView Stats Table - log.Infof("LoadScopedStats unimplemented for InfluxDBv2DataRepository, Args: Job-ID %d, metrics %v, scopes %v", job.JobID, metrics, scopes) + // Assumption: idb.loadData() only returns series node-scope - use node scope for statsTable + scopedJobStats := make(schema.ScopedJobStats) + data, err := idb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) + if err != nil { + log.Warn("Error while loading job for scopedJobStats") + return nil, err + } - return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") + for metric, metricData := range data { + for _, scope := range scopes { + if scope != schema.MetricScopeNode { + logOnce.Do(func() { + log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope) + }) + continue + } + + if _, ok := scopedJobStats[metric]; !ok { + scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats) + } + + if _, ok := scopedJobStats[metric][scope]; !ok { + scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0) + } + + for _, series := range metricData[scope].Series { + scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ + Hostname: series.Hostname, + Data: &series.Statistics, + }) + } + } + } + + return scopedJobStats, nil } +// Used in Systems-View @ Node-Overview +// UNTESTED func (idb *InfluxDBv2DataRepository) LoadNodeData( cluster string, metrics, nodes []string, @@ -320,12 +370,123 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData( from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { - // TODO : Implement to be used in Analysis- und System/Node-View - log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes) + // Note: scopes[] Array will be ignored, only return node scope - return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") + // CONVERT ARGS TO INFLUX + measurementsConds := make([]string, 0) + for _, m := range metrics { + measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m)) + } + measurementsCond := strings.Join(measurementsConds, " or ") + + hostsConds := make([]string, 0) + if nodes == nil { + var allNodes []string + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + allNodes = append(nodes, nodeList.PrintList()...) 
+ } + for _, node := range allNodes { + nodes = append(nodes, node) + hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node)) + } + } else { + for _, node := range nodes { + hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node)) + } + } + hostsCond := strings.Join(hostsConds, " or ") + + // BUILD AND PERFORM QUERY + query := fmt.Sprintf(` + from(bucket: "%s") + |> range(start: %s, stop: %s) + |> filter(fn: (r) => (%s) and (%s) ) + |> drop(columns: ["_start", "_stop"]) + |> group(columns: ["hostname", "_measurement"]) + |> aggregateWindow(every: 60s, fn: mean) + |> drop(columns: ["_time"])`, + idb.bucket, + idb.formatTime(from), idb.formatTime(to), + measurementsCond, hostsCond) + + rows, err := idb.queryClient.Query(ctx, query) + if err != nil { + log.Error("Error while performing query") + return nil, err + } + + // HANDLE QUERY RETURN + // Collect Float Arrays for Node@Metric -> No Scope Handling! + influxData := make(map[string]map[string][]schema.Float) + for rows.Next() { + row := rows.Record() + host, field := row.ValueByKey("hostname").(string), row.Measurement() + + influxHostData, ok := influxData[host] + if !ok { + influxHostData = make(map[string][]schema.Float) + influxData[host] = influxHostData + } + + influxFieldData, ok := influxData[host][field] + if !ok { + influxFieldData = make([]schema.Float, 0) + influxData[host][field] = influxFieldData + } + + val, ok := row.Value().(float64) + if ok { + influxData[host][field] = append(influxData[host][field], schema.Float(val)) + } else { + influxData[host][field] = append(influxData[host][field], schema.Float(0)) + } + } + + // BUILD FUNCTION RETURN + data := make(map[string]map[string][]*schema.JobMetric) + for node, metricData := range influxData { + + nodeData, ok := data[node] + if !ok { + nodeData = make(map[string][]*schema.JobMetric) + data[node] = nodeData + } + + for metric, floatArray := range metricData { + avg, min, max := 0.0, 0.0, 0.0 + for _, val := range floatArray { + avg += float64(val) + min = math.Min(min, float64(val)) + max = math.Max(max, float64(val)) + } + + stats := schema.MetricStatistics{ + Avg: (math.Round((avg/float64(len(floatArray)))*100) / 100), + Min: (math.Round(min*100) / 100), + Max: (math.Round(max*100) / 100), + } + + mc := archive.GetMetricConfig(cluster, metric) + nodeData[metric] = append(nodeData[metric], &schema.JobMetric{ + Unit: mc.Unit, + Timestep: mc.Timestep, + Series: []schema.Series{ + { + Hostname: node, + Statistics: stats, + Data: floatArray, + }, + }, + }) + } + } + + return data, nil } +// Used in Systems-View @ Node-List +// UNTESTED func (idb *InfluxDBv2DataRepository) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -336,10 +497,79 @@ func (idb *InfluxDBv2DataRepository) LoadNodeListData( ctx context.Context, ) (map[string]schema.JobData, int, bool, error) { + // Assumption: idb.loadData() only returns series node-scope - use node scope for NodeList + + // 0) Init additional vars var totalNodes int = 0 var hasNextPage bool = false - // TODO : Implement to be used in NodeList-View - log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes) - return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") + // 1) Get list of all nodes + var nodes []string + if subCluster != "" { + scNodes := archive.NodeLists[cluster][subCluster] + nodes = 
scNodes.PrintList() + } else { + subClusterNodeLists := archive.NodeLists[cluster] + for _, nodeList := range subClusterNodeLists { + nodes = append(nodes, nodeList.PrintList()...) + } + } + + // 2) Filter nodes + if nodeFilter != "" { + filteredNodes := []string{} + for _, node := range nodes { + if strings.Contains(node, nodeFilter) { + filteredNodes = append(filteredNodes, node) + } + } + nodes = filteredNodes + } + + // 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ... + totalNodes = len(nodes) + sort.Strings(nodes) + + // 3) Apply paging + if len(nodes) > page.ItemsPerPage { + start := (page.Page - 1) * page.ItemsPerPage + end := start + page.ItemsPerPage + if end > len(nodes) { + end = len(nodes) + hasNextPage = false + } else { + hasNextPage = true + } + nodes = nodes[start:end] + } + + // 4) Fetch And Convert Data, use idb.LoadNodeData() for query + + rawNodeData, err := idb.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) + if err != nil { + log.Error(fmt.Sprintf("Error while loading influx nodeData for nodeListData %#v\n", err)) + return nil, totalNodes, hasNextPage, err + } + + data := make(map[string]schema.JobData) + for node, nodeData := range rawNodeData { + // Init Nested Map Data Structures If Not Found + hostData, ok := data[node] + if !ok { + hostData = make(schema.JobData) + data[node] = hostData + } + + for metric, nodeMetricData := range nodeData { + metricData, ok := hostData[metric] + if !ok { + metricData = make(map[schema.MetricScope]*schema.JobMetric) + data[node][metric] = metricData + } + + data[node][metric][schema.MetricScopeNode] = nodeMetricData[0] // Only Node Scope Returned from loadNodeData + } + } + + return data, totalNodes, hasNextPage, nil } From 9ed64e0388eb948bd8ad3b1dc07f14feb771a367 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 17 Mar 2025 17:39:17 +0100 Subject: [PATCH 395/443] Review logging, comment cleanup --- internal/graph/schema.resolvers.go | 12 ++--- internal/metricdata/cc-metric-store.go | 48 ++++++++----------- pkg/archive/archive.go | 14 +++--- .../src/job/statstab/StatsTable.svelte | 4 +- 4 files changed, 35 insertions(+), 43 deletions(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index a470807..b5966c7 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -301,17 +301,17 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str return res, err } -// JobMetricStats is the resolver for the jobStats field. +// JobStats is the resolver for the jobStats field. func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) { job, err := r.Query().Job(ctx, id) if err != nil { - log.Warnf("Error while querying job %s for metrics", id) + log.Warnf("Error while querying job %s for metadata", id) return nil, err } data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx) if err != nil { - log.Warnf("Error while loading job stat data for job id %s", id) + log.Warnf("Error while loading jobStats data for job id %s", id) return nil, err } @@ -326,17 +326,17 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin return res, err } -// JobStats is the resolver for the scopedJobStats field. +// ScopedJobStats is the resolver for the scopedJobStats field. 
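// It flattens the nested schema.ScopedJobStats map (metric -> scope ->
// []*schema.ScopedStats) into the flat []*model.JobStatsWithScope list
// that the GraphQL layer expects.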
func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) { job, err := r.Query().Job(ctx, id) if err != nil { - log.Warnf("Error while querying job %s for metrics", id) + log.Warnf("Error while querying job %s for metadata", id) return nil, err } data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx) if err != nil { - log.Warnf("Error while loading scoped job stat data for job id %s", id) + log.Warnf("Error while loading scopedJobStats data for job id %s", id) return nil, err } diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 6635299..9516e2b 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest( ) (*ApiQueryResponse, error) { buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(body); err != nil { - log.Warn("Error while encoding request body") + log.Errorf("Error while encoding request body: %s", err.Error()) return nil, err } req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf) if err != nil { - log.Warn("Error while building request body") + log.Errorf("Error while building request body: %s", err.Error()) return nil, err } if ccms.jwt != "" { @@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest( res, err := ccms.client.Do(req) if err != nil { - log.Error("Error while performing request") + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest( var resBody ApiQueryResponse if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { - log.Warn("Error while decoding result body") + log.Errorf("Error while decoding result body: %s", err.Error()) return nil, err } @@ -177,7 +177,7 @@ func (ccms *CCMetricStore) LoadData( ) (schema.JobData, error) { queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution) if err != nil { - log.Warn("Error while building queries") + log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) return nil, err } @@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error("Error while performing request") + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -557,16 +557,9 @@ func (ccms *CCMetricStore) LoadStats( ctx context.Context, ) (map[string]map[string]schema.MetricStatistics, error) { - // metricConfigs := archive.GetCluster(job.Cluster).MetricConfig - // resolution := 9000 - - // for _, mc := range metricConfigs { - // resolution = min(resolution, mc.Timestep) - // } - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope shere for analysis view accelerator normalization? 
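	// Node scope is sufficient at this point: only the per-node min/avg/max
	// aggregates are consumed below, not resampled series data (which is
	// presumably why resolution 0 is passed).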
if err != nil { - log.Warn("Error while building query") + log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error()) return nil, err } @@ -581,7 +574,7 @@ func (ccms *CCMetricStore) LoadStats( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error("Error while performing request") + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -591,9 +584,8 @@ func (ccms *CCMetricStore) LoadStats( metric := ccms.toLocalName(query.Metric) data := res[0] if data.Error != nil { - log.Infof("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) + log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) continue - // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) } metricdata, ok := stats[metric] @@ -603,9 +595,8 @@ func (ccms *CCMetricStore) LoadStats( } if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() { - log.Infof("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) + log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) continue - // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN") } metricdata[query.Hostname] = schema.MetricStatistics{ @@ -618,7 +609,7 @@ func (ccms *CCMetricStore) LoadStats( return stats, nil } -// Scoped Stats: Basically Load Data without resolution and data query flag? +// Used for Job-View Statistics Table func (ccms *CCMetricStore) LoadScopedStats( job *schema.Job, metrics []string, @@ -627,7 +618,7 @@ func (ccms *CCMetricStore) LoadScopedStats( ) (schema.ScopedJobStats, error) { queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0) if err != nil { - log.Warn("Error while building queries") + log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error()) return nil, err } @@ -642,7 +633,7 @@ func (ccms *CCMetricStore) LoadScopedStats( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error("Error while performing request") + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -709,7 +700,7 @@ func (ccms *CCMetricStore) LoadScopedStats( return scopedJobStats, nil } -// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known! - Todo Outdated with NodeListData? 
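// Note: only node-scope series are returned; honoring sub-node scopes would
// still require the per-node partition information mentioned in the TODO above.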
+// Used for Systems-View Node-Overview func (ccms *CCMetricStore) LoadNodeData( cluster string, metrics, nodes []string, @@ -743,7 +734,7 @@ func (ccms *CCMetricStore) LoadNodeData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) + log.Errorf("Error while performing request: %s", err.Error()) return nil, err } @@ -801,6 +792,7 @@ func (ccms *CCMetricStore) LoadNodeData( return data, nil } +// Used for Systems-View Node-List func (ccms *CCMetricStore) LoadNodeListData( cluster, subCluster, nodeFilter string, metrics []string, @@ -859,7 +851,7 @@ func (ccms *CCMetricStore) LoadNodeListData( queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) if err != nil { - log.Warn("Error while building queries") + log.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error()) return nil, totalNodes, hasNextPage, err } @@ -874,7 +866,7 @@ func (ccms *CCMetricStore) LoadNodeListData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) + log.Errorf("Error while performing request: %s", err.Error()) return nil, totalNodes, hasNextPage, err } @@ -979,7 +971,7 @@ func (ccms *CCMetricStore) buildNodeQueries( if subCluster != "" { subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster) if scterr != nil { - // TODO: Log + log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error()) return nil, nil, scterr } } @@ -989,7 +981,7 @@ func (ccms *CCMetricStore) buildNodeQueries( mc := archive.GetMetricConfig(cluster, metric) if mc == nil { // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) - log.Infof("metric '%s' is not specified for cluster '%s'", metric, cluster) + log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster) continue } diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 002fd5e..cd457eb 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -89,7 +89,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error { var version uint64 version, err = ar.Init(rawConfig) if err != nil { - log.Error("Error while initializing archiveBackend") + log.Errorf("Error while initializing archiveBackend: %s", err.Error()) return } log.Infof("Load archive version %d", version) @@ -112,7 +112,7 @@ func LoadAveragesFromArchive( ) error { metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } @@ -135,7 +135,7 @@ func LoadStatsFromArchive( data := make(map[string]schema.MetricStatistics, len(metrics)) metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return data, err } @@ -165,7 +165,7 @@ func LoadScopedStatsFromArchive( data, err := ar.LoadJobStats(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error()) return nil, err } @@ -175,7 +175,7 @@ func LoadScopedStatsFromArchive( func GetStatistics(job *schema.Job) 
(map[string]schema.JobStatistics, error) { metaFile, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return nil, err } @@ -191,7 +191,7 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error { jobMeta, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } @@ -211,7 +211,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error { jobMeta, err := ar.LoadJobMeta(job) if err != nil { - log.Warn("Error while loading job metadata from archiveBackend") + log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error()) return err } diff --git a/web/frontend/src/job/statstab/StatsTable.svelte b/web/frontend/src/job/statstab/StatsTable.svelte index 4adb4bd..2ed2f28 100644 --- a/web/frontend/src/job/statstab/StatsTable.svelte +++ b/web/frontend/src/job/statstab/StatsTable.svelte @@ -2,8 +2,8 @@ @component Job-View subcomponent; display table of metric data statistics with selectable scopes Properties: - - `job Object`: The job object - - `clusters Object`: The clusters object + - `data Object`: The data object + - `selectedMetrics [String]`: The selected metrics - `hosts [String]`: The list of hostnames of this job --> From c53f5eb144ec559cde19440748ca6886ad54f2a3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 18 Mar 2025 18:01:37 +0100 Subject: [PATCH 396/443] fix: always return hasNextPage boolean to frontend - removes dependency on uiDefaults setting --- internal/graph/schema.resolvers.go | 44 ++++++++++++++---------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index ce1384b..7d3de90 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -354,30 +354,28 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag return nil, err } - if !config.Keys.UiDefaults["job_list_usePaging"].(bool) { - hasNextPage := false - // page.Page += 1 : Simple, but expensive - // Example Page 4 @ 10 IpP : Does item 41 exist? - // Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists. - nextPage := &model.PageRequest{ - ItemsPerPage: 1, - Page: ((page.Page * page.ItemsPerPage) + 1), - } - - nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order) - if err != nil { - log.Warn("Error while querying next jobs") - return nil, err - } - - if len(nextJobs) == 1 { - hasNextPage = true - } - - return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil - } else { - return &model.JobResultList{Items: jobs, Count: &count}, nil + // Note: Even if App-Default 'config.Keys.UiDefaults["job_list_usePaging"]' is set, always return hasNextPage boolean. + // Users can decide in frontend to use continuous scroll, even if app-default is paging! + /* + Example Page 4 @ 10 IpP : Does item 41 exist? + Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists. 
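	   In general: probe the single item numbered page*itemsPerPage + 1;
	   exactly one hit means a further page exists, an empty probe result
	   marks the current page as the last one.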
+ */ + nextPage := &model.PageRequest{ + ItemsPerPage: 1, + Page: ((page.Page * page.ItemsPerPage) + 1), } + nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order) + if err != nil { + log.Warn("Error while querying next jobs") + return nil, err + } + + hasNextPage := false + if len(nextJobs) == 1 { + hasNextPage = true + } + + return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil } // JobsStatistics is the resolver for the jobsStatistics field. From e9a214c5b2272356ce89cbdac79056016134b735 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 19 Mar 2025 14:57:27 +0100 Subject: [PATCH 397/443] fix: add nullSafe condition to monitoringStatus display on metric queryError --- web/frontend/src/Job.root.svelte | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 6980230..22a69d8 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -231,7 +231,7 @@ {#if $initq.error} {$initq.error.message} - {:else if $initq.data} + {:else if $initq?.data} {#if $initq.data?.job?.metaData?.message} @@ -305,7 +305,7 @@ - {#if $initq.data} + {#if $initq?.data} From a6784b5549722466299541a8377c25800a83f318 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 8 Apr 2025 16:00:07 +0200 Subject: [PATCH 403/443] fix: reintroduce statstable id natural sort order - see Use natural sort order for IDs in statistics tables #369 --- web/frontend/src/job/statstab/StatsTableEntry.svelte | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/job/statstab/StatsTableEntry.svelte b/web/frontend/src/job/statstab/StatsTableEntry.svelte index b39eacb..c3161a5 100644 --- a/web/frontend/src/job/statstab/StatsTableEntry.svelte +++ b/web/frontend/src/job/statstab/StatsTableEntry.svelte @@ -41,7 +41,9 @@ if (a == null || b == null) return -1; if (field === "id") { - return s.dir != "up" ? a[field].localeCompare(b[field]) : b[field].localeCompare(a[field]) + return s.dir != "up" ? + a[field].localeCompare(b[field], undefined, {numeric: true, sensitivity: 'base'}) : + b[field].localeCompare(a[field], undefined, {numeric: true, sensitivity: 'base'}) } else { return s.dir != "up" ? 
a.data[field] - b.data[field] From a8d785beb348634ada38282689e05039cc081228 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Wed, 9 Apr 2025 07:27:59 +0200 Subject: [PATCH 404/443] Remove redundant check in auth package --- internal/api/rest.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index fd2f86d..8713976 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -105,6 +105,7 @@ func (api *RestApi) MountConfigApiRoutes(r *mux.Router) { r.StrictSlash(true) if api.Authentication != nil { + log.Debug("Mounting /configuration/ route") r.HandleFunc("/roles/", api.getRoles).Methods(http.MethodGet) r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet) @@ -229,7 +230,7 @@ func securedCheck(r *http.Request) error { if user.AuthType == schema.AuthToken { // If nothing declared in config: deny all request to this endpoint - if config.Keys.ApiAllowedIPs == nil || len(config.Keys.ApiAllowedIPs) == 0 { + if len(config.Keys.ApiAllowedIPs) == 0 { return fmt.Errorf("missing configuration key ApiAllowedIPs") } From 28cdc1d9e5c6a455a0ca15e624d844b665af3270 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Wed, 9 Apr 2025 09:13:21 +0200 Subject: [PATCH 405/443] fix: Update endpoints in Swagger UI --- api/swagger.json | 27 +++++++++++++-------------- api/swagger.yaml | 27 +++++++++++++-------------- internal/api/docs.go | 28 ++++++++++++++-------------- internal/api/rest.go | 34 ++++++++++++++++------------------ 4 files changed, 56 insertions(+), 60 deletions(-) diff --git a/api/swagger.json b/api/swagger.json index 51b22c8..683b520 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -15,9 +15,8 @@ "version": "1.0.0" }, "host": "localhost:8080", - "basePath": "/api", "paths": { - "/clusters/": { + "/api/clusters/": { "get": { "security": [ { @@ -74,7 +73,7 @@ } } }, - "/jobs/": { + "/api/jobs/": { "get": { "security": [ { @@ -169,7 +168,7 @@ } } }, - "/jobs/delete_job/": { + "/api/jobs/delete_job/": { "delete": { "security": [ { @@ -244,7 +243,7 @@ } } }, - "/jobs/delete_job/{id}": { + "/api/jobs/delete_job/{id}": { "delete": { "security": [ { @@ -314,7 +313,7 @@ } } }, - "/jobs/delete_job_before/{ts}": { + "/api/jobs/delete_job_before/{ts}": { "delete": { "security": [ { @@ -384,7 +383,7 @@ } } }, - "/jobs/edit_meta/{id}": { + "/api/jobs/edit_meta/{id}": { "post": { "security": [ { @@ -454,7 +453,7 @@ } } }, - "/jobs/start_job/": { + "/api/jobs/start_job/": { "post": { "security": [ { @@ -523,7 +522,7 @@ } } }, - "/jobs/stop_job/": { + "/api/jobs/stop_job/": { "post": { "security": [ { @@ -595,7 +594,7 @@ } } }, - "/jobs/tag_job/{id}": { + "/api/jobs/tag_job/{id}": { "post": { "security": [ { @@ -668,7 +667,7 @@ } } }, - "/jobs/{id}": { + "/api/jobs/{id}": { "get": { "security": [ { @@ -827,7 +826,7 @@ } } }, - "/notice/": { + "/config/notice/": { "post": { "security": [ { @@ -893,7 +892,7 @@ } } }, - "/user/{id}": { + "/config/user/{id}": { "post": { "security": [ { @@ -998,7 +997,7 @@ } } }, - "/users/": { + "/config/users/": { "get": { "security": [ { diff --git a/api/swagger.yaml b/api/swagger.yaml index f5f0081..35ec6c4 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -1,4 +1,3 @@ -basePath: /api definitions: api.ApiReturnedUser: properties: @@ -671,7 +670,7 @@ info: title: ClusterCockpit REST API version: 1.0.0 paths: - /clusters/: + /api/clusters/: get: description: Get a list of all cluster configs. 
Specific cluster can be requested using query parameter. @@ -708,7 +707,7 @@ paths: summary: Lists all cluster configs tags: - Cluster query - /jobs/: + /api/jobs/: get: description: |- Get a list of all jobs. Filters can be applied using query parameters. @@ -773,7 +772,7 @@ paths: summary: Lists all jobs tags: - Job query - /jobs/{id}: + /api/jobs/{id}: get: description: |- Job to get is specified by database ID @@ -882,7 +881,7 @@ paths: summary: Get job meta and configurable metric data tags: - Job query - /jobs/delete_job/: + /api/jobs/delete_job/: delete: consumes: - application/json @@ -932,7 +931,7 @@ paths: summary: Remove a job from the sql database tags: - Job remove - /jobs/delete_job/{id}: + /api/jobs/delete_job/{id}: delete: description: Job to remove is specified by database ID. This will not remove the job from the job archive. @@ -979,7 +978,7 @@ paths: summary: Remove a job from the sql database tags: - Job remove - /jobs/delete_job_before/{ts}: + /api/jobs/delete_job_before/{ts}: delete: description: Remove all jobs with start time before timestamp. The jobs will not be removed from the job archive. @@ -1026,7 +1025,7 @@ paths: summary: Remove a job from the sql database tags: - Job remove - /jobs/edit_meta/{id}: + /api/jobs/edit_meta/{id}: post: consumes: - application/json @@ -1073,7 +1072,7 @@ paths: summary: Edit meta-data json tags: - Job add and modify - /jobs/start_job/: + /api/jobs/start_job/: post: consumes: - application/json @@ -1120,7 +1119,7 @@ paths: summary: Adds a new job as "running" tags: - Job add and modify - /jobs/stop_job/: + /api/jobs/stop_job/: post: description: |- Job to stop is specified by request body. All fields are required in this case. @@ -1168,7 +1167,7 @@ paths: summary: Marks job as completed and triggers archiving tags: - Job add and modify - /jobs/tag_job/{id}: + /api/jobs/tag_job/{id}: post: consumes: - application/json @@ -1218,7 +1217,7 @@ paths: summary: Adds one or more tags to a job tags: - Job add and modify - /notice/: + /config/notice/: post: consumes: - multipart/form-data @@ -1263,7 +1262,7 @@ paths: summary: Updates or empties the notice box content tags: - User - /user/{id}: + /config/user/{id}: post: consumes: - multipart/form-data @@ -1337,7 +1336,7 @@ paths: summary: Updates an existing user tags: - User - /users/: + /config/users/: delete: consumes: - multipart/form-data diff --git a/internal/api/docs.go b/internal/api/docs.go index 642003f..2408f85 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -23,7 +23,7 @@ const docTemplate = `{ "host": "{{.Host}}", "basePath": "{{.BasePath}}", "paths": { - "/clusters/": { + "/api/clusters/": { "get": { "security": [ { @@ -80,7 +80,7 @@ const docTemplate = `{ } } }, - "/jobs/": { + "/api/jobs/": { "get": { "security": [ { @@ -175,7 +175,7 @@ const docTemplate = `{ } } }, - "/jobs/delete_job/": { + "/api/jobs/delete_job/": { "delete": { "security": [ { @@ -250,7 +250,7 @@ const docTemplate = `{ } } }, - "/jobs/delete_job/{id}": { + "/api/jobs/delete_job/{id}": { "delete": { "security": [ { @@ -320,7 +320,7 @@ const docTemplate = `{ } } }, - "/jobs/delete_job_before/{ts}": { + "/api/jobs/delete_job_before/{ts}": { "delete": { "security": [ { @@ -390,7 +390,7 @@ const docTemplate = `{ } } }, - "/jobs/edit_meta/{id}": { + "/api/jobs/edit_meta/{id}": { "post": { "security": [ { @@ -460,7 +460,7 @@ const docTemplate = `{ } } }, - "/jobs/start_job/": { + "/api/jobs/start_job/": { "post": { "security": [ { @@ -529,7 +529,7 @@ const docTemplate = `{ } } }, - 
"/jobs/stop_job/": { + "/api/jobs/stop_job/": { "post": { "security": [ { @@ -601,7 +601,7 @@ const docTemplate = `{ } } }, - "/jobs/tag_job/{id}": { + "/api/jobs/tag_job/{id}": { "post": { "security": [ { @@ -674,7 +674,7 @@ const docTemplate = `{ } } }, - "/jobs/{id}": { + "/api/jobs/{id}": { "get": { "security": [ { @@ -833,7 +833,7 @@ const docTemplate = `{ } } }, - "/notice/": { + "/config/notice/": { "post": { "security": [ { @@ -899,7 +899,7 @@ const docTemplate = `{ } } }, - "/user/{id}": { + "/config/user/{id}": { "post": { "security": [ { @@ -1004,7 +1004,7 @@ const docTemplate = `{ } } }, - "/users/": { + "/config/users/": { "get": { "security": [ { @@ -2191,7 +2191,7 @@ const docTemplate = `{ var SwaggerInfo = &swag.Spec{ Version: "1.0.0", Host: "localhost:8080", - BasePath: "/api", + BasePath: "", Schemes: []string{}, Title: "ClusterCockpit REST API", Description: "API for batch job control.", diff --git a/internal/api/rest.go b/internal/api/rest.go index db9a860..85b0d13 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -46,7 +46,6 @@ import ( // @license.url https://opensource.org/licenses/MIT // @host localhost:8080 -// @basePath /api // @securityDefinitions.apikey ApiKeyAuth // @in header @@ -105,7 +104,6 @@ func (api *RestApi) MountConfigApiRoutes(r *mux.Router) { r.StrictSlash(true) if api.Authentication != nil { - log.Debug("Mounting /configuration/ route") r.HandleFunc("/roles/", api.getRoles).Methods(http.MethodGet) r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet) @@ -272,7 +270,7 @@ func securedCheck(r *http.Request) error { // @failure 403 {object} api.ErrorResponse "Forbidden" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /clusters/ [get] +// @router /api/clusters/ [get] func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) { if user := repository.GetUserFromContext(r.Context()); user != nil && !user.HasRole(schema.RoleApi) { @@ -327,7 +325,7 @@ func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) { // @failure 403 {object} api.ErrorResponse "Forbidden" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/ [get] +// @router /api/jobs/ [get] func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { withMetadata := false filter := &model.JobFilter{} @@ -461,7 +459,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/{id} [get] +// @router /api/jobs/{id} [get] func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) { // Fetch job from db id, ok := mux.Vars(r)["id"] @@ -554,7 +552,7 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/{id} [post] +// @router /api/jobs/{id} [post] func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { // Fetch job from db id, ok := mux.Vars(r)["id"] @@ -658,7 +656,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r 
*http.Request) { // @failure 404 {object} api.ErrorResponse "Job does not exist" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/edit_meta/{id} [post] +// @router /api/jobs/edit_meta/{id} [post] func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) { id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64) if err != nil { @@ -704,7 +702,7 @@ func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) { // @failure 404 {object} api.ErrorResponse "Job or tag does not exist" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/tag_job/{id} [post] +// @router /api/jobs/tag_job/{id} [post] func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64) if err != nil { @@ -765,7 +763,7 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: The combination of jobId, clusterId and startTime does already exist" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/start_job/ [post] +// @router /api/jobs/start_job/ [post] func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { req := schema.JobMeta{BaseJob: schema.JobDefaults} if err := decode(r.Body, &req); err != nil { @@ -838,7 +836,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: job has already been stopped" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/stop_job/ [post] +// @router /api/jobs/stop_job/ [post] func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { // Parse request body req := StopJobApiRequest{} @@ -879,7 +877,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/delete_job/{id} [delete] +// @router /api/jobs/delete_job/{id} [delete] func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { // Fetch job (that will be stopped) from db id, ok := mux.Vars(r)["id"] @@ -922,7 +920,7 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/delete_job/ [delete] +// @router /api/jobs/delete_job/ [delete] func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) { // Parse request body req := DeleteJobApiRequest{} @@ -972,7 +970,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 500 {object} api.ErrorResponse "Internal Server Error" // @security ApiKeyAuth -// @router /jobs/delete_job_before/{ts} [delete] +// @router /api/jobs/delete_job_before/{ts} [delete] func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { var cnt int // Fetch job (that will be stopped) from db @@ -1110,7 +1108,7 @@ func (api 
*RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) { // @failure 422 {string} string "Unprocessable Entity: creating user failed" // @failure 500 {string} string "Internal Server Error" // @security ApiKeyAuth -// @router /users/ [post] +// @router /config/users/ [post] func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { err := securedCheck(r) if err != nil { @@ -1174,7 +1172,7 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { // @failure 422 {string} string "Unprocessable Entity: deleting user failed" // @failure 500 {string} string "Internal Server Error" // @security ApiKeyAuth -// @router /users/ [delete] +// @router /config/users/ [delete] func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { err := securedCheck(r) if err != nil { @@ -1210,7 +1208,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { // @failure 403 {string} string "Forbidden" // @failure 500 {string} string "Internal Server Error" // @security ApiKeyAuth -// @router /users/ [get] +// @router /config/users/ [get] func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { err := securedCheck(r) if err != nil { @@ -1252,7 +1250,7 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { // @failure 422 {string} string "Unprocessable Entity: The user could not be updated" // @failure 500 {string} string "Internal Server Error" // @security ApiKeyAuth -// @router /user/{id} [post] +// @router /config/user/{id} [post] func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { err := securedCheck(r) if err != nil { @@ -1317,7 +1315,7 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { // @failure 422 {string} string "Unprocessable Entity: The user could not be updated" // @failure 500 {string} string "Internal Server Error" // @security ApiKeyAuth -// @router /notice/ [post] +// @router /config/notice/ [post] func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) { err := securedCheck(r) if err != nil { From 317f80a9846ddda13a5cca68d29bba5ca7619d8f Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Wed, 9 Apr 2025 09:40:52 +0200 Subject: [PATCH 406/443] fix: Replace deprecated gqlgen NewDefaultServer call --- cmd/cc-backend/server.go | 21 +++++++++++++++++---- internal/api/rest.go | 2 +- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index 1408162..3c19730 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -18,6 +18,7 @@ import ( "time" "github.com/99designs/gqlgen/graphql/handler" + "github.com/99designs/gqlgen/graphql/handler/transport" "github.com/99designs/gqlgen/graphql/playground" "github.com/ClusterCockpit/cc-backend/internal/api" "github.com/ClusterCockpit/cc-backend/internal/archiver" @@ -31,6 +32,7 @@ import ( "github.com/ClusterCockpit/cc-backend/web" "github.com/gorilla/handlers" "github.com/gorilla/mux" + "github.com/gorilla/websocket" httpSwagger "github.com/swaggo/http-swagger" ) @@ -53,13 +55,24 @@ func serverInit() { // Setup the http.Handler/Router used by the server graph.Init() resolver := graph.GetResolverInstance() - graphQLEndpoint := handler.NewDefaultServer( + graphQLServer := handler.New( generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) + graphQLServer.AddTransport(transport.SSE{}) + graphQLServer.AddTransport(transport.POST{}) + graphQLServer.AddTransport(transport.Websocket{ + 
KeepAlivePingInterval: 10 * time.Second, + Upgrader: websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + return true + }, + }, + }) + if os.Getenv("DEBUG") != "1" { // Having this handler means that a error message is returned via GraphQL instead of the connection simply beeing closed. // The problem with this is that then, no more stacktrace is printed to stderr. - graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error { + graphQLServer.SetRecoverFunc(func(ctx context.Context, err any) error { switch e := err.(type) { case string: return fmt.Errorf("MAIN > Panic: %s", e) @@ -78,7 +91,7 @@ func serverInit() { router = mux.NewRouter() buildInfo := web.Build{Version: version, Hash: commit, Buildtime: date} - info := map[string]interface{}{} + info := map[string]any{} info["hasOpenIDConnect"] = false if config.Keys.OpenIDConfig != nil { @@ -208,7 +221,7 @@ func serverInit() { router.PathPrefix("/swagger/").Handler(httpSwagger.Handler( httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet) } - secured.Handle("/query", graphQLEndpoint) + secured.Handle("/query", graphQLServer) // Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project. secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) { diff --git a/internal/api/rest.go b/internal/api/rest.go index 85b0d13..1ebe78e 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -214,7 +214,7 @@ func handleError(err error, statusCode int, rw http.ResponseWriter) { }) } -func decode(r io.Reader, val interface{}) error { +func decode(r io.Reader, val any) error { dec := json.NewDecoder(r) dec.DisallowUnknownFields() return dec.Decode(val) From fb6a4c3b874114883a49aacdd408d0c17dfc0a20 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 9 Apr 2025 16:00:27 +0200 Subject: [PATCH 407/443] review and move api endpoints secured check --- internal/api/rest.go | 95 +++++-------------------------------------- internal/auth/auth.go | 80 ++++++++++++++++++++++++++++++++++-- pkg/schema/config.go | 5 ++- 3 files changed, 91 insertions(+), 89 deletions(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index 712a0b3..352dd94 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -69,11 +69,10 @@ func New() *RestApi { func (api *RestApi) MountApiRoutes(r *mux.Router) { r.StrictSlash(true) - + // REST API Uses TokenAuth r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut) // r.HandleFunc("/jobs/import/", api.importJob).Methods(http.MethodPost, http.MethodPut) - r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet) r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost) r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet) @@ -83,7 +82,6 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) { r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete) - r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet) if api.MachineStateDir != "" { @@ -94,7 +92,7 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) { func (api *RestApi) MountUserApiRoutes(r *mux.Router) { r.StrictSlash(true) - + // REST API Uses 
TokenAuth r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet) r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost) r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet) @@ -103,7 +101,7 @@ func (api *RestApi) MountUserApiRoutes(r *mux.Router) { func (api *RestApi) MountConfigApiRoutes(r *mux.Router) { r.StrictSlash(true) - + // Settings Frontend Uses SessionAuth if api.Authentication != nil { r.HandleFunc("/roles/", api.getRoles).Methods(http.MethodGet) r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut) @@ -116,7 +114,7 @@ func (api *RestApi) MountConfigApiRoutes(r *mux.Router) { func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) { r.StrictSlash(true) - + // Settings Frontrend Uses SessionAuth if api.Authentication != nil { r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet) r.HandleFunc("/configuration/", api.updateConfiguration).Methods(http.MethodPost) @@ -221,44 +219,6 @@ func decode(r io.Reader, val interface{}) error { return dec.Decode(val) } -func securedCheck(r *http.Request) error { - user := repository.GetUserFromContext(r.Context()) - if user == nil { - return fmt.Errorf("no user in context") - } - - if user.AuthType == schema.AuthToken { - // If nothing declared in config: deny all request to this endpoint - if config.Keys.ApiAllowedIPs == nil || len(config.Keys.ApiAllowedIPs) == 0 { - return fmt.Errorf("missing configuration key ApiAllowedIPs") - } - - if config.Keys.ApiAllowedIPs[0] == "*" { - return nil - } - - // extract IP address - IPAddress := r.Header.Get("X-Real-Ip") - if IPAddress == "" { - IPAddress = r.Header.Get("X-Forwarded-For") - } - if IPAddress == "" { - IPAddress = r.RemoteAddr - } - - if strings.Contains(IPAddress, ":") { - IPAddress = strings.Split(IPAddress, ":")[0] - } - - // check if IP is allowed - if !util.Contains(config.Keys.ApiAllowedIPs, IPAddress) { - return fmt.Errorf("unknown ip: %v", IPAddress) - } - } - - return nil -} - // getClusters godoc // @summary Lists all cluster configs // @tags Cluster query @@ -1093,7 +1053,6 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) { // @summary Adds a new user // @tags User // @description User specified in form data will be saved to database. -// @description Only accessible from IPs registered with apiAllowedIPs configuration option. // @accept mpfd // @produce plain // @param username formData string true "Unique user ID" @@ -1111,11 +1070,7 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) { // @security ApiKeyAuth // @router /users/ [post] func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { - err := securedCheck(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusForbidden) - return - } + // SecuredCheck() only worked with TokenAuth: Removed rw.Header().Set("Content-Type", "text/plain") me := repository.GetUserFromContext(r.Context()) @@ -1162,7 +1117,6 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { // @summary Deletes a user // @tags User // @description User defined by username in form data will be deleted from database. -// @description Only accessible from IPs registered with apiAllowedIPs configuration option. 
// @accept mpfd // @produce plain // @param username formData string true "User ID to delete" @@ -1175,11 +1129,7 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { // @security ApiKeyAuth // @router /users/ [delete] func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { - err := securedCheck(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusForbidden) - return - } + // SecuredCheck() only worked with TokenAuth: Removed if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { http.Error(rw, "Only admins are allowed to delete a user", http.StatusForbidden) @@ -1200,7 +1150,6 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { // @tags User // @description Returns a JSON-encoded list of users. // @description Required query-parameter defines if all users or only users with additional special roles are returned. -// @description Only accessible from IPs registered with apiAllowedIPs configuration option. // @produce json // @param not-just-user query bool true "If returned list should contain all users or only users with additional special roles" // @success 200 {array} api.ApiReturnedUser "List of users returned successfully" @@ -1211,11 +1160,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { // @security ApiKeyAuth // @router /users/ [get] func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { - err := securedCheck(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusForbidden) - return - } + // SecuredCheck() only worked with TokenAuth: Removed if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { http.Error(rw, "Only admins are allowed to fetch a list of users", http.StatusForbidden) @@ -1236,7 +1181,6 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { // @tags User // @description Modifies user defined by username (id) in one of four possible ways. // @description If more than one formValue is set then only the highest priority field is used. -// @description Only accessible from IPs registered with apiAllowedIPs configuration option. // @accept mpfd // @produce plain // @param id path string true "Database ID of User" @@ -1253,11 +1197,7 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { // @security ApiKeyAuth // @router /user/{id} [post] func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { - err := securedCheck(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusForbidden) - return - } + // SecuredCheck() only worked with TokenAuth: Removed if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { http.Error(rw, "Only admins are allowed to update a user", http.StatusForbidden) @@ -1305,7 +1245,6 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { // @tags User // @description Modifies the content of notice.txt, shown as notice box on the homepage. // @description If more than one formValue is set then only the highest priority field is used. -// @description Only accessible from IPs registered with apiAllowedIPs configuration option. 
// @accept mpfd // @produce plain // @param new-content formData string false "Priority 1: New content to display" @@ -1318,11 +1257,7 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { // @security ApiKeyAuth // @router /notice/ [post] func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) { - err := securedCheck(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusForbidden) - return - } + // SecuredCheck() only worked with TokenAuth: Removed if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { http.Error(rw, "Only admins are allowed to update the notice.txt file", http.StatusForbidden) @@ -1364,12 +1299,6 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) { } func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) { - err := securedCheck(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusForbidden) - return - } - rw.Header().Set("Content-Type", "text/plain") username := r.FormValue("username") me := repository.GetUserFromContext(r.Context()) @@ -1398,11 +1327,7 @@ func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) { } func (api *RestApi) getRoles(rw http.ResponseWriter, r *http.Request) { - err := securedCheck(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusForbidden) - return - } + // SecuredCheck() only worked with TokenAuth: Removed user := repository.GetUserFromContext(r.Context()) if !user.HasRole(schema.RoleAdmin) { diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 262204c..d5e48ac 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -10,9 +10,11 @@ import ( "database/sql" "encoding/base64" "errors" + "fmt" "net" "net/http" "os" + "strings" "sync" "time" @@ -20,6 +22,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/repository" + "github.com/ClusterCockpit/cc-backend/internal/util" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/gorilla/sessions" @@ -233,9 +236,9 @@ func (auth *Authentication) Login( limiter := getIPUserLimiter(ip, username) if !limiter.Allow() { - log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username) - onfailure(rw, r, errors.New("Too many login attempts, try again in a few minutes.")) - return + log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username) + onfailure(rw, r, errors.New("Too many login attempts, try again in a few minutes.")) + return } var dbUser *schema.User @@ -325,6 +328,14 @@ func (auth *Authentication) AuthApi( onfailure(rw, r, err) return } + + ipErr := securedCheck(user, "api", r) + if ipErr != nil { + log.Infof("auth api -> secured check failed: %s", err.Error()) + onfailure(rw, r, ipErr) + return + } + if user != nil { switch { case len(user.Roles) == 1: @@ -360,6 +371,14 @@ func (auth *Authentication) AuthUserApi( onfailure(rw, r, err) return } + + ipErr := securedCheck(user, "userapi", r) + if ipErr != nil { + log.Infof("auth user api -> secured check failed: %s", err.Error()) + onfailure(rw, r, ipErr) + return + } + if user != nil { switch { case len(user.Roles) == 1: @@ -445,3 +464,58 @@ func (auth *Authentication) Logout(onsuccess http.Handler) http.Handler { onsuccess.ServeHTTP(rw, r) }) } + +// Helper Moved To MiddleWare Auth Handlers +func securedCheck(user *schema.User, checkEndpoint string, r *http.Request) error { + if user == 
nil { + return fmt.Errorf("no user for secured check") + } + + // extract IP address for checking + IPAddress := r.Header.Get("X-Real-Ip") + if IPAddress == "" { + IPAddress = r.Header.Get("X-Forwarded-For") + } + if IPAddress == "" { + IPAddress = r.RemoteAddr + } + + if strings.Contains(IPAddress, ":") { + IPAddress = strings.Split(IPAddress, ":")[0] + } + + // Used for checking TokenAuth'd Requests Only: Remove '== schema.AuthToken'-Condition + if checkEndpoint == "api" { + // If nothing declared in config: deny all request to this api endpoint + if config.Keys.ApiAllowedIPs == nil || len(config.Keys.ApiAllowedIPs) == 0 { + return fmt.Errorf("missing configuration key ApiAllowedIPs") + } + // If wildcard declared in config: Continue + if config.Keys.ApiAllowedIPs[0] == "*" { + return nil + } + // check if IP is allowed + if !util.Contains(config.Keys.ApiAllowedIPs, IPAddress) { + return fmt.Errorf("unknown ip: %v", IPAddress) + } + + } else if checkEndpoint == "userapi" { + // If nothing declared in config: deny all request to this api endpoint + if config.Keys.UserApiAllowedIPs == nil || len(config.Keys.UserApiAllowedIPs) == 0 { + return fmt.Errorf("missing configuration key UserApiAllowedIPs") + } + // If wildcard declared in config: Continue + if config.Keys.UserApiAllowedIPs[0] == "*" { + return nil + } + // check if IP is allowed + if !util.Contains(config.Keys.UserApiAllowedIPs, IPAddress) { + return fmt.Errorf("unknown user ip: %v", IPAddress) + } + + } else { + return fmt.Errorf("unknown checkEndpoint for secured check") + } + + return nil +} diff --git a/pkg/schema/config.go b/pkg/schema/config.go index f9116cf..16b4219 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -100,9 +100,12 @@ type ProgramConfig struct { // Address where the http (or https) server will listen on (for example: 'localhost:80'). Addr string `json:"addr"` - // Addresses from which secured API endpoints can be reached + // Addresses from which secured admin API endpoints can be reached, can be wildcard "*" ApiAllowedIPs []string `json:"apiAllowedIPs"` + // Addresses from which secured admin API endpoints can be reached, can be wildcard "*" + UserApiAllowedIPs []string `json:"userApiAllowedIPs"` + // Drop root permissions once .env was read and the port was taken. 
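One caveat on the check above: the extracted address decides whether the allowlist applies, yet X-Forwarded-For may carry a comma-separated proxy chain, and splitting on the first ':' truncates IPv6 literals such as [::1]:80. A minimal standalone sketch of a more defensive variant, assuming the same header precedence; the helper name and placement are illustrative, not part of the patch:

// clientIP sketches a hardened version of securedCheck's address extraction.
package main

import (
	"net"
	"net/http"
	"strings"
)

func clientIP(r *http.Request) string {
	// Proxy headers take precedence over the raw socket address.
	ip := r.Header.Get("X-Real-Ip")
	if ip == "" {
		ip = r.Header.Get("X-Forwarded-For")
	}
	if ip == "" {
		ip = r.RemoteAddr
	}
	// X-Forwarded-For can list several hops; keep the first (client) entry.
	if i := strings.IndexByte(ip, ','); i >= 0 {
		ip = strings.TrimSpace(ip[:i])
	}
	// RemoteAddr is "host:port"; net.SplitHostPort also handles "[::1]:80",
	// where strings.Split(ip, ":")[0] would mangle the IPv6 literal.
	if host, _, err := net.SplitHostPort(ip); err == nil {
		return host
	}
	return ip
}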
User string `json:"user"` Group string `json:"group"` From 25d3325049ad9060399da27d2e3e9807aaa0c630 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 14 Apr 2025 11:36:03 +0200 Subject: [PATCH 408/443] add getUsers to admin REST api --- internal/api/rest.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index 352dd94..0fa4611 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -70,6 +70,11 @@ func New() *RestApi { func (api *RestApi) MountApiRoutes(r *mux.Router) { r.StrictSlash(true) // REST API Uses TokenAuth + // User List + r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet) + // Cluster List + r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet) + // Job Handler r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut) // r.HandleFunc("/jobs/import/", api.importJob).Methods(http.MethodPost, http.MethodPut) @@ -82,7 +87,6 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) { r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete) - r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet) if api.MachineStateDir != "" { r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet) From 1755a4a7dfd851f211669fe155a75afe1373aa6f Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 14 Apr 2025 11:58:42 +0200 Subject: [PATCH 409/443] remove separate userapiallowedips config and check --- internal/auth/auth.go | 53 +++++++++++-------------------------------- pkg/schema/config.go | 3 --- 2 files changed, 13 insertions(+), 43 deletions(-) diff --git a/internal/auth/auth.go b/internal/auth/auth.go index d5e48ac..9201315 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -329,7 +329,7 @@ func (auth *Authentication) AuthApi( return } - ipErr := securedCheck(user, "api", r) + ipErr := securedCheck(user, r) if ipErr != nil { log.Infof("auth api -> secured check failed: %s", err.Error()) onfailure(rw, r, ipErr) @@ -372,13 +372,6 @@ func (auth *Authentication) AuthUserApi( return } - ipErr := securedCheck(user, "userapi", r) - if ipErr != nil { - log.Infof("auth user api -> secured check failed: %s", err.Error()) - onfailure(rw, r, ipErr) - return - } - if user != nil { switch { case len(user.Roles) == 1: @@ -466,7 +459,7 @@ func (auth *Authentication) Logout(onsuccess http.Handler) http.Handler { } // Helper Moved To MiddleWare Auth Handlers -func securedCheck(user *schema.User, checkEndpoint string, r *http.Request) error { +func securedCheck(user *schema.User, r *http.Request) error { if user == nil { return fmt.Errorf("no user for secured check") } @@ -484,37 +477,17 @@ func securedCheck(user *schema.User, checkEndpoint string, r *http.Request) erro IPAddress = strings.Split(IPAddress, ":")[0] } - // Used for checking TokenAuth'd Requests Only: Remove '== schema.AuthToken'-Condition - if checkEndpoint == "api" { - // If nothing declared in config: deny all request to this api endpoint - if config.Keys.ApiAllowedIPs == nil || len(config.Keys.ApiAllowedIPs) == 0 { - return fmt.Errorf("missing configuration key ApiAllowedIPs") - } - // If wildcard declared in config: Continue - if 
config.Keys.ApiAllowedIPs[0] == "*" { - return nil - } - // check if IP is allowed - if !util.Contains(config.Keys.ApiAllowedIPs, IPAddress) { - return fmt.Errorf("unknown ip: %v", IPAddress) - } - - } else if checkEndpoint == "userapi" { - // If nothing declared in config: deny all request to this api endpoint - if config.Keys.UserApiAllowedIPs == nil || len(config.Keys.UserApiAllowedIPs) == 0 { - return fmt.Errorf("missing configuration key UserApiAllowedIPs") - } - // If wildcard declared in config: Continue - if config.Keys.UserApiAllowedIPs[0] == "*" { - return nil - } - // check if IP is allowed - if !util.Contains(config.Keys.UserApiAllowedIPs, IPAddress) { - return fmt.Errorf("unknown user ip: %v", IPAddress) - } - - } else { - return fmt.Errorf("unknown checkEndpoint for secured check") + // If nothing declared in config: deny all request to this api endpoint + if len(config.Keys.ApiAllowedIPs) == 0 { + return fmt.Errorf("missing configuration key ApiAllowedIPs") + } + // If wildcard declared in config: Continue + if config.Keys.ApiAllowedIPs[0] == "*" { + return nil + } + // check if IP is allowed + if !util.Contains(config.Keys.ApiAllowedIPs, IPAddress) { + return fmt.Errorf("unknown ip: %v", IPAddress) } return nil diff --git a/pkg/schema/config.go b/pkg/schema/config.go index 16b4219..27d11be 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -103,9 +103,6 @@ type ProgramConfig struct { // Addresses from which secured admin API endpoints can be reached, can be wildcard "*" ApiAllowedIPs []string `json:"apiAllowedIPs"` - // Addresses from which secured admin API endpoints can be reached, can be wildcard "*" - UserApiAllowedIPs []string `json:"userApiAllowedIPs"` - // Drop root permissions once .env was read and the port was taken. User string `json:"user"` Group string `json:"group"` From 29ae2423f845a71f21b1a6f2453e8ad3de7fb3c0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 16 Apr 2025 18:36:12 +0200 Subject: [PATCH 410/443] fix metricconfig pointer copy, add disabled metric card in jobView - skips disabled metrics in backend, see cc-backend tries to retrieve "removed" metrics #377 --- internal/metricdata/cc-metric-store.go | 28 ++++++++++++++++++++ pkg/archive/clusterConfig.go | 19 ++++++++++++-- web/frontend/src/Job.root.svelte | 36 ++++++++++++++++++-------- web/frontend/src/job/Metric.svelte | 2 -- 4 files changed, 70 insertions(+), 15 deletions(-) diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 9516e2b..7c84d93 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -302,6 +302,20 @@ func (ccms *CCMetricStore) buildQueries( continue } + // Skip if metric is removed for subcluster + if len(mc.SubClusters) != 0 { + isRemoved := false + for _, scConfig := range mc.SubClusters { + if scConfig.Name == job.SubCluster && scConfig.Remove == true { + isRemoved = true + break + } + } + if isRemoved { + continue + } + } + // Avoid duplicates... handledScopes := make([]schema.MetricScope, 0, 3) @@ -985,6 +999,20 @@ func (ccms *CCMetricStore) buildNodeQueries( continue } + // Skip if metric is removed for subcluster + if mc.SubClusters != nil { + isRemoved := false + for _, scConfig := range mc.SubClusters { + if scConfig.Name == subCluster && scConfig.Remove == true { + isRemoved = true + break + } + } + if isRemoved { + continue + } + } + // Avoid duplicates... 
handledScopes := make([]schema.MetricScope, 0, 3) diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index 72718d0..d53941b 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -68,8 +68,23 @@ func initClusterConfig() error { } for _, sc := range cluster.SubClusters { - newMetric := mc - newMetric.SubClusters = nil + newMetric := &schema.MetricConfig{ + Unit: mc.Unit, + Energy: mc.Energy, + Name: mc.Name, + Scope: mc.Scope, + Aggregation: mc.Aggregation, + Peak: mc.Peak, + Caution: mc.Caution, + Alert: mc.Alert, + Timestep: mc.Timestep, + Normal: mc.Normal, + LowerIsBetter: mc.LowerIsBetter, + } + + if mc.Footprint != "" { + newMetric.Footprint = mc.Footprint + } if cfg, ok := scLookup[sc.Name]; ok { if !cfg.Remove { diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 0a2aa26..92d8bb2 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -128,14 +128,13 @@ const pendingMetrics = ( ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] || ccconfig[`job_view_selectedMetrics:${job.cluster}`] - ) || - $initq.data.globalMetrics - .reduce((names, gm) => { - if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { - names.push(gm.name); - } - return names; - }, []) + ) || + $initq.data.globalMetrics.reduce((names, gm) => { + if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) { + names.push(gm.name); + } + return names; + }, []) // Select default Scopes to load: Check before if any metric has accelerator scope by default const accScopeDefault = [...pendingMetrics].some(function (m) { @@ -338,10 +337,25 @@ scopes={item.data.map((x) => x.scope)} isShared={$initq.data.job.exclusive != 1} /> + {:else if item.disabled == true} + + + Disabled Metric + + +
+          Metric {item.metric} is disabled for subcluster {$initq.data.job.subCluster}.
+          To remove this card, open metric selection and press "Close and Apply".
+          [card markup stripped in this excerpt]
        {:else}
-         No dataset returned for {item.metric}
+          Missing Metric
+          No dataset returned for {item.metric}.
+          [card markup stripped in this excerpt]
{/if} {/if} diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index b68ef47..63a9b80 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -14,7 +14,6 @@ diff --git a/web/frontend/src/systems/nodelist/NodeListRow.svelte b/web/frontend/src/systems/nodelist/NodeListRow.svelte index 5202573..ee8ef49 100644 --- a/web/frontend/src/systems/nodelist/NodeListRow.svelte +++ b/web/frontend/src/systems/nodelist/NodeListRow.svelte @@ -14,7 +14,7 @@ getContextClient, } from "@urql/svelte"; import { Card, CardBody, Spinner } from "@sveltestrap/sveltestrap"; - import { maxScope, checkMetricDisabled } from "../../generic/utils.js"; + import { maxScope, checkMetricDisabled, scramble, scrambleNames } from "../../generic/utils.js"; import MetricPlot from "../../generic/plots/MetricPlot.svelte"; import NodeInfo from "./NodeInfo.svelte"; @@ -110,9 +110,12 @@ extendedLegendData = {} for (const accId of accSet) { const matchJob = $nodeJobsData.data.jobs.items.find((i) => i.resources.find((r) => r.accelerators.includes(accId))) + const matchUser = matchJob?.user ? matchJob.user : null extendedLegendData[accId] = { - user: matchJob?.user ? matchJob?.user : '-', - job: matchJob?.jobId ? matchJob?.jobId : '-', + user: (scrambleNames && matchUser) + ? scramble(matchUser) + : (matchUser ? matchUser : '-'), + job: matchJob?.jobId ? matchJob.jobId : '-', } } // Theoretically extendable for hwthreadIDs From 9bcf7adb67daa30c0c87a1e0dbaa10e87714716d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 17 Apr 2025 17:31:59 +0200 Subject: [PATCH 412/443] add api calls for removing tags, initial branch commit --- internal/api/rest.go | 116 ++++++++++++++++++++++++++++++++++++ internal/repository/tags.go | 98 +++++++++++++++++++++++++++++- 2 files changed, 213 insertions(+), 1 deletion(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index db9a860..89bdd5e 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -750,6 +750,122 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { json.NewEncoder(rw).Encode(job) } +// removeTagJob godoc +// @summary Removes one or more tags from a job +// @tags Job add and modify +// @description Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match. +// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API. +// @description If tagged job is already finished: Tag will be removed from respective archive files. 
+// @accept json +// @produce json +// @param id path int true "Job Database ID" +// @param request body api.TagJobApiRequest true "Array of tag-objects to remove" +// @success 200 {object} schema.Job "Updated job resource" +// @failure 400 {object} api.ErrorResponse "Bad Request" +// @failure 401 {object} api.ErrorResponse "Unauthorized" +// @failure 404 {object} api.ErrorResponse "Job or tag does not exist" +// @failure 500 {object} api.ErrorResponse "Internal Server Error" +// @security ApiKeyAuth +// @router /jobs/tag_job/{id} [delete] +func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) { + id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + job, err := api.JobRepository.FindById(r.Context(), id) + if err != nil { + http.Error(rw, err.Error(), http.StatusNotFound) + return + } + + job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + var req TagJobApiRequest + if err := decode(r.Body, &req); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + for _, rtag := range req { + // Only Global and Admin Tags + if rtag.Scope != "global" && rtag.Scope != "admin" { + log.Warnf("Cannot delete private tag for job %d: Skip", job.JobID) + continue + } + + remainingTags, err := api.JobRepository.RemoveJobTagByRequest(repository.GetUserFromContext(r.Context()), job.ID, rtag.Type, rtag.Name, rtag.Scope) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + // remainingTags := job.Tags[:0] + // for _, tag := range job.Tags { + // if tag.Type != rtag.Type && + // tag.Name != rtag.Name && + // tag.Scope != rtag.Scope { + // remainingTags = append(remainingTags, tag) + // } + // } + job.Tags = remainingTags + } + + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + json.NewEncoder(rw).Encode(job) +} + +// removeTags godoc +// @summary Removes all tags and job-relations for type:name tuple +// @tags Tag remove +// @description Removes tags by type and name. Name and Type of Tag(s) must match. +// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API. +// @description Tag wills be removed from respective archive files. 
+// @accept json +// @produce plain +// @param request body api.TagJobApiRequest true "Array of tag-objects to remove" +// @success 200 {string} string "Success Response" +// @failure 400 {object} api.ErrorResponse "Bad Request" +// @failure 401 {object} api.ErrorResponse "Unauthorized" +// @failure 404 {object} api.ErrorResponse "Job or tag does not exist" +// @failure 500 {object} api.ErrorResponse "Internal Server Error" +// @security ApiKeyAuth +// @router /jobs/tag_job/ [delete] +func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) { + var req TagJobApiRequest + if err := decode(r.Body, &req); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + targetCount := len(req) + currentCount := 0 + for _, rtag := range req { + // Only Global and Admin Tags + if rtag.Scope != "global" && rtag.Scope != "admin" { + log.Warn("Cannot delete private tag: Skip") + continue + } + + err := api.JobRepository.RemoveTagByRequest(rtag.Type, rtag.Name, rtag.Scope) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } else { + currentCount++ + } + } + + rw.WriteHeader(http.StatusOK) + rw.Write([]byte(fmt.Sprintf("Deleted Tags from DB: %d of %d", currentCount, targetCount))) +} + // startJob godoc // @summary Adds a new job as "running" // @tags Job add and modify diff --git a/internal/repository/tags.go b/internal/repository/tags.go index 8120364..3a35b34 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -45,7 +45,7 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche return tags, archive.UpdateTags(j, archiveTags) } -// Removes a tag from a job +// Removes a tag from a job by its ID func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) { j, err := r.FindByIdWithUser(user, job) if err != nil { @@ -76,6 +76,76 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema. 
return tags, archive.UpdateTags(j, archiveTags) } +// Removes a tag from a job by tag info +func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagType string, tagName string, tagScope string) ([]*schema.Tag, error) { + // Get Tag ID to delete + tagID, err := r.loadTagIDByInfo(tagName, tagType, tagScope) + if err != nil { + log.Warn("Error while finding tagId with: %s, %s, %s", tagName, tagType, tagScope) + return nil, err + } + + // Get Job + j, err := r.FindByIdWithUser(user, job) + if err != nil { + log.Warn("Error while finding job by id") + return nil, err + } + + // Handle Delete + q := sq.Delete("jobtag").Where("jobtag.job_id = ?", job).Where("jobtag.tag_id = ?", tagID) + + if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { + s, _, _ := q.ToSql() + log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err) + return nil, err + } + + tags, err := r.GetTags(user, &job) + if err != nil { + log.Warn("Error while getting tags for job") + return nil, err + } + + archiveTags, err := r.getArchiveTags(&job) + if err != nil { + log.Warn("Error while getting tags for job") + return nil, err + } + + return tags, archive.UpdateTags(j, archiveTags) +} + +// Removes a tag from db by tag info +func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagScope string) error { + // Get Tag ID to delete + tagID, err := r.loadTagIDByInfo(tagName, tagType, tagScope) + if err != nil { + log.Warn("Error while finding tagId with: %s, %s, %s", tagName, tagType, tagScope) + return err + } + + // Handle Delete JobTagTable + qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID) + + if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil { + s, _, _ := qJobTag.ToSql() + log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err) + return err + } + + // Handle Delete TagTable + qTag := sq.Delete("tag").Where("tag.id = ?", tagID) + + if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil { + s, _, _ := qTag.ToSql() + log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err) + return err + } + + return nil +} + // CreateTag creates a new tag with the specified type and name and returns its database id. func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope string) (tagId int64, err error) { // Default to "Global" scope if none defined @@ -325,3 +395,29 @@ func (r *JobRepository) checkScopeAuth(user *schema.User, operation string, scop return false, fmt.Errorf("error while checking tag operation auth: no user in context") } } + +func (r *JobRepository) loadTagIDByInfo(tagType string, tagName string, tagScope string) (tagID int64, err error) { + // Get Tag ID to delete + getq := sq.Select("id").From("tag"). + Where("tag_type = ?", tagType). + Where("tag_name = ?", tagName). 
+ Where("tag_scope = ?", tagScope) + + rows, err := getq.RunWith(r.stmtCache).Query() + if err != nil { + s, _, _ := getq.ToSql() + log.Errorf("Error get tags for delete with %s: %v", s, err) + return 0, err + } + + dbTags := make([]*schema.Tag, 0) + for rows.Next() { + dbTag := &schema.Tag{} + if err := rows.Scan(&dbTag.ID); err != nil { + log.Warn("Error while scanning rows") + return 0, err + } + } + + return dbTags[0].ID, nil +} From 277f964b30e76a7726fc75bd3673d1f947068627 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 22 Apr 2025 13:47:25 +0200 Subject: [PATCH 413/443] move taglist a from go tmpl to svelte component --- web/frontend/rollup.config.mjs | 1 + web/frontend/src/Tags.root.svelte | 50 +++++++++++++++++++++++++++ web/frontend/src/tags.entrypoint.js | 12 +++++++ web/templates/monitoring/taglist.tmpl | 46 ++++++------------------ 4 files changed, 74 insertions(+), 35 deletions(-) create mode 100644 web/frontend/src/Tags.root.svelte create mode 100644 web/frontend/src/tags.entrypoint.js diff --git a/web/frontend/rollup.config.mjs b/web/frontend/rollup.config.mjs index 8336287..0e15105 100644 --- a/web/frontend/rollup.config.mjs +++ b/web/frontend/rollup.config.mjs @@ -62,6 +62,7 @@ export default [ entrypoint('jobs', 'src/jobs.entrypoint.js'), entrypoint('user', 'src/user.entrypoint.js'), entrypoint('list', 'src/list.entrypoint.js'), + entrypoint('taglist', 'src/tags.entrypoint.js'), entrypoint('job', 'src/job.entrypoint.js'), entrypoint('systems', 'src/systems.entrypoint.js'), entrypoint('node', 'src/node.entrypoint.js'), diff --git a/web/frontend/src/Tags.root.svelte b/web/frontend/src/Tags.root.svelte new file mode 100644 index 0000000..4f7a34e --- /dev/null +++ b/web/frontend/src/Tags.root.svelte @@ -0,0 +1,50 @@ + + + + +
+[script section and container markup stripped in this excerpt]
+{#each Object.entries(tagmap) as [tagType, tagList]}
+  Tag Type: {tagType}
+  {tagList.length} Tag{(tagList.length != 1)?'s':''}
+  {#each tagList as tag (tag.id)}
+    {#if tag.scope == "global"}
+      {tag.name} {tag.count} Job{(tag.count != 1)?'s':''} Global
+    {:else if tag.scope == "admin"}
+      {tag.name} {tag.count} Job{(tag.count != 1)?'s':''} Admin
+    {:else}
+      {tag.name} {tag.count} Job{(tag.count != 1)?'s':''} Private
+    {/if}
+  {/each}
+{/each}
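The tagmap prop iterated above is a map from tag type to a list of tag objects carrying id, name, count and scope, mirroring the .Infos.tagmap the template removed below was rendering. A minimal sketch of how such a map can be grouped server-side; the TagInfo struct and the flat input slice are assumptions for illustration, not the repository's actual API:

package main

import (
	"encoding/json"
	"fmt"
)

// TagInfo mirrors the per-tag fields the component reads.
type TagInfo struct {
	ID    int64  `json:"id"`
	Type  string `json:"type"`
	Name  string `json:"name"`
	Count int    `json:"count"`
	Scope string `json:"scope"`
}

// buildTagMap groups a flat tag list by tag type, the shape tagmap expects.
func buildTagMap(tags []TagInfo) map[string][]TagInfo {
	tagmap := make(map[string][]TagInfo)
	for _, t := range tags {
		tagmap[t.Type] = append(tagmap[t.Type], t)
	}
	return tagmap
}

func main() {
	tags := []TagInfo{
		{ID: 1, Type: "Debug", Name: "testwise", Count: 3, Scope: "global"},
		{ID: 2, Type: "Debug", Name: "mine", Count: 1, Scope: "demo"},
	}
	out, _ := json.MarshalIndent(buildTagMap(tags), "", "  ")
	fmt.Println(string(out))
}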
diff --git a/web/frontend/src/tags.entrypoint.js b/web/frontend/src/tags.entrypoint.js new file mode 100644 index 0000000..14df2f9 --- /dev/null +++ b/web/frontend/src/tags.entrypoint.js @@ -0,0 +1,12 @@ +import {} from './header.entrypoint.js' +import Tags from './Tags.root.svelte' + +new Tags({ + target: document.getElementById('svelte-app'), + props: { + // authlevel: authlevel, + tagmap: tagmap, + } +}) + + diff --git a/web/templates/monitoring/taglist.tmpl b/web/templates/monitoring/taglist.tmpl index 7d762c3..4388e94 100644 --- a/web/templates/monitoring/taglist.tmpl +++ b/web/templates/monitoring/taglist.tmpl @@ -1,37 +1,13 @@ {{define "content"}} -
-{{ range $tagType, $tagList := .Infos.tagmap }}
-  Tag Type: {{ $tagType }}
-  {{len $tagList}} Tag{{if ne (len $tagList) 1}}s{{end}}
-  {{ range $tagList }}
-    {{if eq .scope "global"}}
-      {{ .name }} {{ .count }} Job{{if ne .count 1}}s{{end}} Global
-    {{else if eq .scope "admin"}}
-      {{ .name }} {{ .count }} Job{{if ne .count 1}}s{{end}} Admin
-    {{else}}
-      {{ .name }} {{ .count }} Job{{if ne .count 1}}s{{end}} Private
-    {{end}}
-  {{end}}
-{{end}}
+[svelte-app mount div stripped in this excerpt]
+{{end}} +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + {{end}} From a3fb47154627d4b8dcaacf121a29053527388081 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 22 Apr 2025 17:33:17 +0200 Subject: [PATCH 414/443] adapt and improve svelte taglist component --- web/frontend/src/Tags.root.svelte | 163 ++++++++++++++++++++------ web/frontend/src/tags.entrypoint.js | 8 +- web/templates/monitoring/taglist.tmpl | 4 +- 3 files changed, 137 insertions(+), 38 deletions(-) diff --git a/web/frontend/src/Tags.root.svelte b/web/frontend/src/Tags.root.svelte index 4f7a34e..52288c9 100644 --- a/web/frontend/src/Tags.root.svelte +++ b/web/frontend/src/Tags.root.svelte @@ -2,49 +2,142 @@ @component Tag List Svelte Component. Displays All Tags, Allows deletion. Properties: - - `authlevel Int!`: Current Users Authority Level - - `tagmap Object!`: Map of Appwide Tags + - `username String!`: Users username. + - `isAdmin Bool!`: User has Admin Auth. + - `tagmap Object!`: Map of accessible, appwide tags. Prefiltered in backend. -->
-{#each Object.entries(tagmap) as [tagType, tagList]}
-  Tag Type: {tagType}
-  {tagList.length} Tag{(tagList.length != 1)?'s':''}
-  {#each tagList as tag (tag.id)}
-    {#if tag.scope == "global"}
-      {tag.name} {tag.count} Job{(tag.count != 1)?'s':''} Global
-    {:else if tag.scope == "admin"}
-      {tag.name} {tag.count} Job{(tag.count != 1)?'s':''} Admin
-    {:else}
-      {tag.name} {tag.count} Job{(tag.count != 1)?'s':''} Private
-    {/if}
-  {/each}
-{/each}
+{#each Object.entries(tagmap) as [tagType, tagList]}
+  Tag Type: {tagType}
+  {#if pendingChange === tagType}
+    [spinner markup stripped]
+  {/if}
+  {tagList.length} Tag{(tagList.length != 1)?'s':''}
+  {#each tagList as tag (tag.id)}
+    {#if tag.scope == "global"}
+      [tag pill markup stripped]
+      {#if isAdmin}
+        [delete button markup stripped]
+      {/if}
+    {:else if tag.scope == "admin"}
+      [tag pill markup stripped]
+      {#if isAdmin}
+        [delete button markup stripped]
+      {/if}
+    {:else}
+      [tag pill markup stripped]
+      {#if tag.scope == username}
+        [delete button markup stripped]
+      {/if}
+    {/if}
+  {/each}
+{/each}
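The {#if isAdmin} and {#if tag.scope == username} guards above encode the deletion rule in one place: admins may manage global and admin tags, while a private tag is only removable by its owner. Distilled into a sketch (function name illustrative), this is the same predicate the next patch enforces server-side in the removeTagFromList resolver:

package main

import "fmt"

// mayDeleteTag mirrors the frontend guards: isAdmin unlocks "global" and
// "admin" scoped tags, and a private tag's scope equals its owner's username.
func mayDeleteTag(username string, isAdmin bool, scope string) bool {
	if isAdmin && (scope == "global" || scope == "admin") {
		return true
	}
	return scope == username
}

func main() {
	fmt.Println(mayDeleteTag("demo", false, "demo"))   // true: own private tag
	fmt.Println(mayDeleteTag("demo", false, "global")) // false: not an admin
	fmt.Println(mayDeleteTag("root", true, "admin"))   // true: admin scope
}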
diff --git a/web/frontend/src/tags.entrypoint.js b/web/frontend/src/tags.entrypoint.js index 14df2f9..024a92d 100644 --- a/web/frontend/src/tags.entrypoint.js +++ b/web/frontend/src/tags.entrypoint.js @@ -4,9 +4,13 @@ import Tags from './Tags.root.svelte' new Tags({ target: document.getElementById('svelte-app'), props: { - // authlevel: authlevel, + username: username, + isAdmin: isAdmin, tagmap: tagmap, - } + }, + context: new Map([ + ['cc-config', clusterCockpitConfig] + ]) }) diff --git a/web/templates/monitoring/taglist.tmpl b/web/templates/monitoring/taglist.tmpl index 4388e94..66122fe 100644 --- a/web/templates/monitoring/taglist.tmpl +++ b/web/templates/monitoring/taglist.tmpl @@ -6,8 +6,10 @@ {{end}} {{define "javascript"}} {{end}} From 543ddf540ea540bf0c3dd5f550593cc096ca224a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 23 Apr 2025 14:51:01 +0200 Subject: [PATCH 415/443] implement removeTagFromList mutation, add tag mutation access checks --- api/schema.graphqls | 1 + go.mod | 1 + internal/graph/generated/generated.go | 105 ++++++++++++++++++++++ internal/graph/schema.resolvers.go | 124 +++++++++++++++++++++++--- internal/repository/tags.go | 73 ++++++++------- pkg/schema/user.go | 8 +- web/frontend/src/Tags.root.svelte | 20 ++--- 7 files changed, 272 insertions(+), 60 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index ed8843c..9092b4f 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -277,6 +277,7 @@ type Mutation { deleteTag(id: ID!): ID! addTagsToJob(job: ID!, tagIds: [ID!]!): [Tag!]! removeTagsFromJob(job: ID!, tagIds: [ID!]!): [Tag!]! + removeTagFromList(tagIds: [ID!]!): [Int!]! updateConfiguration(name: String!, value: String!): String } diff --git a/go.mod b/go.mod index 2e2aa36..47e3497 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,7 @@ module github.com/ClusterCockpit/cc-backend go 1.23.5 + toolchain go1.24.1 require ( diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index e5c9ca2..5dbdfd9 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -250,6 +250,7 @@ type ComplexityRoot struct { AddTagsToJob func(childComplexity int, job string, tagIds []string) int CreateTag func(childComplexity int, typeArg string, name string, scope string) int DeleteTag func(childComplexity int, id string) int + RemoveTagFromList func(childComplexity int, tagIds []string) int RemoveTagsFromJob func(childComplexity int, job string, tagIds []string) int UpdateConfiguration func(childComplexity int, name string, value string) int } @@ -399,6 +400,7 @@ type MutationResolver interface { DeleteTag(ctx context.Context, id string) (string, error) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) + RemoveTagFromList(ctx context.Context, tagIds []string) ([]int, error) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) } type QueryResolver interface { @@ -1310,6 +1312,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Mutation.DeleteTag(childComplexity, args["id"].(string)), true + case "Mutation.removeTagFromList": + if e.complexity.Mutation.RemoveTagFromList == nil { + break + } + + args, err := ec.field_Mutation_removeTagFromList_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return 
e.complexity.Mutation.RemoveTagFromList(childComplexity, args["tagIds"].([]string)), true + case "Mutation.removeTagsFromJob": if e.complexity.Mutation.RemoveTagsFromJob == nil { break @@ -2339,6 +2353,7 @@ type Mutation { deleteTag(id: ID!): ID! addTagsToJob(job: ID!, tagIds: [ID!]!): [Tag!]! removeTagsFromJob(job: ID!, tagIds: [ID!]!): [Tag!]! + removeTagFromList(tagIds: [ID!]!): [Int!]! updateConfiguration(name: String!, value: String!): String } @@ -2617,6 +2632,34 @@ func (ec *executionContext) field_Mutation_deleteTag_argsID( return zeroVal, nil } +func (ec *executionContext) field_Mutation_removeTagFromList_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Mutation_removeTagFromList_argsTagIds(ctx, rawArgs) + if err != nil { + return nil, err + } + args["tagIds"] = arg0 + return args, nil +} +func (ec *executionContext) field_Mutation_removeTagFromList_argsTagIds( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["tagIds"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("tagIds")) + if tmp, ok := rawArgs["tagIds"]; ok { + return ec.unmarshalNID2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + func (ec *executionContext) field_Mutation_removeTagsFromJob_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -9690,6 +9733,61 @@ func (ec *executionContext) fieldContext_Mutation_removeTagsFromJob(ctx context. return fc, nil } +func (ec *executionContext) _Mutation_removeTagFromList(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_removeTagFromList(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Mutation().RemoveTagFromList(rctx, fc.Args["tagIds"].([]string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]int) + fc.Result = res + return ec.marshalNInt2ᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Mutation_removeTagFromList(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Mutation", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_removeTagFromList_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + func (ec *executionContext) _Mutation_updateConfiguration(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := 
ec.fieldContext_Mutation_updateConfiguration(ctx, field) if err != nil { @@ -17765,6 +17863,13 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) if out.Values[i] == graphql.Null { out.Invalids++ } + case "removeTagFromList": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_removeTagFromList(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } case "updateConfiguration": out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { return ec._Mutation_updateConfiguration(ctx, field) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 029be87..46f485b 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -125,23 +125,41 @@ func (r *metricValueResolver) Name(ctx context.Context, obj *schema.MetricValue) // CreateTag is the resolver for the createTag field. func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string, scope string) (*schema.Tag, error) { - id, err := r.Repo.CreateTag(typeArg, name, scope) - if err != nil { - log.Warn("Error while creating tag") - return nil, err + user := repository.GetUserFromContext(ctx) + if user == nil { + return nil, fmt.Errorf("no user in context") } - return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil + // Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag + if user.HasRole(schema.RoleAdmin) && scope == "admin" || + user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && scope == "global" || + user.Username == scope { + // Create in DB + id, err := r.Repo.CreateTag(typeArg, name, scope) + if err != nil { + log.Warn("Error while creating tag") + return nil, err + } + return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil + } else { + log.Warn("Not authorized to create tag with scope: %s", scope) + return nil, fmt.Errorf("Not authorized to create tag with scope: %s", scope) + } } // DeleteTag is the resolver for the deleteTag field. func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) { + // This Uses ID string <-> ID string, removeTagFromList uses []string <-> []int panic(fmt.Errorf("not implemented: DeleteTag - deleteTag")) } // AddTagsToJob is the resolver for the addTagsToJob field. 
func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) { - // Selectable Tags Pre-Filtered by Scope in Frontend: No backend check required + user := repository.GetUserFromContext(ctx) + if user == nil { + return nil, fmt.Errorf("no user in context") + } + jid, err := strconv.ParseInt(job, 10, 64) if err != nil { log.Warn("Error while adding tag to job") @@ -150,15 +168,32 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds tags := []*schema.Tag{} for _, tagId := range tagIds { + // Get ID tid, err := strconv.ParseInt(tagId, 10, 64) if err != nil { log.Warn("Error while parsing tag id") return nil, err } - if tags, err = r.Repo.AddTag(repository.GetUserFromContext(ctx), jid, tid); err != nil { - log.Warn("Error while adding tag") - return nil, err + // Test Exists + _, _, tscope, exists := r.Repo.TagInfo(tid) + if !exists { + log.Warn("Tag does not exist (ID): %d", tid) + return nil, fmt.Errorf("Tag does not exist (ID): %d", tid) + } + + // Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag + if user.HasRole(schema.RoleAdmin) && tscope == "admin" || + user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && tscope == "global" || + user.Username == tscope { + // Add to Job + if tags, err = r.Repo.AddTag(user, jid, tid); err != nil { + log.Warn("Error while adding tag") + return nil, err + } + } else { + log.Warn("Not authorized to add tag: %d", tid) + return nil, fmt.Errorf("Not authorized to add tag: %d", tid) } } @@ -167,7 +202,11 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds // RemoveTagsFromJob is the resolver for the removeTagsFromJob field. func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) { - // Removable Tags Pre-Filtered by Scope in Frontend: No backend check required + user := repository.GetUserFromContext(ctx) + if user == nil { + return nil, fmt.Errorf("no user in context") + } + jid, err := strconv.ParseInt(job, 10, 64) if err != nil { log.Warn("Error while parsing job id") @@ -176,21 +215,80 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta tags := []*schema.Tag{} for _, tagId := range tagIds { + // Get ID tid, err := strconv.ParseInt(tagId, 10, 64) if err != nil { log.Warn("Error while parsing tag id") return nil, err } - if tags, err = r.Repo.RemoveTag(repository.GetUserFromContext(ctx), jid, tid); err != nil { - log.Warn("Error while removing tag") - return nil, err + // Test Exists + _, _, tscope, exists := r.Repo.TagInfo(tid) + if !exists { + log.Warn("Tag does not exist (ID): %d", tid) + return nil, fmt.Errorf("Tag does not exist (ID): %d", tid) } + + // Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag + if user.HasRole(schema.RoleAdmin) && tscope == "admin" || + user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && tscope == "global" || + user.Username == tscope { + // Remove from Job + if tags, err = r.Repo.RemoveTag(user, jid, tid); err != nil { + log.Warn("Error while removing tag") + return nil, err + } + } else { + log.Warn("Not authorized to remove tag: %d", tid) + return nil, fmt.Errorf("Not authorized to remove tag: %d", tid) + } + } return tags, nil } +// RemoveTagFromList is the resolver for the removeTagFromList field. 
+func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []string) ([]int, error) { + // Needs Contextuser + user := repository.GetUserFromContext(ctx) + if user == nil { + return nil, fmt.Errorf("no user in context") + } + + tags := []int{} + for _, tagId := range tagIds { + // Get ID + tid, err := strconv.ParseInt(tagId, 10, 64) + if err != nil { + log.Warn("Error while parsing tag id for removal") + return nil, err + } + + // Test Exists + _, _, tscope, exists := r.Repo.TagInfo(tid) + if !exists { + log.Warn("Tag does not exist (ID): %d", tid) + return nil, fmt.Errorf("Tag does not exist (ID): %d", tid) + } + + // Test Access: Admins && Admin Tag OR Everyone && Private Tag + if user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin") || user.Username == tscope { + // Remove from DB + if err = r.Repo.RemoveTagById(tid); err != nil { + log.Warn("Error while removing tag") + return nil, err + } else { + tags = append(tags, int(tid)) + } + } else { + log.Warn("Not authorized to remove tag: %d", tid) + return nil, fmt.Errorf("Not authorized to remove tag: %d", tid) + } + } + return tags, nil +} + // UpdateConfiguration is the resolver for the updateConfiguration field. func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) { if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil { diff --git a/internal/repository/tags.go b/internal/repository/tags.go index 3a35b34..5712c94 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -79,10 +79,10 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema. // Removes a tag from a job by tag info func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagType string, tagName string, tagScope string) ([]*schema.Tag, error) { // Get Tag ID to delete - tagID, err := r.loadTagIDByInfo(tagName, tagType, tagScope) - if err != nil { - log.Warn("Error while finding tagId with: %s, %s, %s", tagName, tagType, tagScope) - return nil, err + tagID, exists := r.TagId(tagType, tagName, tagScope) + if !exists { + log.Warn("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) + return nil, fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) } // Get Job @@ -119,12 +119,35 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT // Removes a tag from db by tag info func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagScope string) error { // Get Tag ID to delete - tagID, err := r.loadTagIDByInfo(tagName, tagType, tagScope) - if err != nil { - log.Warn("Error while finding tagId with: %s, %s, %s", tagName, tagType, tagScope) + tagID, exists := r.TagId(tagType, tagName, tagScope) + if !exists { + log.Warn("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) + return fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) + } + + // Handle Delete JobTagTable + qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID) + + if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil { + s, _, _ := qJobTag.ToSql() + log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err) return err } + // Handle Delete TagTable + qTag := sq.Delete("tag").Where("tag.id = ?", tagID) + + if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil { + s, _, _ := qTag.ToSql() 
+ log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err) + return err + } + + return nil +} + +// Removes a tag from db by tag info +func (r *JobRepository) RemoveTagById(tagID int64) error { // Handle Delete JobTagTable qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID) @@ -279,6 +302,16 @@ func (r *JobRepository) TagId(tagType string, tagName string, tagScope string) ( return } +// TagInfo returns the database infos of the tag with the specified id. +func (r *JobRepository) TagInfo(tagId int64) (tagType string, tagName string, tagScope string, exists bool) { + exists = true + if err := sq.Select("tag.tag_type", "tag.tag_name", "tag.tag_scope").From("tag").Where("tag.id = ?", tagId). + RunWith(r.stmtCache).QueryRow().Scan(&tagType, &tagName, &tagScope); err != nil { + exists = false + } + return +} + // GetTags returns a list of all scoped tags if job is nil or of the tags that the job with that database ID has. func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, error) { q := sq.Select("id", "tag_type", "tag_name", "tag_scope").From("tag") @@ -395,29 +428,3 @@ func (r *JobRepository) checkScopeAuth(user *schema.User, operation string, scop return false, fmt.Errorf("error while checking tag operation auth: no user in context") } } - -func (r *JobRepository) loadTagIDByInfo(tagType string, tagName string, tagScope string) (tagID int64, err error) { - // Get Tag ID to delete - getq := sq.Select("id").From("tag"). - Where("tag_type = ?", tagType). - Where("tag_name = ?", tagName). - Where("tag_scope = ?", tagScope) - - rows, err := getq.RunWith(r.stmtCache).Query() - if err != nil { - s, _, _ := getq.ToSql() - log.Errorf("Error get tags for delete with %s: %v", s, err) - return 0, err - } - - dbTags := make([]*schema.Tag, 0) - for rows.Next() { - dbTag := &schema.Tag{} - if err := rows.Scan(&dbTag.ID); err != nil { - log.Warn("Error while scanning rows") - return 0, err - } - } - - return dbTags[0].ID, nil -} diff --git a/pkg/schema/user.go b/pkg/schema/user.go index c004254..9b62cfa 100644 --- a/pkg/schema/user.go +++ b/pkg/schema/user.go @@ -85,6 +85,7 @@ func IsValidRole(role string) bool { return getRoleEnum(role) != RoleError } +// Check if User has SPECIFIED role AND role is VALID func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) { if IsValidRole(role) { for _, r := range u.Roles { @@ -97,6 +98,7 @@ func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) { return false, false } +// Check if User has SPECIFIED role func (u *User) HasRole(role Role) bool { for _, r := range u.Roles { if r == GetRoleString(role) { @@ -106,7 +108,7 @@ func (u *User) HasRole(role Role) bool { return false } -// Role-Arrays are short: performance not impacted by nested loop +// Check if User has ANY of the listed roles func (u *User) HasAnyRole(queryroles []Role) bool { for _, ur := range u.Roles { for _, qr := range queryroles { @@ -118,7 +120,7 @@ func (u *User) HasAnyRole(queryroles []Role) bool { return false } -// Role-Arrays are short: performance not impacted by nested loop +// Check if User has ALL of the listed roles func (u *User) HasAllRoles(queryroles []Role) bool { target := len(queryroles) matches := 0 @@ -138,7 +140,7 @@ func (u *User) HasAllRoles(queryroles []Role) bool { } } -// Role-Arrays are short: performance not impacted by nested loop +// Check if User has NONE of the listed roles func (u *User) HasNotRoles(queryroles []Role) bool { matches := 0 for _, ur := range u.Roles { diff --git 
a/web/frontend/src/Tags.root.svelte b/web/frontend/src/Tags.root.svelte index 52288c9..dc156e3 100644 --- a/web/frontend/src/Tags.root.svelte +++ b/web/frontend/src/Tags.root.svelte @@ -37,13 +37,8 @@ return mutationStore({ client: client, query: gql` - mutation ($job: ID!, $tagIds: [ID!]!) { - removeTag(tagIds: $tagIds) { - id - type - name - scope - } + mutation ($tagIds: [ID!]!) { + removeTagFromList(tagIds: $tagIds) } `, variables: { tagIds }, @@ -55,7 +50,13 @@ removeTagMutation({tagIds: [tag.id] }).subscribe( (res) => { if (res.fetching === false && !res.error) { - tagmap = res.data.removeTag; + // console.log('Removed:', res.data.removeTagFromList) + // console.log('Targets:', tagType, tagmap[tagType]) + // console.log('Filter:', tagmap[tagType].filter((t) => !res.data.removeTagFromList.includes(t.id))) + tagmap[tagType] = tagmap[tagType].filter((t) => !res.data.removeTagFromList.includes(t.id)); + if (tagmap[tagType].length === 0) { + delete tagmap[tagType] + } pendingChange = "none"; } else if (res.fetching === false && res.error) { throw res.error; @@ -63,9 +64,6 @@ }, ); } - - $: console.log(username, isAdmin) - $: console.log(pendingChange, tagmap)
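The scope check at the heart of the new RemoveTagFromList resolver above is easy to miss in passing: admins may delete "global"- and "admin"-scoped tags, and any user may delete tags whose scope equals their own username (private tags). Below is a minimal, self-contained Go sketch of just that rule; the local user type and mayRemoveTag helper are illustrative stand-ins for the real *schema.User and HasRole machinery.

package main

import "fmt"

type user struct {
	username string
	isAdmin  bool
}

// mayRemoveTag mirrors the resolver's condition:
// admin && (global || admin scope), OR the tag is the user's own private tag.
func mayRemoveTag(u user, tagScope string) bool {
	if u.isAdmin && (tagScope == "global" || tagScope == "admin") {
		return true
	}
	return u.username == tagScope
}

func main() {
	fmt.Println(mayRemoveTag(user{"alice", true}, "global")) // true
	fmt.Println(mayRemoveTag(user{"bob", false}, "admin"))   // false
	fmt.Println(mayRemoveTag(user{"bob", false}, "bob"))     // true: own private tag
}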
From 1b3a12a4dcf1fcff6ea19680abb9f74d61062ce2 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 23 Apr 2025 15:01:12 +0200 Subject: [PATCH 416/443] feat: add remove functionality to tag view, add confirm alert --- web/frontend/src/Tags.root.svelte | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/web/frontend/src/Tags.root.svelte b/web/frontend/src/Tags.root.svelte index dc156e3..441134a 100644 --- a/web/frontend/src/Tags.root.svelte +++ b/web/frontend/src/Tags.root.svelte @@ -46,23 +46,22 @@ }; function removeTag(tag, tagType) { - pendingChange = tagType; - removeTagMutation({tagIds: [tag.id] }).subscribe( - (res) => { - if (res.fetching === false && !res.error) { - // console.log('Removed:', res.data.removeTagFromList) - // console.log('Targets:', tagType, tagmap[tagType]) - // console.log('Filter:', tagmap[tagType].filter((t) => !res.data.removeTagFromList.includes(t.id))) - tagmap[tagType] = tagmap[tagType].filter((t) => !res.data.removeTagFromList.includes(t.id)); - if (tagmap[tagType].length === 0) { - delete tagmap[tagType] + if (confirm("Are you sure you want to completely remove this tag?\n\n" + tagType + ':' + tag.name)) { + pendingChange = tagType; + removeTagMutation({tagIds: [tag.id] }).subscribe( + (res) => { + if (res.fetching === false && !res.error) { + tagmap[tagType] = tagmap[tagType].filter((t) => !res.data.removeTagFromList.includes(t.id)); + if (tagmap[tagType].length === 0) { + delete tagmap[tagType] + } + pendingChange = "none"; + } else if (res.fetching === false && res.error) { + throw res.error; } - pendingChange = "none"; - } else if (res.fetching === false && res.error) { - throw res.error; - } - }, - ); + }, + ); + } } From 48fa75386c4f1a6c145f34a7a197e7c2a470f88a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 23 Apr 2025 16:12:56 +0200 Subject: [PATCH 417/443] feat: add tag removal api endpoints --- internal/api/rest.go | 14 ++++---------- internal/graph/schema.resolvers.go | 14 +++++++------- internal/repository/tags.go | 4 ++-- 3 files changed, 13 insertions(+), 19 deletions(-) diff --git a/internal/api/rest.go b/internal/api/rest.go index 89bdd5e..78cf276 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -78,12 +78,14 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) { r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost) r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet) r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) + r.HandleFunc("/jobs/tag_job/{id}", api.removeTagJob).Methods(http.MethodDelete) r.HandleFunc("/jobs/edit_meta/{id}", api.editMeta).Methods(http.MethodPost, http.MethodPatch) r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet) r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete) + r.HandleFunc("/tags/", api.removeTags).Methods(http.MethodDelete) r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet) if api.MachineStateDir != "" { @@ -805,14 +807,6 @@ func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) { return } - // remainingTags := job.Tags[:0] - // for _, tag := range job.Tags { - // if tag.Type != rtag.Type && - // tag.Name != rtag.Name && - // tag.Scope != rtag.Scope { - // remainingTags = 
append(remainingTags, tag)
-	// 	}
-	// }
 	job.Tags = remainingTags
 }
 
@@ -836,7 +830,7 @@ func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
 // @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
 // @failure 500 {object} api.ErrorResponse "Internal Server Error"
 // @security ApiKeyAuth
-// @router /jobs/tag_job/ [delete]
+// @router /tags/ [delete]
 func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
 	var req TagJobApiRequest
 	if err := decode(r.Body, &req); err != nil {
@@ -863,7 +857,7 @@ func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
 	}
 
 	rw.WriteHeader(http.StatusOK)
-	rw.Write([]byte(fmt.Sprintf("Deleted Tags from DB: %d of %d", currentCount, targetCount)))
+	rw.Write([]byte(fmt.Sprintf("Deleted Tags from DB: %d successful of %d requested\n", currentCount, targetCount)))
 }
 
 // startJob godoc
diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go
index 46f485b..10e1b55 100644
--- a/internal/graph/schema.resolvers.go
+++ b/internal/graph/schema.resolvers.go
@@ -142,7 +142,7 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
 		}
 		return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
 	} else {
-		log.Warn("Not authorized to create tag with scope: %s", scope)
+		log.Warnf("Not authorized to create tag with scope: %s", scope)
 		return nil, fmt.Errorf("Not authorized to create tag with scope: %s", scope)
 	}
 }
@@ -178,7 +178,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 		// Test Exists
 		_, _, tscope, exists := r.Repo.TagInfo(tid)
 		if !exists {
-			log.Warn("Tag does not exist (ID): %d", tid)
+			log.Warnf("Tag does not exist (ID): %d", tid)
 			return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
 		}
 
@@ -192,7 +192,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 				return nil, err
 			}
 		} else {
-			log.Warn("Not authorized to add tag: %d", tid)
+			log.Warnf("Not authorized to add tag: %d", tid)
 			return nil, fmt.Errorf("Not authorized to add tag: %d", tid)
 		}
 	}
@@ -225,7 +225,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 		// Test Exists
 		_, _, tscope, exists := r.Repo.TagInfo(tid)
 		if !exists {
-			log.Warn("Tag does not exist (ID): %d", tid)
+			log.Warnf("Tag does not exist (ID): %d", tid)
 			return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
 		}
 
@@ -239,7 +239,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 				return nil, err
 			}
 		} else {
-			log.Warn("Not authorized to remove tag: %d", tid)
+			log.Warnf("Not authorized to remove tag: %d", tid)
 			return nil, fmt.Errorf("Not authorized to remove tag: %d", tid)
 		}
 
@@ -268,7 +268,7 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 		// Test Exists
 		_, _, tscope, exists := r.Repo.TagInfo(tid)
 		if !exists {
-			log.Warn("Tag does not exist (ID): %d", tid)
+			log.Warnf("Tag does not exist (ID): %d", tid)
 			return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
 		}
 
@@ -282,7 +282,7 @@ func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []strin
 				tags = append(tags, int(tid))
 			}
 		} else {
-			log.Warn("Not authorized to remove tag: %d", tid)
+			log.Warnf("Not authorized to remove tag: %d", tid)
 			return nil, fmt.Errorf("Not authorized to remove tag: %d", tid)
 		}
 	}
diff --git a/internal/repository/tags.go b/internal/repository/tags.go
index 5712c94..db44dbc 100644
--- a/internal/repository/tags.go
+++ b/internal/repository/tags.go
@@ -81,7
+81,7 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT // Get Tag ID to delete tagID, exists := r.TagId(tagType, tagName, tagScope) if !exists { - log.Warn("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) + log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) return nil, fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) } @@ -121,7 +121,7 @@ func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagSc // Get Tag ID to delete tagID, exists := r.TagId(tagType, tagName, tagScope) if !exists { - log.Warn("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) + log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) return fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope) } From e3653daea3a792bf50a7d65381b1d511d83d8966 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Wed, 23 Apr 2025 17:59:26 +0200 Subject: [PATCH 418/443] reduce code in tag svelte view --- internal/repository/tags.go | 4 +- web/frontend/src/Tags.root.svelte | 66 +++++++++---------------------- 2 files changed, 20 insertions(+), 50 deletions(-) diff --git a/internal/repository/tags.go b/internal/repository/tags.go index db44dbc..544163e 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -45,7 +45,7 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche return tags, archive.UpdateTags(j, archiveTags) } -// Removes a tag from a job by its ID +// Removes a tag from a job by tag id func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) { j, err := r.FindByIdWithUser(user, job) if err != nil { @@ -146,7 +146,7 @@ func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagSc return nil } -// Removes a tag from db by tag info +// Removes a tag from db by tag id func (r *JobRepository) RemoveTagById(tagID int64) error { // Handle Delete JobTagTable qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID) diff --git a/web/frontend/src/Tags.root.svelte b/web/frontend/src/Tags.root.svelte index 441134a..03311b4 100644 --- a/web/frontend/src/Tags.root.svelte +++ b/web/frontend/src/Tags.root.svelte @@ -80,58 +80,28 @@
{#each tagList as tag (tag.id)} - {#if tag.scope == "global"} - - - {#if isAdmin} - - {/if} - - {:else if tag.scope == "admin"} - - - {#if isAdmin} - - {/if} - - {:else} - - - {#if tag.scope == username} - {/if} - - {/if} + + {#if (isAdmin && (tag.scope == "admin" || tag.scope == "global")) || tag.scope == username } + + {/if} + {/each}
{/each} From 94a39fc61f44a021ebfe0e4a288caf5e2fb0ed36 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 24 Apr 2025 10:53:55 +0200 Subject: [PATCH 419/443] Readd tag endpoints --- internal/api/rest.go | 111 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/internal/api/rest.go b/internal/api/rest.go index 0fa4611..7029d9d 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -82,12 +82,15 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) { r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost) r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet) r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) + r.HandleFunc("/jobs/tag_job/{id}", api.removeTagJob).Methods(http.MethodDelete) r.HandleFunc("/jobs/edit_meta/{id}", api.editMeta).Methods(http.MethodPost, http.MethodPatch) r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet) r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete) + r.HandleFunc("/tags/", api.removeTags).Methods(http.MethodDelete) + if api.MachineStateDir != "" { r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet) r.HandleFunc("/machine_state/{cluster}/{host}", api.putMachineState).Methods(http.MethodPut, http.MethodPost) @@ -713,6 +716,114 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { json.NewEncoder(rw).Encode(job) } +// removeTagJob godoc +// @summary Removes one or more tags from a job +// @tags Job add and modify +// @description Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match. +// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API. +// @description If tagged job is already finished: Tag will be removed from respective archive files. 
+// @accept json
+// @produce json
+// @param id path int true "Job Database ID"
+// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
+// @success 200 {object} schema.Job "Updated job resource"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /jobs/tag_job/{id} [delete]
+func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
+	id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
+	if err != nil {
+		http.Error(rw, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	job, err := api.JobRepository.FindById(r.Context(), id)
+	if err != nil {
+		http.Error(rw, err.Error(), http.StatusNotFound)
+		return
+	}
+
+	job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID)
+	if err != nil {
+		http.Error(rw, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	var req TagJobApiRequest
+	if err := decode(r.Body, &req); err != nil {
+		http.Error(rw, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	for _, rtag := range req {
+		// Only Global and Admin Tags
+		if rtag.Scope != "global" && rtag.Scope != "admin" {
+			log.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
+			continue
+		}
+
+		remainingTags, err := api.JobRepository.RemoveJobTagByRequest(repository.GetUserFromContext(r.Context()), job.ID, rtag.Type, rtag.Name, rtag.Scope)
+		if err != nil {
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		job.Tags = remainingTags
+	}
+
+	rw.Header().Add("Content-Type", "application/json")
+	rw.WriteHeader(http.StatusOK)
+	json.NewEncoder(rw).Encode(job)
+}
+
+// removeTags godoc
+// @summary Removes all tags and job-relations for type:name tuple
+// @tags Tag remove
+// @description Removes tags by type and name. Name and Type of Tag(s) must match.
+// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
+// @description Tag will be removed from respective archive files.
+// @accept json
+// @produce plain
+// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
+// @success 200 {string} string "Success Response"
+// @failure 400 {object} api.ErrorResponse "Bad Request"
+// @failure 401 {object} api.ErrorResponse "Unauthorized"
+// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
+// @failure 500 {object} api.ErrorResponse "Internal Server Error"
+// @security ApiKeyAuth
+// @router /tags/ [delete]
+func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
+	var req TagJobApiRequest
+	if err := decode(r.Body, &req); err != nil {
+		http.Error(rw, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	targetCount := len(req)
+	currentCount := 0
+	for _, rtag := range req {
+		// Only Global and Admin Tags
+		if rtag.Scope != "global" && rtag.Scope != "admin" {
+			log.Warn("Cannot delete private tag: Skip")
+			continue
+		}
+
+		err := api.JobRepository.RemoveTagByRequest(rtag.Type, rtag.Name, rtag.Scope)
+		if err != nil {
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+			return
+		} else {
+			currentCount++
+		}
+	}
+
+	rw.WriteHeader(http.StatusOK)
+	rw.Write([]byte(fmt.Sprintf("Deleted Tags from DB: %d successful of %d requested\n", currentCount, targetCount)))
+}
+
 // startJob godoc
 // @summary Adds a new job as "running"
 // @tags Job add and modify

From 570eba37947636bcc383ee8aa9213e54c88f9585 Mon Sep 17 00:00:00 2001
From: Jan Eitzinger
Date: Thu, 24 Apr 2025 11:01:13 +0200
Subject: [PATCH 420/443] Cleanup Swagger docs

---
 internal/api/rest.go | 35 -----------------
 1 file changed, 35 deletions(-)

diff --git a/internal/api/rest.go b/internal/api/rest.go
index 7029d9d..8a21e68 100644
--- a/internal/api/rest.go
+++ b/internal/api/rest.go
@@ -1228,21 +1228,6 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
 	fmt.Fprintf(rw, "User %v successfully created!\n", username)
 }
 
-// deleteUser godoc
-// @summary Deletes a user
-// @tags User
-// @description User defined by username in form data will be deleted from database.
-// @accept mpfd
-// @produce plain
-// @param username formData string true "User ID to delete"
-// @success 200 "User deleted successfully"
-// @failure 400 {string} string "Bad Request"
-// @failure 401 {string} string "Unauthorized"
-// @failure 403 {string} string "Forbidden"
-// @failure 422 {string} string "Unprocessable Entity: deleting user failed"
-// @failure 500 {string} string "Internal Server Error"
-// @security ApiKeyAuth
-// @router /users/ [delete]
 func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
 	// SecuredCheck() only worked with TokenAuth: Removed
 
@@ -1291,26 +1276,6 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
 	json.NewEncoder(rw).Encode(users)
 }
 
-// updateUser godoc
-// @summary Updates an existing user
-// @tags User
-// @description Modifies user defined by username (id) in one of four possible ways.
-// @description If more than one formValue is set then only the highest priority field is used.
-// @accept mpfd -// @produce plain -// @param id path string true "Database ID of User" -// @param add-role formData string false "Priority 1: Role to add" Enums(admin, support, manager, user, api) -// @param remove-role formData string false "Priority 2: Role to remove" Enums(admin, support, manager, user, api) -// @param add-project formData string false "Priority 3: Project to add" -// @param remove-project formData string false "Priority 4: Project to remove" -// @success 200 {string} string "Success Response Message" -// @failure 400 {string} string "Bad Request" -// @failure 401 {string} string "Unauthorized" -// @failure 403 {string} string "Forbidden" -// @failure 422 {string} string "Unprocessable Entity: The user could not be updated" -// @failure 500 {string} string "Internal Server Error" -// @security ApiKeyAuth -// @router /user/{id} [post] func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { // SecuredCheck() only worked with TokenAuth: Removed From 65df27154c3f26d1748c2ca160af43f6abbc283d Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 24 Apr 2025 11:14:51 +0200 Subject: [PATCH 421/443] Cleanup and regenerate Swagger docs --- api/swagger.json | 396 +++++++++++-------------------------------- api/swagger.yaml | 279 ++++++++---------------------- internal/api/docs.go | 396 +++++++++++-------------------------------- internal/api/rest.go | 38 +---- 4 files changed, 269 insertions(+), 840 deletions(-) diff --git a/api/swagger.json b/api/swagger.json index 683b520..c05ec77 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -826,185 +826,14 @@ } } }, - "/config/notice/": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Modifies the content of notice.txt, shown as notice box on the homepage.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", - "consumes": [ - "multipart/form-data" - ], - "produces": [ - "text/plain" - ], - "tags": [ - "User" - ], - "summary": "Updates or empties the notice box content", - "parameters": [ - { - "type": "string", - "description": "Priority 1: New content to display", - "name": "new-content", - "in": "formData" - } - ], - "responses": { - "200": { - "description": "Success Response Message", - "schema": { - "type": "string" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "type": "string" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "string" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: The user could not be updated", - "schema": { - "type": "string" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "type": "string" - } - } - } - } - }, - "/config/user/{id}": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Modifies user defined by username (id) in one of four possible ways.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", - "consumes": [ - "multipart/form-data" - ], - "produces": [ - "text/plain" - ], - "tags": [ - "User" - ], - "summary": "Updates an existing user", - "parameters": [ - { - "type": "string", - "description": "Database ID of User", - "name": "id", - "in": "path", - "required": true - }, - { - "enum": [ - "admin", - "support", - "manager", - "user", 
- "api" - ], - "type": "string", - "description": "Priority 1: Role to add", - "name": "add-role", - "in": "formData" - }, - { - "enum": [ - "admin", - "support", - "manager", - "user", - "api" - ], - "type": "string", - "description": "Priority 2: Role to remove", - "name": "remove-role", - "in": "formData" - }, - { - "type": "string", - "description": "Priority 3: Project to add", - "name": "add-project", - "in": "formData" - }, - { - "type": "string", - "description": "Priority 4: Project to remove", - "name": "remove-project", - "in": "formData" - } - ], - "responses": { - "200": { - "description": "Success Response Message", - "schema": { - "type": "string" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "type": "string" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "string" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: The user could not be updated", - "schema": { - "type": "string" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "type": "string" - } - } - } - } - }, - "/config/users/": { + "/api/users/": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", + "description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.", "produces": [ "application/json" ], @@ -1056,70 +885,111 @@ } } } - }, - "post": { + } + }, + "/jobs/tag_job/{id}": { + "delete": { "security": [ { "ApiKeyAuth": [] } ], - "description": "User specified in form data will be saved to database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", + "description": "Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". 
Private tags can not be deleted via API.\nIf tagged job is already finished: Tag will be removed from respective archive files.", "consumes": [ - "multipart/form-data" + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Job add and modify" + ], + "summary": "Removes one or more tags from a job", + "parameters": [ + { + "type": "integer", + "description": "Job Database ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Array of tag-objects to remove", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.ApiTag" + } + } + } + ], + "responses": { + "200": { + "description": "Updated job resource", + "schema": { + "$ref": "#/definitions/schema.Job" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "404": { + "description": "Job or tag does not exist", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + } + }, + "/tags/": { + "delete": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Removes tags by type and name. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". Private tags can not be deleted via API.\nTag wills be removed from respective archive files.", + "consumes": [ + "application/json" ], "produces": [ "text/plain" ], "tags": [ - "User" + "Tag remove" ], - "summary": "Adds a new user", + "summary": "Removes all tags and job-relations for type:name tuple", "parameters": [ { - "type": "string", - "description": "Unique user ID", - "name": "username", - "in": "formData", - "required": true - }, - { - "type": "string", - "description": "User password", - "name": "password", - "in": "formData", - "required": true - }, - { - "enum": [ - "admin", - "support", - "manager", - "user", - "api" - ], - "type": "string", - "description": "User role", - "name": "role", - "in": "formData", - "required": true - }, - { - "type": "string", - "description": "Managed project, required for new manager role user", - "name": "project", - "in": "formData" - }, - { - "type": "string", - "description": "Users name", - "name": "name", - "in": "formData" - }, - { - "type": "string", - "description": "Users email", - "name": "email", - "in": "formData" + "description": "Array of tag-objects to remove", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.ApiTag" + } + } } ], "responses": { @@ -1132,93 +1002,25 @@ "400": { "description": "Bad Request", "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } }, "401": { "description": "Unauthorized", "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } }, - "403": { - "description": "Forbidden", + "404": { + "description": "Job or tag does not exist", "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: creating user failed", - "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } }, "500": { "description": "Internal Server Error", "schema": { - "type": "string" - } - } - } - }, - "delete": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": 
"User defined by username in form data will be deleted from database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", - "consumes": [ - "multipart/form-data" - ], - "produces": [ - "text/plain" - ], - "tags": [ - "User" - ], - "summary": "Deletes a user", - "parameters": [ - { - "type": "string", - "description": "User ID to delete", - "name": "username", - "in": "formData", - "required": true - } - ], - "responses": { - "200": { - "description": "User deleted successfully" - }, - "400": { - "description": "Bad Request", - "schema": { - "type": "string" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "string" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: deleting user failed", - "schema": { - "type": "string" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } } } diff --git a/api/swagger.yaml b/api/swagger.yaml index 35ec6c4..26210be 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -1217,173 +1217,11 @@ paths: summary: Adds one or more tags to a job tags: - Job add and modify - /config/notice/: - post: - consumes: - - multipart/form-data - description: |- - Modifies the content of notice.txt, shown as notice box on the homepage. - If more than one formValue is set then only the highest priority field is used. - Only accessible from IPs registered with apiAllowedIPs configuration option. - parameters: - - description: 'Priority 1: New content to display' - in: formData - name: new-content - type: string - produces: - - text/plain - responses: - "200": - description: Success Response Message - schema: - type: string - "400": - description: Bad Request - schema: - type: string - "401": - description: Unauthorized - schema: - type: string - "403": - description: Forbidden - schema: - type: string - "422": - description: 'Unprocessable Entity: The user could not be updated' - schema: - type: string - "500": - description: Internal Server Error - schema: - type: string - security: - - ApiKeyAuth: [] - summary: Updates or empties the notice box content - tags: - - User - /config/user/{id}: - post: - consumes: - - multipart/form-data - description: |- - Modifies user defined by username (id) in one of four possible ways. - If more than one formValue is set then only the highest priority field is used. - Only accessible from IPs registered with apiAllowedIPs configuration option. 
- parameters: - - description: Database ID of User - in: path - name: id - required: true - type: string - - description: 'Priority 1: Role to add' - enum: - - admin - - support - - manager - - user - - api - in: formData - name: add-role - type: string - - description: 'Priority 2: Role to remove' - enum: - - admin - - support - - manager - - user - - api - in: formData - name: remove-role - type: string - - description: 'Priority 3: Project to add' - in: formData - name: add-project - type: string - - description: 'Priority 4: Project to remove' - in: formData - name: remove-project - type: string - produces: - - text/plain - responses: - "200": - description: Success Response Message - schema: - type: string - "400": - description: Bad Request - schema: - type: string - "401": - description: Unauthorized - schema: - type: string - "403": - description: Forbidden - schema: - type: string - "422": - description: 'Unprocessable Entity: The user could not be updated' - schema: - type: string - "500": - description: Internal Server Error - schema: - type: string - security: - - ApiKeyAuth: [] - summary: Updates an existing user - tags: - - User - /config/users/: - delete: - consumes: - - multipart/form-data - description: |- - User defined by username in form data will be deleted from database. - Only accessible from IPs registered with apiAllowedIPs configuration option. - parameters: - - description: User ID to delete - in: formData - name: username - required: true - type: string - produces: - - text/plain - responses: - "200": - description: User deleted successfully - "400": - description: Bad Request - schema: - type: string - "401": - description: Unauthorized - schema: - type: string - "403": - description: Forbidden - schema: - type: string - "422": - description: 'Unprocessable Entity: deleting user failed' - schema: - type: string - "500": - description: Internal Server Error - schema: - type: string - security: - - ApiKeyAuth: [] - summary: Deletes a user - tags: - - User + /api/users/: get: description: |- Returns a JSON-encoded list of users. Required query-parameter defines if all users or only users with additional special roles are returned. - Only accessible from IPs registered with apiAllowedIPs configuration option. parameters: - description: If returned list should contain all users or only users with additional special roles @@ -1421,46 +1259,73 @@ paths: summary: Returns a list of users tags: - User - post: + /jobs/tag_job/{id}: + delete: consumes: - - multipart/form-data + - application/json description: |- - User specified in form data will be saved to database. - Only accessible from IPs registered with apiAllowedIPs configuration option. + Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match. + Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API. + If tagged job is already finished: Tag will be removed from respective archive files. 
parameters: - - description: Unique user ID - in: formData - name: username + - description: Job Database ID + in: path + name: id required: true - type: string - - description: User password - in: formData - name: password + type: integer + - description: Array of tag-objects to remove + in: body + name: request required: true - type: string - - description: User role - enum: - - admin - - support - - manager - - user - - api - in: formData - name: role + schema: + items: + $ref: '#/definitions/api.ApiTag' + type: array + produces: + - application/json + responses: + "200": + description: Updated job resource + schema: + $ref: '#/definitions/schema.Job' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.ErrorResponse' + "401": + description: Unauthorized + schema: + $ref: '#/definitions/api.ErrorResponse' + "404": + description: Job or tag does not exist + schema: + $ref: '#/definitions/api.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.ErrorResponse' + security: + - ApiKeyAuth: [] + summary: Removes one or more tags from a job + tags: + - Job add and modify + /tags/: + delete: + consumes: + - application/json + description: |- + Removes tags by type and name. Name and Type of Tag(s) must match. + Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API. + Tag wills be removed from respective archive files. + parameters: + - description: Array of tag-objects to remove + in: body + name: request required: true - type: string - - description: Managed project, required for new manager role user - in: formData - name: project - type: string - - description: Users name - in: formData - name: name - type: string - - description: Users email - in: formData - name: email - type: string + schema: + items: + $ref: '#/definitions/api.ApiTag' + type: array produces: - text/plain responses: @@ -1471,28 +1336,24 @@ paths: "400": description: Bad Request schema: - type: string + $ref: '#/definitions/api.ErrorResponse' "401": description: Unauthorized schema: - type: string - "403": - description: Forbidden + $ref: '#/definitions/api.ErrorResponse' + "404": + description: Job or tag does not exist schema: - type: string - "422": - description: 'Unprocessable Entity: creating user failed' - schema: - type: string + $ref: '#/definitions/api.ErrorResponse' "500": description: Internal Server Error schema: - type: string + $ref: '#/definitions/api.ErrorResponse' security: - ApiKeyAuth: [] - summary: Adds a new user + summary: Removes all tags and job-relations for type:name tuple tags: - - User + - Tag remove securityDefinitions: ApiKeyAuth: in: header diff --git a/internal/api/docs.go b/internal/api/docs.go index 2408f85..c1cd391 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -833,185 +833,14 @@ const docTemplate = `{ } } }, - "/config/notice/": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Modifies the content of notice.txt, shown as notice box on the homepage.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", - "consumes": [ - "multipart/form-data" - ], - "produces": [ - "text/plain" - ], - "tags": [ - "User" - ], - "summary": "Updates or empties the notice box content", - "parameters": [ - { - "type": "string", - "description": "Priority 1: New content to display", - "name": "new-content", - "in": "formData" - } - ], - 
"responses": { - "200": { - "description": "Success Response Message", - "schema": { - "type": "string" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "type": "string" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "string" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: The user could not be updated", - "schema": { - "type": "string" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "type": "string" - } - } - } - } - }, - "/config/user/{id}": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Modifies user defined by username (id) in one of four possible ways.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", - "consumes": [ - "multipart/form-data" - ], - "produces": [ - "text/plain" - ], - "tags": [ - "User" - ], - "summary": "Updates an existing user", - "parameters": [ - { - "type": "string", - "description": "Database ID of User", - "name": "id", - "in": "path", - "required": true - }, - { - "enum": [ - "admin", - "support", - "manager", - "user", - "api" - ], - "type": "string", - "description": "Priority 1: Role to add", - "name": "add-role", - "in": "formData" - }, - { - "enum": [ - "admin", - "support", - "manager", - "user", - "api" - ], - "type": "string", - "description": "Priority 2: Role to remove", - "name": "remove-role", - "in": "formData" - }, - { - "type": "string", - "description": "Priority 3: Project to add", - "name": "add-project", - "in": "formData" - }, - { - "type": "string", - "description": "Priority 4: Project to remove", - "name": "remove-project", - "in": "formData" - } - ], - "responses": { - "200": { - "description": "Success Response Message", - "schema": { - "type": "string" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "type": "string" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "string" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: The user could not be updated", - "schema": { - "type": "string" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "type": "string" - } - } - } - } - }, - "/config/users/": { + "/api/users/": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", + "description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.", "produces": [ "application/json" ], @@ -1063,70 +892,111 @@ const docTemplate = `{ } } } - }, - "post": { + } + }, + "/jobs/tag_job/{id}": { + "delete": { "security": [ { "ApiKeyAuth": [] } ], - "description": "User specified in form data will be saved to database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", + "description": "Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". 
Private tags can not be deleted via API.\nIf tagged job is already finished: Tag will be removed from respective archive files.", "consumes": [ - "multipart/form-data" + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Job add and modify" + ], + "summary": "Removes one or more tags from a job", + "parameters": [ + { + "type": "integer", + "description": "Job Database ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Array of tag-objects to remove", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.ApiTag" + } + } + } + ], + "responses": { + "200": { + "description": "Updated job resource", + "schema": { + "$ref": "#/definitions/schema.Job" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "404": { + "description": "Job or tag does not exist", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + } + }, + "/tags/": { + "delete": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Removes tags by type and name. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". Private tags can not be deleted via API.\nTag wills be removed from respective archive files.", + "consumes": [ + "application/json" ], "produces": [ "text/plain" ], "tags": [ - "User" + "Tag remove" ], - "summary": "Adds a new user", + "summary": "Removes all tags and job-relations for type:name tuple", "parameters": [ { - "type": "string", - "description": "Unique user ID", - "name": "username", - "in": "formData", - "required": true - }, - { - "type": "string", - "description": "User password", - "name": "password", - "in": "formData", - "required": true - }, - { - "enum": [ - "admin", - "support", - "manager", - "user", - "api" - ], - "type": "string", - "description": "User role", - "name": "role", - "in": "formData", - "required": true - }, - { - "type": "string", - "description": "Managed project, required for new manager role user", - "name": "project", - "in": "formData" - }, - { - "type": "string", - "description": "Users name", - "name": "name", - "in": "formData" - }, - { - "type": "string", - "description": "Users email", - "name": "email", - "in": "formData" + "description": "Array of tag-objects to remove", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.ApiTag" + } + } } ], "responses": { @@ -1139,93 +1009,25 @@ const docTemplate = `{ "400": { "description": "Bad Request", "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } }, "401": { "description": "Unauthorized", "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } }, - "403": { - "description": "Forbidden", + "404": { + "description": "Job or tag does not exist", "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: creating user failed", - "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } }, "500": { "description": "Internal Server Error", "schema": { - "type": "string" - } - } - } - }, - "delete": { - "security": [ - { - "ApiKeyAuth": [] - } - 
], - "description": "User defined by username in form data will be deleted from database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", - "consumes": [ - "multipart/form-data" - ], - "produces": [ - "text/plain" - ], - "tags": [ - "User" - ], - "summary": "Deletes a user", - "parameters": [ - { - "type": "string", - "description": "User ID to delete", - "name": "username", - "in": "formData", - "required": true - } - ], - "responses": { - "200": { - "description": "User deleted successfully" - }, - "400": { - "description": "Bad Request", - "schema": { - "type": "string" - } - }, - "401": { - "description": "Unauthorized", - "schema": { - "type": "string" - } - }, - "403": { - "description": "Forbidden", - "schema": { - "type": "string" - } - }, - "422": { - "description": "Unprocessable Entity: deleting user failed", - "schema": { - "type": "string" - } - }, - "500": { - "description": "Internal Server Error", - "schema": { - "type": "string" + "$ref": "#/definitions/api.ErrorResponse" } } } diff --git a/internal/api/rest.go b/internal/api/rest.go index 2b2a3bd..669768e 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -1163,26 +1163,6 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) { }) } -// createUser godoc -// @summary Adds a new user -// @tags User -// @description User specified in form data will be saved to database. -// @accept mpfd -// @produce plain -// @param username formData string true "Unique user ID" -// @param password formData string true "User password" -// @param role formData string true "User role" Enums(admin, support, manager, user, api) -// @param project formData string false "Managed project, required for new manager role user" -// @param name formData string false "Users name" -// @param email formData string false "Users email" -// @success 200 {string} string "Success Response" -// @failure 400 {string} string "Bad Request" -// @failure 401 {string} string "Unauthorized" -// @failure 403 {string} string "Forbidden" -// @failure 422 {string} string "Unprocessable Entity: creating user failed" -// @failure 500 {string} string "Internal Server Error" -// @security ApiKeyAuth -// @router /config/users/ [post] func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { // SecuredCheck() only worked with TokenAuth: Removed @@ -1257,7 +1237,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { // @failure 403 {string} string "Forbidden" // @failure 500 {string} string "Internal Server Error" // @security ApiKeyAuth -// @router /config/users/ [get] +// @router /api/users/ [get] func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { // SecuredCheck() only worked with TokenAuth: Removed @@ -1319,22 +1299,6 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { } } -// editNotice godoc -// @summary Updates or empties the notice box content -// @tags User -// @description Modifies the content of notice.txt, shown as notice box on the homepage. -// @description If more than one formValue is set then only the highest priority field is used. 
-// @accept mpfd -// @produce plain -// @param new-content formData string false "Priority 1: New content to display" -// @success 200 {string} string "Success Response Message" -// @failure 400 {string} string "Bad Request" -// @failure 401 {string} string "Unauthorized" -// @failure 403 {string} string "Forbidden" -// @failure 422 {string} string "Unprocessable Entity: The user could not be updated" -// @failure 500 {string} string "Internal Server Error" -// @security ApiKeyAuth -// @router /notice/ [post] func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) { // SecuredCheck() only worked with TokenAuth: Removed From acaad69917acba109a6f544656271925f846c790 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 24 Apr 2025 11:42:34 +0200 Subject: [PATCH 422/443] Prepare Bugfix Release 1.4.4 --- Makefile | 2 +- ReleaseNotes.md | 18 ++---------------- 2 files changed, 3 insertions(+), 17 deletions(-) diff --git a/Makefile b/Makefile index 0721fc4..5702ba1 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ TARGET = ./cc-backend VAR = ./var CFG = config.json .env FRONTEND = ./web/frontend -VERSION = 1.4.3 +VERSION = 1.4.4 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development') CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S") LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}' diff --git a/ReleaseNotes.md b/ReleaseNotes.md index beb8ee1..3e3939d 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -1,4 +1,4 @@ -# `cc-backend` version 1.4.3 +# `cc-backend` version 1.4.4 Supports job archive version 2 and database version 8. @@ -22,21 +22,7 @@ For release specific notes visit the [ClusterCockpit Documentation](https://clus ## New features -- Detailed Node List - - Adds new routes `/systems/list/$cluster` and `/systems/list/$cluster/$subcluster` - - Displays live, scoped metric data requested from the nodes indepenent of jobs -- Color Blind Mode - - Set on a per-user basis in options - - Applies to plot data, plot background color, statsseries colors, roofline timescale -- Job-View metric selection is now persisted based on the jobs subcluster. -Helpful for heterogeneous subcluster configurations. 
-- Histogram Bin Select in User-View - - Metric-Histograms: `10 Bins` now default, selectable options `20, 50, 100` - - Job-Duration-Histogram: `48h in 1h Bins` now default, selectable options: - - `60 minutes in 1 minute Bins` - - `12 hours in 10 minute Bins` - - `3 days in 6 hour Bins` - - `7 days in 12 hour Bins` +- Enable to delete tags from the web interface ## Known issues From aba75b3a1901be6b4117968cb22049ce0452ae6d Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Thu, 24 Apr 2025 12:57:37 +0200 Subject: [PATCH 423/443] Remove websocket sse GraphQL support --- cmd/cc-backend/server.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go index 3c19730..cbd85b7 100644 --- a/cmd/cc-backend/server.go +++ b/cmd/cc-backend/server.go @@ -32,7 +32,6 @@ import ( "github.com/ClusterCockpit/cc-backend/web" "github.com/gorilla/handlers" "github.com/gorilla/mux" - "github.com/gorilla/websocket" httpSwagger "github.com/swaggo/http-swagger" ) @@ -58,16 +57,16 @@ func serverInit() { graphQLServer := handler.New( generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) - graphQLServer.AddTransport(transport.SSE{}) + // graphQLServer.AddTransport(transport.SSE{}) graphQLServer.AddTransport(transport.POST{}) - graphQLServer.AddTransport(transport.Websocket{ - KeepAlivePingInterval: 10 * time.Second, - Upgrader: websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { - return true - }, - }, - }) + // graphQLServer.AddTransport(transport.Websocket{ + // KeepAlivePingInterval: 10 * time.Second, + // Upgrader: websocket.Upgrader{ + // CheckOrigin: func(r *http.Request) bool { + // return true + // }, + // }, + // }) if os.Getenv("DEBUG") != "1" { // Having this handler means that a error message is returned via GraphQL instead of the connection simply beeing closed. 
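With the SSE and WebSocket transports commented out above, the GraphQL endpoint only speaks plain HTTP POST; subscriptions are no longer served. A minimal client call, sketched under the assumption that the handler is mounted at /query on port 8080 (both paths illustrative, not taken from this patch):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// A GraphQL query is sent as a JSON document in a POST body.
	body := bytes.NewBufferString(`{"query":"{ tags { id name scope } }"}`)
	resp, err := http.Post("http://localhost:8080/query", "application/json", body)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}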
From 6ca14c55f2e05d39f538feb9c9484300cfb2702a Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 25 Apr 2025 18:09:21 +0200 Subject: [PATCH 424/443] fix: fix error in jobsMetricStatisticsHistogram calculation - also reduces overhead, simplifies query --- internal/repository/stats.go | 95 ++++++++++++++---------------------- 1 file changed, 36 insertions(+), 59 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ad518bd..410ba6c 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -674,57 +674,32 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( } } - // log.Debugf("Metric %s, Peak %f, Unit %s, Aggregation %s", metric, peak, unit, aggreg) - // Make bins, see https://jereze.com/code/sql-histogram/ - + // log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit) + // Make bins, see https://jereze.com/code/sql-histogram/ (Modified here) start := time.Now() - jm := fmt.Sprintf(`json_extract(footprint, "$.%s")`, (metric + "_" + footprintStat)) - crossJoinQuery := sq.Select( - fmt.Sprintf(`max(%s) as max`, jm), - fmt.Sprintf(`min(%s) as min`, jm), - ).From("job").Where( - "JSON_VALID(footprint)", - ).Where( - fmt.Sprintf(`%s is not null`, jm), - ).Where( - fmt.Sprintf(`%s <= %f`, jm, peak), - ) - - crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery) - - if cjqerr != nil { - return nil, cjqerr - } - - for _, f := range filters { - crossJoinQuery = BuildWhereClause(f, crossJoinQuery) - } - - crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql() - if sqlerr != nil { - return nil, sqlerr - } - - binQuery := fmt.Sprintf(`CAST( (case when %s = value.max - then value.max*0.999999999 else %s end - value.min) / (value.max - - value.min) * %v as INTEGER )`, jm, jm, *bins) + // Find Jobs' Value Bin Number: Divide Value by Peak, Multiply by RequestedBins, then CAST to INT: Gets Bin-Number of Job + binQuery := fmt.Sprintf(`CAST( + ((case when json_extract(footprint, "$.%s") = %f then %f*0.999999999 else json_extract(footprint, "$.%s") end) / %f) + * %v as INTEGER )`, + (metric + "_" + footprintStat), peak, peak, (metric + "_" + footprintStat), peak, *bins) mainQuery := sq.Select( fmt.Sprintf(`%s + 1 as bin`, binQuery), - fmt.Sprintf(`count(%s) as count`, jm), - fmt.Sprintf(`CAST(((value.max / %d) * (%v )) as INTEGER ) as min`, *bins, binQuery), - fmt.Sprintf(`CAST(((value.max / %d) * (%v + 1 )) as INTEGER ) as max`, *bins, binQuery), - ).From("job").CrossJoin( - fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs..., - ).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak)) + fmt.Sprintf(`count(*) as count`), + // For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * %s as min`, peak, *bins, binQuery), + // For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * (%s + 1) as max`, peak, *bins, binQuery), + ).From("job").Where( + "JSON_VALID(footprint)", + ).Where(fmt.Sprintf(`json_extract(footprint, "$.%s") is not null and json_extract(footprint, "$.%s") <= %f`, (metric + "_" + footprintStat), (metric + "_" + footprintStat), peak)) + // Only accessible Jobs... mainQuery, qerr := SecurityCheck(ctx, mainQuery) - if qerr != nil { return nil, qerr } + // Filters... 
for _, f := range filters { mainQuery = BuildWhereClause(f, mainQuery) } @@ -738,32 +713,34 @@ func (r *JobRepository) jobsMetricStatisticsHistogram( return nil, err } - // Setup Array + // Setup Return Array With Bin-Numbers for Match and Min/Max based on Peak points := make([]*model.MetricHistoPoint, 0) + binStep := int(peak) / *bins for i := 1; i <= *bins; i++ { - binMax := ((int(peak) / *bins) * i) - binMin := ((int(peak) / *bins) * (i - 1)) - point := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax} - points = append(points, &point) + binMin := (binStep * (i - 1)) + binMax := (binStep * i) + epoint := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax} + points = append(points, &epoint) } - for rows.Next() { - point := model.MetricHistoPoint{} - if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil { - log.Warnf("Error while scanning rows for %s", jm) - return nil, err // Totally bricks cc-backend if returned and if all metrics requested? + for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!) + rpoint := model.MetricHistoPoint{} + if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max + log.Warnf("Error while scanning rows for %s", metric) + return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested? } for _, e := range points { - if e.Bin != nil && point.Bin != nil { - if *e.Bin == *point.Bin { - e.Count = point.Count - if point.Min != nil { - e.Min = point.Min - } - if point.Max != nil { - e.Max = point.Max - } + if e.Bin != nil && rpoint.Bin != nil { + if *e.Bin == *rpoint.Bin { + e.Count = rpoint.Count + // Only Required For Debug: Check DB returned Min/Max against Backend Init above + // if rpoint.Min != nil { + // log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min) + // } + // if rpoint.Max != nil { + // log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max) + // } break } } From 61f0521072bd4d7151e4ef56695983bd2462608b Mon Sep 17 00:00:00 2001 From: brinkcoder Date: Fri, 25 Apr 2025 22:37:16 +0200 Subject: [PATCH 425/443] fix: correct logging variable from err to ipErr in AuthApi --- internal/auth/auth.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/auth/auth.go b/internal/auth/auth.go index 9201315..5f88bbb 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -331,7 +331,7 @@ func (auth *Authentication) AuthApi( ipErr := securedCheck(user, r) if ipErr != nil { - log.Infof("auth api -> secured check failed: %s", err.Error()) + log.Infof("auth api -> secured check failed: %s", ipErr.Error()) onfailure(rw, r, ipErr) return } From 161f0744aa7ffd4b5f93836c614cdfbfb2a2a441 Mon Sep 17 00:00:00 2001 From: Jan Eitzinger Date: Mon, 28 Apr 2025 09:54:22 +0200 Subject: [PATCH 426/443] fix: enforce apiAllowedIPs config option Fixes #385 --- ReleaseNotes.md | 14 ++++ cmd/cc-backend/init.go | 12 +++ configs/config-demo.json | 3 + configs/config.json | 106 ++++++++++++++----------- internal/api/api_test.go | 3 + internal/importer/importer_test.go | 3 + internal/repository/userConfig_test.go | 3 + pkg/schema/schemas/config.schema.json | 3 +- pkg/schema/validate_test.go | 26 +++--- 9 files changed, 113 insertions(+), 60 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 3e3939d..860f62a 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -6,6 +6,20 @@ This is a 
bug fix release of `cc-backend`, the API backend and frontend implementation of ClusterCockpit. For release specific notes visit the [ClusterCockpit Documentation](https://clusterockpit.org/docs/release/). +## Breaking changes + +The option `apiAllowedIPs` is now a required configuration attribute in +`config.json`. This option restricts access to the admin API. + +To retain the previous behavior that the API is per default accessible from +everywhere set: + +```json + "apiAllowedIPs": [ + "*" + ] +``` + ## Breaking changes for minor release 1.4.x - You need to perform a database migration. Depending on your database size the diff --git a/cmd/cc-backend/init.go b/cmd/cc-backend/init.go index f899ec1..0a5b836 100644 --- a/cmd/cc-backend/init.go +++ b/cmd/cc-backend/init.go @@ -32,6 +32,18 @@ const configString = ` "jwts": { "max-age": "2000h" }, + "apiAllowedIPs": [ + "*" + ], + "enable-resampling": { + "trigger": 30, + "resolutions": [ + 600, + 300, + 120, + 60 + ] + }, "clusters": [ { "name": "name", diff --git a/configs/config-demo.json b/configs/config-demo.json index e8d4570..9425bd2 100644 --- a/configs/config-demo.json +++ b/configs/config-demo.json @@ -17,6 +17,9 @@ 60 ] }, + "apiAllowedIPs": [ + "*" + ], "emission-constant": 317, "clusters": [ { diff --git a/configs/config.json b/configs/config.json index d5b8ada..f946b20 100644 --- a/configs/config.json +++ b/configs/config.json @@ -1,50 +1,62 @@ { - "addr": "0.0.0.0:443", - "ldap": { - "url": "ldaps://test", - "user_base": "ou=people,ou=hpc,dc=test,dc=de", - "search_dn": "cn=hpcmonitoring,ou=roadm,ou=profile,ou=hpc,dc=test,dc=de", - "user_bind": "uid={username},ou=people,ou=hpc,dc=test,dc=de", - "user_filter": "(&(objectclass=posixAccount))" - }, - "https-cert-file": "/etc/letsencrypt/live/url/fullchain.pem", - "https-key-file": "/etc/letsencrypt/live/url/privkey.pem", - "user": "clustercockpit", - "group": "clustercockpit", - "archive": { - "kind": "file", - "path": "./var/job-archive" - }, - "validate": true, - "clusters": [ - { - "name": "test", - "metricDataRepository": { - "kind": "cc-metric-store", - "url": "http://localhost:8082", - "token": "eyJhbGciOiJF-E-pQBQ" - }, - "filterRanges": { - "numNodes": { - "from": 1, - "to": 64 - }, - "duration": { - "from": 0, - "to": 86400 - }, - "startTime": { - "from": "2022-01-01T00:00:00Z", - "to": null - } - } + "addr": "0.0.0.0:443", + "ldap": { + "url": "ldaps://test", + "user_base": "ou=people,ou=hpc,dc=test,dc=de", + "search_dn": "cn=hpcmonitoring,ou=roadm,ou=profile,ou=hpc,dc=test,dc=de", + "user_bind": "uid={username},ou=people,ou=hpc,dc=test,dc=de", + "user_filter": "(&(objectclass=posixAccount))" + }, + "https-cert-file": "/etc/letsencrypt/live/url/fullchain.pem", + "https-key-file": "/etc/letsencrypt/live/url/privkey.pem", + "user": "clustercockpit", + "group": "clustercockpit", + "archive": { + "kind": "file", + "path": "./var/job-archive" + }, + "validate": false, + "apiAllowedIPs": [ + "*" + ], + "clusters": [ + { + "name": "test", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "http://localhost:8082", + "token": "eyJhbGciOiJF-E-pQBQ" + }, + "filterRanges": { + "numNodes": { + "from": 1, + "to": 64 + }, + "duration": { + "from": 0, + "to": 86400 + }, + "startTime": { + "from": "2022-01-01T00:00:00Z", + "to": null } - ], - "jwts": { - "cookieName": "", - "validateUser": false, - "max-age": "2000h", - "trustedIssuer": "" - }, - "short-running-jobs-duration": 300 + } + } + ], + "jwts": { + "cookieName": "", + "validateUser": false, + "max-age": "2000h", + 
"trustedIssuer": "" + }, + "enable-resampling": { + "trigger": 30, + "resolutions": [ + 600, + 300, + 120, + 60 + ] + }, + "short-running-jobs-duration": 300 } diff --git a/internal/api/api_test.go b/internal/api/api_test.go index c47bd4d..e67813c 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -45,6 +45,9 @@ func setup(t *testing.T) *api.RestApi { "jwts": { "max-age": "2m" }, + "apiAllowedIPs": [ + "*" + ], "clusters": [ { "name": "testcluster", diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go index 4e839cf..209b6be 100644 --- a/internal/importer/importer_test.go +++ b/internal/importer/importer_test.go @@ -45,6 +45,9 @@ func setup(t *testing.T) *repository.JobRepository { "jwts": { "max-age": "2m" }, + "apiAllowedIPs": [ + "*" + ], "clusters": [ { "name": "testcluster", diff --git a/internal/repository/userConfig_test.go b/internal/repository/userConfig_test.go index c01bb5c..cd15c9d 100644 --- a/internal/repository/userConfig_test.go +++ b/internal/repository/userConfig_test.go @@ -25,6 +25,9 @@ func setupUserTest(t *testing.T) *UserCfgRepo { "jwts": { "max-age": "2m" }, + "apiAllowedIPs": [ + "*" + ], "clusters": [ { "name": "testcluster", diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json index f372fc1..c844174 100644 --- a/pkg/schema/schemas/config.schema.json +++ b/pkg/schema/schemas/config.schema.json @@ -492,6 +492,7 @@ }, "required": [ "jwts", - "clusters" + "clusters", + "apiAllowedIPs" ] } diff --git a/pkg/schema/validate_test.go b/pkg/schema/validate_test.go index 2dc97c1..f4943f0 100644 --- a/pkg/schema/validate_test.go +++ b/pkg/schema/validate_test.go @@ -14,17 +14,20 @@ func TestValidateConfig(t *testing.T) { "jwts": { "max-age": "2m" }, - "clusters": [ - { - "name": "testcluster", - "metricDataRepository": { - "kind": "cc-metric-store", - "url": "localhost:8082"}, - "filterRanges": { - "numNodes": { "from": 1, "to": 64 }, - "duration": { "from": 0, "to": 86400 }, - "startTime": { "from": "2022-01-01T00:00:00Z", "to": null } - }}] + "apiAllowedIPs": [ + "*" + ], + "clusters": [ + { + "name": "testcluster", + "metricDataRepository": { + "kind": "cc-metric-store", + "url": "localhost:8082"}, + "filterRanges": { + "numNodes": { "from": 1, "to": 64 }, + "duration": { "from": 0, "to": 86400 }, + "startTime": { "from": "2022-01-01T00:00:00Z", "to": null } + }}] }`) if err := Validate(Config, bytes.NewReader(json)); err != nil { @@ -33,7 +36,6 @@ func TestValidateConfig(t *testing.T) { } func TestValidateJobMeta(t *testing.T) { - } func TestValidateCluster(t *testing.T) { From df497d5952a66771fd7cc20160186564d996a6ed Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 29 Apr 2025 15:10:06 +0200 Subject: [PATCH 427/443] initial branch commit, add job compare switch, add gql resolver --- api/schema.graphqls | 19 +- internal/graph/generated/generated.go | 1153 +++++++++++------ internal/graph/model/models_gen.go | 22 +- internal/graph/schema.resolvers.go | 68 +- web/frontend/src/Jobs.root.svelte | 62 +- web/frontend/src/generic/JobCompare.svelte | 156 +++ web/frontend/src/generic/JobList.svelte | 6 +- web/frontend/src/generic/plots/Polar.svelte | 2 +- .../job/jobsummary/JobFootprintPolar.svelte | 2 +- 9 files changed, 1050 insertions(+), 440 deletions(-) create mode 100644 web/frontend/src/generic/JobCompare.svelte diff --git a/api/schema.graphqls b/api/schema.graphqls index 9092b4f..1942454 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -158,7 +158,7 @@ type 
StatsSeries { max: [NullableFloat!]! } -type JobStatsWithScope { +type NamedStatsWithScope { name: String! scope: MetricScope! stats: [ScopedStats!]! @@ -171,8 +171,13 @@ type ScopedStats { } type JobStats { - name: String! - stats: MetricStatistics! + jobId: Int! + stats: [NamedStats!]! +} + +type NamedStats { + name: String! + data: MetricStatistics! } type Unit { @@ -259,12 +264,13 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobStats(id: ID!, metrics: [String!]): [JobStats!]! - scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! - jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints + jobStats(id: ID!, metrics: [String!]): [NamedStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [NamedStatsWithScope!]! jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]! + jobsMetricStats(filter: [JobFilter!], metrics: [String!]): [JobStats!]! + jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -288,6 +294,7 @@ type TimeRangeOutput { range: String, from: Time!, to: Time! } input JobFilter { tags: [ID!] jobId: StringInput + jobIds: [ID!] arrayJobId: Int user: StringInput project: StringInput diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 5dbdfd9..14d1b57 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -171,13 +171,7 @@ type ComplexityRoot struct { } JobStats struct { - Name func(childComplexity int) int - Stats func(childComplexity int) int - } - - JobStatsWithScope struct { - Name func(childComplexity int) int - Scope func(childComplexity int) int + JobID func(childComplexity int) int Stats func(childComplexity int) int } @@ -255,6 +249,17 @@ type ComplexityRoot struct { UpdateConfiguration func(childComplexity int, name string, value string) int } + NamedStats struct { + Data func(childComplexity int) int + Name func(childComplexity int) int + } + + NamedStatsWithScope struct { + Name func(childComplexity int) int + Scope func(childComplexity int) int + Stats func(childComplexity int) int + } + NodeMetrics struct { Host func(childComplexity int) int Metrics func(childComplexity int) int @@ -279,6 +284,7 @@ type ComplexityRoot struct { JobStats func(childComplexity int, id string, metrics []string) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int + JobsMetricStats func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int NodeMetricsList func(childComplexity int, cluster string, subCluster string, nodeFilter string, scopes 
[]schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) int @@ -411,11 +417,12 @@ type QueryResolver interface { AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) Job(ctx context.Context, id string) (*schema.Job, error) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) - JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) - ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) - JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) + JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) + ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate, numDurationBins *string, numMetricBins *int) ([]*model.JobsStatistics, error) + JobsMetricStats(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.JobStats, error) + JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) NodeMetricsList(ctx context.Context, cluster string, subCluster string, nodeFilter string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time, page *model.PageRequest, resolution *int) (*model.NodesResultList, error) @@ -933,12 +940,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Offset(childComplexity), true - case "JobStats.name": - if e.complexity.JobStats.Name == nil { + case "JobStats.jobId": + if e.complexity.JobStats.JobID == nil { break } - return e.complexity.JobStats.Name(childComplexity), true + return e.complexity.JobStats.JobID(childComplexity), true case "JobStats.stats": if e.complexity.JobStats.Stats == nil { @@ -947,27 +954,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobStats.Stats(childComplexity), true - case "JobStatsWithScope.name": - if e.complexity.JobStatsWithScope.Name == nil { - break - } - - return e.complexity.JobStatsWithScope.Name(childComplexity), true - - case "JobStatsWithScope.scope": - if e.complexity.JobStatsWithScope.Scope == nil { - break - } - - return e.complexity.JobStatsWithScope.Scope(childComplexity), true - - case "JobStatsWithScope.stats": - if e.complexity.JobStatsWithScope.Stats == nil { - break - } - - return e.complexity.JobStatsWithScope.Stats(childComplexity), true - case "JobsStatistics.histDuration": if e.complexity.JobsStatistics.HistDuration == nil { break @@ -1348,6 +1334,41 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 
e.complexity.Mutation.UpdateConfiguration(childComplexity, args["name"].(string), args["value"].(string)), true + case "NamedStats.data": + if e.complexity.NamedStats.Data == nil { + break + } + + return e.complexity.NamedStats.Data(childComplexity), true + + case "NamedStats.name": + if e.complexity.NamedStats.Name == nil { + break + } + + return e.complexity.NamedStats.Name(childComplexity), true + + case "NamedStatsWithScope.name": + if e.complexity.NamedStatsWithScope.Name == nil { + break + } + + return e.complexity.NamedStatsWithScope.Name(childComplexity), true + + case "NamedStatsWithScope.scope": + if e.complexity.NamedStatsWithScope.Scope == nil { + break + } + + return e.complexity.NamedStatsWithScope.Scope(childComplexity), true + + case "NamedStatsWithScope.stats": + if e.complexity.NamedStatsWithScope.Stats == nil { + break + } + + return e.complexity.NamedStatsWithScope.Stats(childComplexity), true + case "NodeMetrics.host": if e.complexity.NodeMetrics.Host == nil { break @@ -1497,6 +1518,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.JobsFootprints(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string)), true + case "Query.jobsMetricStats": + if e.complexity.Query.JobsMetricStats == nil { + break + } + + args, err := ec.field_Query_jobsMetricStats_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.JobsMetricStats(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string)), true + case "Query.jobsStatistics": if e.complexity.Query.JobsStatistics == nil { break @@ -2234,7 +2267,7 @@ type StatsSeries { max: [NullableFloat!]! } -type JobStatsWithScope { +type NamedStatsWithScope { name: String! scope: MetricScope! stats: [ScopedStats!]! @@ -2247,8 +2280,13 @@ type ScopedStats { } type JobStats { - name: String! - stats: MetricStatistics! + jobId: Int! + stats: [NamedStats!]! +} + +type NamedStats { + name: String! + data: MetricStatistics! } type Unit { @@ -2335,12 +2373,13 @@ type Query { job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! - jobStats(id: ID!, metrics: [String!]): [JobStats!]! - scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]! - jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints + jobStats(id: ID!, metrics: [String!]): [NamedStats!]! + scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [NamedStatsWithScope!]! jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]! + jobsMetricStats(filter: [JobFilter!], metrics: [String!]): [JobStats!]! + jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -2364,6 +2403,7 @@ type TimeRangeOutput { range: String, from: Time!, to: Time! } input JobFilter { tags: [ID!] jobId: StringInput + jobIds: [ID!] 
arrayJobId: Int user: StringInput project: StringInput @@ -3045,6 +3085,57 @@ func (ec *executionContext) field_Query_jobsFootprints_argsMetrics( return zeroVal, nil } +func (ec *executionContext) field_Query_jobsMetricStats_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_jobsMetricStats_argsFilter(ctx, rawArgs) + if err != nil { + return nil, err + } + args["filter"] = arg0 + arg1, err := ec.field_Query_jobsMetricStats_argsMetrics(ctx, rawArgs) + if err != nil { + return nil, err + } + args["metrics"] = arg1 + return args, nil +} +func (ec *executionContext) field_Query_jobsMetricStats_argsFilter( + ctx context.Context, + rawArgs map[string]any, +) ([]*model.JobFilter, error) { + if _, ok := rawArgs["filter"]; !ok { + var zeroVal []*model.JobFilter + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) + if tmp, ok := rawArgs["filter"]; ok { + return ec.unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobFilterᚄ(ctx, tmp) + } + + var zeroVal []*model.JobFilter + return zeroVal, nil +} + +func (ec *executionContext) field_Query_jobsMetricStats_argsMetrics( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["metrics"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + if tmp, ok := rawArgs["metrics"]; ok { + return ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -7265,8 +7356,8 @@ func (ec *executionContext) fieldContext_JobResultList_hasNextPage(_ context.Con return fc, nil } -func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobStats_name(ctx, field) +func (ec *executionContext) _JobStats_jobId(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_jobId(ctx, field) if err != nil { return graphql.Null } @@ -7279,7 +7370,7 @@ func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.Co }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Name, nil + return obj.JobID, nil }) if err != nil { ec.Error(ctx, err) @@ -7291,19 +7382,19 @@ func (ec *executionContext) _JobStats_name(ctx context.Context, field graphql.Co } return graphql.Null } - res := resTmp.(string) + res := resTmp.(int) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_JobStats_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_JobStats_jobId(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "JobStats", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have 
child fields") + return nil, errors.New("field of type Int does not have child fields") }, } return fc, nil @@ -7335,9 +7426,9 @@ func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.C } return graphql.Null } - res := resTmp.(*schema.MetricStatistics) + res := resTmp.([]*model.NamedStats) fc.Result = res - return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) + return ec.marshalNNamedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_JobStats_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -7348,154 +7439,12 @@ func (ec *executionContext) fieldContext_JobStats_stats(_ context.Context, field IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { - case "avg": - return ec.fieldContext_MetricStatistics_avg(ctx, field) - case "min": - return ec.fieldContext_MetricStatistics_min(ctx, field) - case "max": - return ec.fieldContext_MetricStatistics_max(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) - }, - } - return fc, nil -} - -func (ec *executionContext) _JobStatsWithScope_name(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobStatsWithScope_name(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Name, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobStatsWithScope_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobStatsWithScope", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") - }, - } - return fc, nil -} - -func (ec *executionContext) _JobStatsWithScope_scope(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobStatsWithScope_scope(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Scope, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(schema.MetricScope) - fc.Result 
= res - return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobStatsWithScope", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type MetricScope does not have child fields") - }, - } - return fc, nil -} - -func (ec *executionContext) _JobStatsWithScope_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStatsWithScope) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_JobStatsWithScope_stats(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Stats, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.([]*model.ScopedStats) - fc.Result = res - return ec.marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_JobStatsWithScope_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "JobStatsWithScope", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "hostname": - return ec.fieldContext_ScopedStats_hostname(ctx, field) - case "id": - return ec.fieldContext_ScopedStats_id(ctx, field) + case "name": + return ec.fieldContext_NamedStats_name(ctx, field) case "data": - return ec.fieldContext_ScopedStats_data(ctx, field) + return ec.fieldContext_NamedStats_data(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type ScopedStats", field.Name) + return nil, fmt.Errorf("no field named %q was found under type NamedStats", field.Name) }, } return fc, nil @@ -9840,6 +9789,242 @@ func (ec *executionContext) fieldContext_Mutation_updateConfiguration(ctx contex return fc, nil } +func (ec *executionContext) _NamedStats_name(ctx context.Context, field graphql.CollectedField, obj *model.NamedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NamedStats_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec 
*executionContext) fieldContext_NamedStats_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NamedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NamedStats_data(ctx context.Context, field graphql.CollectedField, obj *model.NamedStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NamedStats_data(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalNMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NamedStats_data(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NamedStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "avg": + return ec.fieldContext_MetricStatistics_avg(ctx, field) + case "min": + return ec.fieldContext_MetricStatistics_min(ctx, field) + case "max": + return ec.fieldContext_MetricStatistics_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricStatistics", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _NamedStatsWithScope_name(ctx context.Context, field graphql.CollectedField, obj *model.NamedStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NamedStatsWithScope_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NamedStatsWithScope_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NamedStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec 
*executionContext) _NamedStatsWithScope_scope(ctx context.Context, field graphql.CollectedField, obj *model.NamedStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NamedStatsWithScope_scope(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Scope, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(schema.MetricScope) + fc.Result = res + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NamedStatsWithScope_scope(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NamedStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type MetricScope does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _NamedStatsWithScope_stats(ctx context.Context, field graphql.CollectedField, obj *model.NamedStatsWithScope) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_NamedStatsWithScope_stats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Stats, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.ScopedStats) + fc.Result = res + return ec.marshalNScopedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐScopedStatsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_NamedStatsWithScope_stats(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "NamedStatsWithScope", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "hostname": + return ec.fieldContext_ScopedStats_hostname(ctx, field) + case "id": + return ec.fieldContext_ScopedStats_id(ctx, field) + case "data": + return ec.fieldContext_ScopedStats_data(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type ScopedStats", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _NodeMetrics_host(ctx context.Context, field graphql.CollectedField, obj *model.NodeMetrics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_NodeMetrics_host(ctx, field) if err != nil { @@ -10715,9 +10900,9 @@ func (ec *executionContext) _Query_jobStats(ctx context.Context, field graphql.C } return graphql.Null } - res := 
resTmp.([]*model.JobStats) + res := resTmp.([]*model.NamedStats) fc.Result = res - return ec.marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx, field.Selections, res) + return ec.marshalNNamedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Query_jobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10729,11 +10914,11 @@ func (ec *executionContext) fieldContext_Query_jobStats(ctx context.Context, fie Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { case "name": - return ec.fieldContext_JobStats_name(ctx, field) - case "stats": - return ec.fieldContext_JobStats_stats(ctx, field) + return ec.fieldContext_NamedStats_name(ctx, field) + case "data": + return ec.fieldContext_NamedStats_data(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type JobStats", field.Name) + return nil, fmt.Errorf("no field named %q was found under type NamedStats", field.Name) }, } defer func() { @@ -10776,9 +10961,9 @@ func (ec *executionContext) _Query_scopedJobStats(ctx context.Context, field gra } return graphql.Null } - res := resTmp.([]*model.JobStatsWithScope) + res := resTmp.([]*model.NamedStatsWithScope) fc.Result = res - return ec.marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx, field.Selections, res) + return ec.marshalNNamedStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScopeᚄ(ctx, field.Selections, res) } func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { @@ -10790,13 +10975,13 @@ func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Contex Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { case "name": - return ec.fieldContext_JobStatsWithScope_name(ctx, field) + return ec.fieldContext_NamedStatsWithScope_name(ctx, field) case "scope": - return ec.fieldContext_JobStatsWithScope_scope(ctx, field) + return ec.fieldContext_NamedStatsWithScope_scope(ctx, field) case "stats": - return ec.fieldContext_JobStatsWithScope_stats(ctx, field) + return ec.fieldContext_NamedStatsWithScope_stats(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type JobStatsWithScope", field.Name) + return nil, fmt.Errorf("no field named %q was found under type NamedStatsWithScope", field.Name) }, } defer func() { @@ -10813,64 +10998,6 @@ func (ec *executionContext) fieldContext_Query_scopedJobStats(ctx context.Contex return fc, nil } -func (ec *executionContext) _Query_jobsFootprints(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query_jobsFootprints(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobsFootprints(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string)) - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - 
} - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*model.Footprints) - fc.Result = res - return ec.marshalOFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprints(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "Query", - Field: field, - IsMethod: true, - IsResolver: true, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "timeWeights": - return ec.fieldContext_Footprints_timeWeights(ctx, field) - case "metrics": - return ec.fieldContext_Footprints_metrics(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type Footprints", field.Name) - }, - } - defer func() { - if r := recover(); r != nil { - err = ec.Recover(ctx, r) - ec.Error(ctx, err) - } - }() - ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Query_jobsFootprints_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { - ec.Error(ctx, err) - return fc, err - } - return fc, nil -} - func (ec *executionContext) _Query_jobs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query_jobs(ctx, field) if err != nil { @@ -11029,6 +11156,125 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex return fc, nil } +func (ec *executionContext) _Query_jobsMetricStats(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_jobsMetricStats(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().JobsMetricStats(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.JobStats) + fc.Result = res + return ec.marshalNJobStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_jobsMetricStats(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "jobId": + return ec.fieldContext_JobStats_jobId(ctx, field) + case "stats": + return ec.fieldContext_JobStats_stats(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type JobStats", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_jobsMetricStats_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_jobsFootprints(ctx 
context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_jobsFootprints(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().JobsFootprints(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*model.Footprints) + fc.Result = res + return ec.marshalOFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFootprints(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_jobsFootprints(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "timeWeights": + return ec.fieldContext_Footprints_timeWeights(ctx, field) + case "metrics": + return ec.fieldContext_Footprints_metrics(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Footprints", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_jobsFootprints_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + func (ec *executionContext) _Query_rooflineHeatmap(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query_rooflineHeatmap(ctx, field) if err != nil { @@ -15822,7 +16068,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any asMap[k] = v } - fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "exclusive", "node"} + fieldsInOrder := [...]string{"tags", "jobId", "jobIds", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "energy", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "metricStats", "exclusive", "node"} for _, k := range fieldsInOrder { v, ok := asMap[k] if !ok { @@ -15843,6 +16089,13 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj any return it, err } it.JobID = data + case "jobIds": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobIds")) + data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) + if err != nil { + return it, err + } + it.JobIds = data case "arrayJobId": ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("arrayJobId")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) @@ -17269,8 +17522,8 @@ func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet, switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("JobStats") - case "name": - out.Values[i] = ec._JobStats_name(ctx, field, obj) + case "jobId": + out.Values[i] = 
ec._JobStats_jobId(ctx, field, obj) if out.Values[i] == graphql.Null { out.Invalids++ } @@ -17302,55 +17555,6 @@ func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet, return out } -var jobStatsWithScopeImplementors = []string{"JobStatsWithScope"} - -func (ec *executionContext) _JobStatsWithScope(ctx context.Context, sel ast.SelectionSet, obj *model.JobStatsWithScope) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobStatsWithScopeImplementors) - - out := graphql.NewFieldSet(fields) - deferred := make(map[string]*graphql.FieldSet) - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobStatsWithScope") - case "name": - out.Values[i] = ec._JobStatsWithScope_name(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - case "scope": - out.Values[i] = ec._JobStatsWithScope_scope(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - case "stats": - out.Values[i] = ec._JobStatsWithScope_stats(ctx, field, obj) - if out.Values[i] == graphql.Null { - out.Invalids++ - } - default: - panic("unknown field " + strconv.Quote(field.Name)) - } - } - out.Dispatch(ctx) - if out.Invalids > 0 { - return graphql.Null - } - - atomic.AddInt32(&ec.deferred, int32(len(deferred))) - - for label, dfs := range deferred { - ec.processDeferredGroup(graphql.DeferredGroup{ - Label: label, - Path: graphql.GetPath(ctx), - FieldSet: dfs, - Context: ctx, - }) - } - - return out -} - var jobsStatisticsImplementors = []string{"JobsStatistics"} func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.SelectionSet, obj *model.JobsStatistics) graphql.Marshaler { @@ -17897,6 +18101,99 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) return out } +var namedStatsImplementors = []string{"NamedStats"} + +func (ec *executionContext) _NamedStats(ctx context.Context, sel ast.SelectionSet, obj *model.NamedStats) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, namedStatsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("NamedStats") + case "name": + out.Values[i] = ec._NamedStats_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "data": + out.Values[i] = ec._NamedStats_data(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var namedStatsWithScopeImplementors = []string{"NamedStatsWithScope"} + +func (ec *executionContext) _NamedStatsWithScope(ctx context.Context, sel ast.SelectionSet, obj *model.NamedStatsWithScope) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, namedStatsWithScopeImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("NamedStatsWithScope") + case "name": + out.Values[i] 
= ec._NamedStatsWithScope_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "scope": + out.Values[i] = ec._NamedStatsWithScope_scope(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "stats": + out.Values[i] = ec._NamedStatsWithScope_stats(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var nodeMetricsImplementors = []string{"NodeMetrics"} func (ec *executionContext) _NodeMetrics(ctx context.Context, sel ast.SelectionSet, obj *model.NodeMetrics) graphql.Marshaler { @@ -18205,25 +18502,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } - out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) - case "jobsFootprints": - field := field - - innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - } - }() - res = ec._Query_jobsFootprints(ctx, field) - return res - } - - rrm := func(ctx context.Context) graphql.Marshaler { - return ec.OperationContext.RootResolverMiddleware(ctx, - func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) - } - out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "jobs": field := field @@ -18268,6 +18546,47 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "jobsMetricStats": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_jobsMetricStats(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "jobsFootprints": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_jobsFootprints(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "rooflineHeatmap": field := field @@ -20195,60 +20514,6 @@ func (ec *executionContext) marshalNJobStats2ᚖgithubᚗcomᚋClusterCockpitᚋ return ec._JobStats(ctx, sel, v) } -func (ec *executionContext) 
marshalNJobStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobStatsWithScope) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - go f(i) - } - - } - wg.Wait() - - for _, e := range ret { - if e == graphql.Null { - return graphql.Null - } - } - - return ret -} - -func (ec *executionContext) marshalNJobStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobStatsWithScope(ctx context.Context, sel ast.SelectionSet, v *model.JobStatsWithScope) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "the requested element is null which the schema does not allow") - } - return graphql.Null - } - return ec._JobStatsWithScope(ctx, sel, v) -} - func (ec *executionContext) marshalNJobsStatistics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐJobsStatisticsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobsStatistics) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -20498,6 +20763,114 @@ func (ec *executionContext) marshalNMetricValue2githubᚗcomᚋClusterCockpitᚋ return ec._MetricValue(ctx, sel, &v) } +func (ec *executionContext) marshalNNamedStats2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NamedStats) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNNamedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStats(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNNamedStats2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStats(ctx context.Context, sel ast.SelectionSet, v *model.NamedStats) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._NamedStats(ctx, sel, v) +} + +func (ec *executionContext) marshalNNamedStatsWithScope2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NamedStatsWithScope) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := 
&graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNNamedStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScope(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNNamedStatsWithScope2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNamedStatsWithScope(ctx context.Context, sel ast.SelectionSet, v *model.NamedStatsWithScope) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._NamedStatsWithScope(ctx, sel, v) +} + func (ec *executionContext) marshalNNodeMetrics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐNodeMetricsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeMetrics) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 43c4e37..fdd3bf3 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -51,6 +51,7 @@ type IntRangeOutput struct { type JobFilter struct { Tags []string `json:"tags,omitempty"` JobID *StringInput `json:"jobId,omitempty"` + JobIds []string `json:"jobIds,omitempty"` ArrayJobID *int `json:"arrayJobId,omitempty"` User *StringInput `json:"user,omitempty"` Project *StringInput `json:"project,omitempty"` @@ -96,14 +97,8 @@ type JobResultList struct { } type JobStats struct { - Name string `json:"name"` - Stats *schema.MetricStatistics `json:"stats"` -} - -type JobStatsWithScope struct { - Name string `json:"name"` - Scope schema.MetricScope `json:"scope"` - Stats []*ScopedStats `json:"stats"` + JobID int `json:"jobId"` + Stats []*NamedStats `json:"stats"` } type JobsStatistics struct { @@ -153,6 +148,17 @@ type MetricStatItem struct { type Mutation struct { } +type NamedStats struct { + Name string `json:"name"` + Data *schema.MetricStatistics `json:"data"` +} + +type NamedStatsWithScope struct { + Name string `json:"name"` + Scope schema.MetricScope `json:"scope"` + Stats []*ScopedStats `json:"stats"` +} + type NodeMetrics struct { Host string `json:"host"` SubCluster string `json:"subCluster"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 10e1b55..2920e0e 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -400,7 +400,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str } // JobStats is the resolver for the jobStats field. 
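// The hunks below rework this resolver to return the renamed NamedStats type,
// moving the per-metric statistics from a `stats` field to a `data` field. A
// client query against the reworked schema might look like this sketch (the
// job id and metric names are placeholders, not taken from this patch):
//
//	query {
//	  jobStats(id: "123", metrics: ["flops_any", "mem_bw"]) {
//	    name
//	    data { min avg max }
//	  }
//	}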
-func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) { +func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) { job, err := r.Query().Job(ctx, id) if err != nil { log.Warnf("Error while querying job %s for metadata", id) @@ -413,11 +413,11 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin return nil, err } - res := []*model.JobStats{} + res := []*model.NamedStats{} for name, md := range data { - res = append(res, &model.JobStats{ - Name: name, - Stats: &md, + res = append(res, &model.NamedStats{ + Name: name, + Data: &md, }) } @@ -425,7 +425,7 @@ func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []strin } // ScopedJobStats is the resolver for the scopedJobStats field. -func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) { +func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) { job, err := r.Query().Job(ctx, id) if err != nil { log.Warnf("Error while querying job %s for metadata", id) @@ -438,7 +438,7 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [ return nil, err } - res := make([]*model.JobStatsWithScope, 0) + res := make([]*model.NamedStatsWithScope, 0) for name, scoped := range data { for scope, stats := range scoped { @@ -451,7 +451,7 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [ }) } - res = append(res, &model.JobStatsWithScope{ + res = append(res, &model.NamedStatsWithScope{ Name: name, Scope: scope, Stats: mdlStats, @@ -462,12 +462,6 @@ func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics [ return res, nil } -// JobsFootprints is the resolver for the jobsFootprints field. -func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) { - // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column! - return r.jobsFootprints(ctx, filter, metrics) -} - // Jobs is the resolver for the jobs field. func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) { if page == nil { @@ -589,6 +583,52 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF return stats, nil } +// JobsMetricStats is the resolver for the jobsMetricStats field. 
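+// It queries all jobs matching the filter without paging, in a fixed
+// startTime ASC order so the compare view gets a stable x-axis, and then
+// loads the requested metric statistics per job via the metric data
+// dispatcher. A job whose statistics cannot be loaded is skipped with a
+// warning rather than failing the whole request.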
+func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.JobStats, error) { + // No Paging, Fixed Order by StartTime ASC + order := &model.OrderByInput{ + Field: "startTime", + Type: "col", + Order: "ASC", + } + + jobs, err := r.Repo.QueryJobs(ctx, filter, nil, order) + if err != nil { + log.Warn("Error while querying jobs for comparison") + return nil, err + } + + res := []*model.JobStats{} + for _, job := range jobs { + data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx) + if err != nil { + log.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID) + continue + // return nil, err + } + + sres := []*model.NamedStats{} + for name, md := range data { + sres = append(sres, &model.NamedStats{ + Name: name, + Data: &md, + }) + } + + res = append(res, &model.JobStats{ + JobID: int(job.JobID), + Stats: sres, + }) + } + return res, err +} + +// JobsFootprints is the resolver for the jobsFootprints field. +func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) { + // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column! + return r.jobsFootprints(ctx, filter, metrics) +} + // RooflineHeatmap is the resolver for the rooflineHeatmap field. func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) { return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY) diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index 7faa8b8..718ccda 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -21,6 +21,7 @@ import { init } from "./generic/utils.js"; import Filters from "./generic/Filters.svelte"; import JobList from "./generic/JobList.svelte"; + import JobCompare from "./generic/JobCompare.svelte"; import TextFilter from "./generic/helper/TextFilter.svelte"; import Refresher from "./generic/helper/Refresher.svelte"; import Sorting from "./generic/select/SortSelection.svelte"; @@ -36,7 +37,9 @@ let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the let jobList, - matchedJobs = null; + jobCompare, + matchedListJobs, + matchedCompareJobs = null; let sorting = { field: "startTime", type: "col", order: "DESC" }, isSortingOpen = false, isMetricsSelectionOpen = false; @@ -49,6 +52,7 @@ : !!ccconfig.plot_list_showFootprint; let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null; let presetProject = filterPresets?.project ? filterPresets.project : "" + let showCompare = false; // The filterPresets are handled by the Filters component, // so we need to wait for it to be ready before we can start a query. @@ -72,7 +76,7 @@ {/if} - + - + { selectedCluster = detail.filters[0]?.cluster ? 
detail.filters[0].cluster.eq : null; - jobList.queryJobs(detail.filters); + if (showCompare) { + jobCompare.queryJobs(detail.filters); + } else { + jobList.queryJobs(detail.filters); + } }} /> - + filterComponent.updateFilters(detail)} /> - + { - jobList.refreshJobs() - jobList.refreshAllMetrics() + if (showCompare) { + jobCompare.refreshJobs() + jobCompare.refreshAllMetrics() + } else { + jobList.refreshJobs() + jobList.refreshAllMetrics() + } }} /> + + + - + - + {#if !showCompare} + + {:else} + + {/if} diff --git a/web/frontend/src/generic/JobCompare.svelte b/web/frontend/src/generic/JobCompare.svelte new file mode 100644 index 0000000..9ba1bbf --- /dev/null +++ b/web/frontend/src/generic/JobCompare.svelte @@ -0,0 +1,156 @@ + + + + +{#if $compareData.fetching} + + + + + +{:else if $compareData.error} + + +

+    {$compareData.error.message}
+{:else}
+  {#each $compareData.data.jobsMetricStats as job (job.jobId)}
+    {job.jobId}
+    {#each job.stats as stat (stat.name)}
+      {stat.name}
+      Min {stat.data.min}
+      Avg {stat.data.avg}
+      Max {stat.data.max}
+    {/each}
+  {:else}
+    No jobs found
+ {/each} +{/if} \ No newline at end of file diff --git a/web/frontend/src/generic/JobList.svelte b/web/frontend/src/generic/JobList.svelte index 89b8fad..b31a496 100644 --- a/web/frontend/src/generic/JobList.svelte +++ b/web/frontend/src/generic/JobList.svelte @@ -35,7 +35,7 @@ } export let sorting = { field: "startTime", type: "col", order: "DESC" }; - export let matchedJobs = 0; + export let matchedListJobs = 0; export let metrics = ccconfig.plot_list_selectedMetrics; export let showFootprint; @@ -141,7 +141,7 @@ } } - $: matchedJobs = $jobsStore.data != null ? $jobsStore.data.jobs.count : -1; + $: matchedListJobs = $jobsStore.data != null ? $jobsStore.data.jobs.count : -1; // Force refresh list with existing unchanged variables (== usually would not trigger reactivity) export function refreshJobs() { @@ -310,7 +310,7 @@ bind:page {itemsPerPage} itemText="Jobs" - totalItems={matchedJobs} + totalItems={matchedListJobs} on:update-paging={({ detail }) => { if (detail.itemsPerPage != itemsPerPage) { updateConfiguration(detail.itemsPerPage.toString(), detail.page); diff --git a/web/frontend/src/generic/plots/Polar.svelte b/web/frontend/src/generic/plots/Polar.svelte index 765667a..9ae693a 100644 --- a/web/frontend/src/generic/plots/Polar.svelte +++ b/web/frontend/src/generic/plots/Polar.svelte @@ -55,7 +55,7 @@ const getValues = (type) => labels.map(name => { // Peak is adapted and scaled for job shared state const peak = polarMetrics.find(m => m?.name == name)?.peak - const metric = polarData.find(m => m?.name == name)?.stats + const metric = polarData.find(m => m?.name == name)?.data const value = (peak && metric) ? (metric[type] / peak) : 0 return value <= 1. ? value : 1. }) diff --git a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte index fe6693b..4048b2b 100644 --- a/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte +++ b/web/frontend/src/job/jobsummary/JobFootprintPolar.svelte @@ -42,7 +42,7 @@ query ($dbid: ID!, $selectedMetrics: [String!]!) { jobStats(id: $dbid, metrics: $selectedMetrics) { name - stats { + data { min avg max From 1c84bcae35eebb91571c06d38f1c93a81564cf6c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 29 Apr 2025 18:40:44 +0200 Subject: [PATCH 428/443] add filterBuffer for seamless view switch --- web/frontend/src/Jobs.root.svelte | 6 +++++- web/frontend/src/generic/JobCompare.svelte | 3 ++- web/frontend/src/generic/JobList.svelte | 3 ++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index 718ccda..57f0b5b 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -36,6 +36,7 @@ export let roles; let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the + let filterBuffer = []; let jobList, jobCompare, matchedListJobs, @@ -100,6 +101,7 @@ selectedCluster = detail.filters[0]?.cluster ? detail.filters[0].cluster.eq : null; + filterBuffer = [...detail.filters] if (showCompare) { jobCompare.queryJobs(detail.filters); } else { @@ -131,7 +133,7 @@
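Note on this patch: the parent view mirrors the last applied filters into a
filterBuffer array and passes it down to both JobList and JobCompare, so that
switching between list and compare view re-queries with identical filters
instead of resetting them. A minimal sketch of the hand-off (the helper name
is illustrative; the actual wiring is in the hunks below):

    // Parent: remember filters on every update, forward them to the visible child.
    let filterBuffer = [];
    function handleUpdateFilters(detail) {
      filterBuffer = [...detail.filters];
      (showCompare ? jobCompare : jobList).queryJobs(detail.filters);
    }

    // Children: seed their local filter state from the buffer on creation.
    let filter = [...filterBuffer];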
@@ -146,12 +148,14 @@ bind:sorting bind:matchedListJobs bind:showFootprint + {filterBuffer} /> {:else} {/if} diff --git a/web/frontend/src/generic/JobCompare.svelte b/web/frontend/src/generic/JobCompare.svelte index 9ba1bbf..17da18f 100644 --- a/web/frontend/src/generic/JobCompare.svelte +++ b/web/frontend/src/generic/JobCompare.svelte @@ -32,9 +32,10 @@ } export let matchedCompareJobs = 0; + export let filterBuffer = []; export let metrics = ccconfig.plot_list_selectedMetrics; - let filter = []; + let filter = [...filterBuffer]; const sorting = { field: "startTime", type: "col", order: "DESC" }; /* GQL */ diff --git a/web/frontend/src/generic/JobList.svelte b/web/frontend/src/generic/JobList.svelte index b31a496..03044f0 100644 --- a/web/frontend/src/generic/JobList.svelte +++ b/web/frontend/src/generic/JobList.svelte @@ -38,12 +38,13 @@ export let matchedListJobs = 0; export let metrics = ccconfig.plot_list_selectedMetrics; export let showFootprint; + export let filterBuffer = []; let usePaging = ccconfig.job_list_usePaging let itemsPerPage = usePaging ? ccconfig.plot_list_jobsPerPage : 10; let page = 1; let paging = { itemsPerPage, page }; - let filter = []; + let filter = [...filterBuffer]; let lastFilter = []; let lastSorting = null; let triggerMetricRefresh = false; From 1d13d3dccf3389ec0925c704d63fc551d5841eb8 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 5 May 2025 11:26:39 +0200 Subject: [PATCH 429/443] add and integrate job comparison plot component --- web/frontend/src/generic/JobCompare.svelte | 52 +++- .../src/generic/plots/Comparogram.svelte | 281 ++++++++++++++++++ 2 files changed, 331 insertions(+), 2 deletions(-) create mode 100644 web/frontend/src/generic/plots/Comparogram.svelte diff --git a/web/frontend/src/generic/JobCompare.svelte b/web/frontend/src/generic/JobCompare.svelte index 17da18f..345bfa8 100644 --- a/web/frontend/src/generic/JobCompare.svelte +++ b/web/frontend/src/generic/JobCompare.svelte @@ -19,12 +19,13 @@ queryStore, gql, getContextClient, - mutationStore, + // mutationStore, } from "@urql/svelte"; import { Row, Col, Card, Spinner } from "@sveltestrap/sveltestrap"; + import Comparogram from "./plots/Comparogram.svelte"; const ccconfig = getContext("cc-config"), - initialized = getContext("initialized"), + // initialized = getContext("initialized"), globalMetrics = getContext("globalMetrics"); const equalsCheck = (a, b) => { @@ -36,6 +37,8 @@ export let metrics = ccconfig.plot_list_selectedMetrics; let filter = [...filterBuffer]; + let comparePlotData = {}; + let jobIds = []; const sorting = { field: "startTime", type: "col", order: "DESC" }; /* GQL */ @@ -58,6 +61,8 @@ } `; + /* REACTIVES */ + $: compareData = queryStore({ client: client, query: compareQuery, @@ -65,6 +70,11 @@ }); $: matchedCompareJobs = $compareData.data != null ? $compareData.data.jobsMetricStats.length : -1; + $: if ($compareData.data != null) { + jobIds = []; + comparePlotData = {} + jobs2uplot($compareData.data.jobsMetricStats, metrics) + } /* FUNCTIONS */ // Force refresh list with existing unchanged variables (== usually would not trigger reactivity) @@ -96,6 +106,32 @@ } } + function jobs2uplot(jobs, metrics) { + // Prep + for (let m of metrics) { + // Get Unit + const rawUnit = globalMetrics.find((gm) => gm.name == m)?.unit + const metricUnit = (rawUnit?.prefix ? rawUnit.prefix : "") + (rawUnit?.base ? 
rawUnit.base : "") + // Init + comparePlotData[m] = {unit: metricUnit, data: [[],[],[],[]]} // data: [X, Y1, Y2, Y3] + } + + // Iterate jobs if exists + if (jobs) { + let plotIndex = 0 + jobs.forEach((j) => { + jobIds.push(j.jobId) + for (let s of j.stats) { + comparePlotData[s.name].data[0].push(plotIndex) + comparePlotData[s.name].data[1].push(s.data.min) + comparePlotData[s.name].data[2].push(s.data.avg) + comparePlotData[s.name].data[3].push(s.data.max) + } + plotIndex++ + }) + } +} + // Adapt for Persisting Job Selections in DB later down the line // const updateConfigurationMutation = ({ name, value }) => { // return mutationStore({ @@ -140,6 +176,18 @@ {:else} + {#each metrics as m} + + {/each} +
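+    <!-- Sketch of the per-metric plot invocation; the props below match the
+         context lines of the later hunks in this series, only the data prop
+         is an assumption: -->
+    <Comparogram
+      title={'Compare '+ m}
+      xlabel="JobIds"
+      xticks={jobIds}
+      ylabel={m}
+      metric={m}
+      yunit={comparePlotData[m].unit}
+      data={comparePlotData[m].data}
+    />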

{#each $compareData.data.jobsMetricStats as job (job.jobId)} {job.jobId} diff --git a/web/frontend/src/generic/plots/Comparogram.svelte b/web/frontend/src/generic/plots/Comparogram.svelte new file mode 100644 index 0000000..92db086 --- /dev/null +++ b/web/frontend/src/generic/plots/Comparogram.svelte @@ -0,0 +1,281 @@ + + + + + +{#if data && data[0].length > 0} +
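+<!-- Comparogram renders the min/avg/max statistics of one metric across the
+     compared jobs as an uPlot band chart. A minimal sketch of the component
+     lifecycle, assuming the usual uPlot wiring (the wrapper element, options
+     object and render call are assumptions; the series, bands and axes are
+     shown by the later patches in this series):
+
+       let plotWrapper, uplot = null;
+       onMount(() => { uplot = new uPlot(opts, data, plotWrapper); });
+       onDestroy(() => uplot?.destroy());
+-->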
+{:else} + Cannot render plot: No series data returned for {metric} +{/if} From fd52fdd35bd87a876be3cae79431c89e3523ee24 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 5 May 2025 16:41:05 +0200 Subject: [PATCH 430/443] add job starttime to legend --- api/schema.graphqls | 1 + internal/graph/generated/generated.go | 64 ++++++++++++++++++- internal/graph/model/models_gen.go | 5 +- internal/graph/schema.resolvers.go | 5 +- web/frontend/src/generic/JobCompare.svelte | 11 +++- .../src/generic/plots/Comparogram.svelte | 3 +- 6 files changed, 80 insertions(+), 9 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 1942454..b911d07 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -172,6 +172,7 @@ type ScopedStats { type JobStats { jobId: Int! + startTime: Int! stats: [NamedStats!]! } diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 14d1b57..0671d48 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -171,8 +171,9 @@ type ComplexityRoot struct { } JobStats struct { - JobID func(childComplexity int) int - Stats func(childComplexity int) int + JobID func(childComplexity int) int + StartTime func(childComplexity int) int + Stats func(childComplexity int) int } JobsStatistics struct { @@ -947,6 +948,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobStats.JobID(childComplexity), true + case "JobStats.startTime": + if e.complexity.JobStats.StartTime == nil { + break + } + + return e.complexity.JobStats.StartTime(childComplexity), true + case "JobStats.stats": if e.complexity.JobStats.Stats == nil { break @@ -2281,6 +2289,7 @@ type ScopedStats { type JobStats { jobId: Int! + startTime: Int! stats: [NamedStats!]! 
} @@ -7400,6 +7409,50 @@ func (ec *executionContext) fieldContext_JobStats_jobId(_ context.Context, field return fc, nil } +func (ec *executionContext) _JobStats_startTime(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_startTime(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.StartTime, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStats_startTime(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobStats_stats(ctx, field) if err != nil { @@ -11197,6 +11250,8 @@ func (ec *executionContext) fieldContext_Query_jobsMetricStats(ctx context.Conte switch field.Name { case "jobId": return ec.fieldContext_JobStats_jobId(ctx, field) + case "startTime": + return ec.fieldContext_JobStats_startTime(ctx, field) case "stats": return ec.fieldContext_JobStats_stats(ctx, field) } @@ -17527,6 +17582,11 @@ func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet, if out.Values[i] == graphql.Null { out.Invalids++ } + case "startTime": + out.Values[i] = ec._JobStats_startTime(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } case "stats": out.Values[i] = ec._JobStats_stats(ctx, field, obj) if out.Values[i] == graphql.Null { diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index fdd3bf3..4cadf22 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -97,8 +97,9 @@ type JobResultList struct { } type JobStats struct { - JobID int `json:"jobId"` - Stats []*NamedStats `json:"stats"` + JobID int `json:"jobId"` + StartTime int `json:"startTime"` + Stats []*NamedStats `json:"stats"` } type JobsStatistics struct { diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 2920e0e..a93a67e 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -616,8 +616,9 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job } res = append(res, &model.JobStats{ - JobID: int(job.JobID), - Stats: sres, + JobID: int(job.JobID), + StartTime: int(job.StartTime.Unix()), + Stats: sres, }) } return res, err diff --git a/web/frontend/src/generic/JobCompare.svelte b/web/frontend/src/generic/JobCompare.svelte index 345bfa8..9c1ff94 100644 --- a/web/frontend/src/generic/JobCompare.svelte +++ 
b/web/frontend/src/generic/JobCompare.svelte @@ -39,6 +39,7 @@ let filter = [...filterBuffer]; let comparePlotData = {}; let jobIds = []; + let jobStarts = []; const sorting = { field: "startTime", type: "col", order: "DESC" }; /* GQL */ @@ -49,6 +50,7 @@ query ($filter: [JobFilter!]!, $metrics: [String!]!) { jobsMetricStats(filter: $filter, metrics: $metrics) { jobId + startTime stats { name data { @@ -72,6 +74,7 @@ $: matchedCompareJobs = $compareData.data != null ? $compareData.data.jobsMetricStats.length : -1; $: if ($compareData.data != null) { jobIds = []; + jobStarts = []; comparePlotData = {} jobs2uplot($compareData.data.jobsMetricStats, metrics) } @@ -121,7 +124,9 @@ let plotIndex = 0 jobs.forEach((j) => { jobIds.push(j.jobId) + jobStarts.push(j.startTime) for (let s of j.stats) { + // comparePlotData[s.name].data[0].push(j.startTime) comparePlotData[s.name].data[0].push(plotIndex) comparePlotData[s.name].data[1].push(s.data.min) comparePlotData[s.name].data[2].push(s.data.avg) @@ -181,6 +186,7 @@ title={'Compare '+ m} xlabel="JobIds" xticks={jobIds} + xtimes={jobStarts} ylabel={m} metric={m} yunit={comparePlotData[m].unit} @@ -188,9 +194,10 @@ /> {/each}

- {#each $compareData.data.jobsMetricStats as job (job.jobId)} + {#each $compareData.data.jobsMetricStats as job, jindex (job.jobId)} - {job.jobId} + {jindex}: {job.jobId} + {new Date(job.startTime * 1000)} {#each job.stats as stat (stat.name)} {stat.name} Min {stat.data.min} diff --git a/web/frontend/src/generic/plots/Comparogram.svelte b/web/frontend/src/generic/plots/Comparogram.svelte index 92db086..386434d 100644 --- a/web/frontend/src/generic/plots/Comparogram.svelte +++ b/web/frontend/src/generic/plots/Comparogram.svelte @@ -24,6 +24,7 @@ export let data; export let xlabel; export let xticks; + export let xtimes; export let ylabel; export let yunit; export let title; @@ -120,7 +121,7 @@ { label: "JobID", value: (u, ts, sidx, didx) => { - return xticks[didx]; + return xticks[didx] + ' (' + new Date(xtimes[didx] * 1000).toLocaleString() + ')'; }, } ]; From 33ecfe88ef52102f5796c84c30e8cefa5cc38fec Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 6 May 2025 09:58:28 +0200 Subject: [PATCH 431/443] add job duration, add starttime and duration to legend --- api/schema.graphqls | 1 + internal/graph/generated/generated.go | 60 ++++++++++++ internal/graph/model/models_gen.go | 1 + internal/graph/schema.resolvers.go | 1 + web/frontend/src/generic/JobCompare.svelte | 16 ++-- .../src/generic/plots/Comparogram.svelte | 94 +++++++++++++------ 6 files changed, 137 insertions(+), 36 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index b911d07..ca8ab95 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -173,6 +173,7 @@ type ScopedStats { type JobStats { jobId: Int! startTime: Int! + duration: Int! stats: [NamedStats!]! } diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 0671d48..1eaf841 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -171,6 +171,7 @@ type ComplexityRoot struct { } JobStats struct { + Duration func(childComplexity int) int JobID func(childComplexity int) int StartTime func(childComplexity int) int Stats func(childComplexity int) int @@ -941,6 +942,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Offset(childComplexity), true + case "JobStats.duration": + if e.complexity.JobStats.Duration == nil { + break + } + + return e.complexity.JobStats.Duration(childComplexity), true + case "JobStats.jobId": if e.complexity.JobStats.JobID == nil { break @@ -2290,6 +2298,7 @@ type ScopedStats { type JobStats { jobId: Int! startTime: Int! + duration: Int! stats: [NamedStats!]! 
} @@ -7453,6 +7462,50 @@ func (ec *executionContext) fieldContext_JobStats_startTime(_ context.Context, f return fc, nil } +func (ec *executionContext) _JobStats_duration(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobStats_duration(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Duration, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobStats_duration(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobStats", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _JobStats_stats(ctx context.Context, field graphql.CollectedField, obj *model.JobStats) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobStats_stats(ctx, field) if err != nil { @@ -11252,6 +11305,8 @@ func (ec *executionContext) fieldContext_Query_jobsMetricStats(ctx context.Conte return ec.fieldContext_JobStats_jobId(ctx, field) case "startTime": return ec.fieldContext_JobStats_startTime(ctx, field) + case "duration": + return ec.fieldContext_JobStats_duration(ctx, field) case "stats": return ec.fieldContext_JobStats_stats(ctx, field) } @@ -17587,6 +17642,11 @@ func (ec *executionContext) _JobStats(ctx context.Context, sel ast.SelectionSet, if out.Values[i] == graphql.Null { out.Invalids++ } + case "duration": + out.Values[i] = ec._JobStats_duration(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } case "stats": out.Values[i] = ec._JobStats_stats(ctx, field, obj) if out.Values[i] == graphql.Null { diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 4cadf22..d4486fc 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -99,6 +99,7 @@ type JobResultList struct { type JobStats struct { JobID int `json:"jobId"` StartTime int `json:"startTime"` + Duration int `json:"duration"` Stats []*NamedStats `json:"stats"` } diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index a93a67e..771565b 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -618,6 +618,7 @@ func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.Job res = append(res, &model.JobStats{ JobID: int(job.JobID), StartTime: int(job.StartTime.Unix()), + Duration: int(job.Duration), Stats: sres, }) } diff --git a/web/frontend/src/generic/JobCompare.svelte b/web/frontend/src/generic/JobCompare.svelte index 9c1ff94..cd37a07 100644 --- a/web/frontend/src/generic/JobCompare.svelte +++ b/web/frontend/src/generic/JobCompare.svelte @@ -39,7 +39,6 @@ let filter = [...filterBuffer]; let 
comparePlotData = {}; let jobIds = []; - let jobStarts = []; const sorting = { field: "startTime", type: "col", order: "DESC" }; /* GQL */ @@ -51,6 +50,7 @@ jobsMetricStats(filter: $filter, metrics: $metrics) { jobId startTime + duration stats { name data { @@ -74,7 +74,6 @@ $: matchedCompareJobs = $compareData.data != null ? $compareData.data.jobsMetricStats.length : -1; $: if ($compareData.data != null) { jobIds = []; - jobStarts = []; comparePlotData = {} jobs2uplot($compareData.data.jobsMetricStats, metrics) } @@ -116,7 +115,7 @@ const rawUnit = globalMetrics.find((gm) => gm.name == m)?.unit const metricUnit = (rawUnit?.prefix ? rawUnit.prefix : "") + (rawUnit?.base ? rawUnit.base : "") // Init - comparePlotData[m] = {unit: metricUnit, data: [[],[],[],[]]} // data: [X, Y1, Y2, Y3] + comparePlotData[m] = {unit: metricUnit, data: [[],[],[],[],[],[]]} // data: [X, XST, XRT, YMIN, YAVG, YMAX] } // Iterate jobs if exists @@ -124,13 +123,13 @@ let plotIndex = 0 jobs.forEach((j) => { jobIds.push(j.jobId) - jobStarts.push(j.startTime) for (let s of j.stats) { - // comparePlotData[s.name].data[0].push(j.startTime) comparePlotData[s.name].data[0].push(plotIndex) - comparePlotData[s.name].data[1].push(s.data.min) - comparePlotData[s.name].data[2].push(s.data.avg) - comparePlotData[s.name].data[3].push(s.data.max) + comparePlotData[s.name].data[1].push(j.startTime) + comparePlotData[s.name].data[2].push(j.duration) + comparePlotData[s.name].data[3].push(s.data.min) + comparePlotData[s.name].data[4].push(s.data.avg) + comparePlotData[s.name].data[5].push(s.data.max) } plotIndex++ }) @@ -186,7 +185,6 @@ title={'Compare '+ m} xlabel="JobIds" xticks={jobIds} - xtimes={jobStarts} ylabel={m} metric={m} yunit={comparePlotData[m].unit} diff --git a/web/frontend/src/generic/plots/Comparogram.svelte b/web/frontend/src/generic/plots/Comparogram.svelte index 386434d..da31b5c 100644 --- a/web/frontend/src/generic/plots/Comparogram.svelte +++ b/web/frontend/src/generic/plots/Comparogram.svelte @@ -24,15 +24,15 @@ export let data; export let xlabel; export let xticks; - export let xtimes; export let ylabel; export let yunit; export let title; // export let cluster = ""; // export let subCluster = ""; - // $: console.log('LABEL:', metric, yunit) - // $: console.log('DATA:', data) + $: console.log('LABEL:', metric, yunit) + $: console.log('DATA:', data) + $: console.log('XTICKS:', xticks) const metricConfig = null // DEBUG FILLER // const metricConfig = getContext("getMetricConfig")(cluster, subCluster, metric); // Args woher @@ -40,6 +40,22 @@ const lineWidth = clusterCockpitConfig.plot_general_lineWidth / window.devicePixelRatio; const cbmode = clusterCockpitConfig?.plot_general_colorblindMode || false; + // Format Seconds to hh:mm + function formatTime(t) { + if (t !== null) { + if (isNaN(t)) { + return t; + } else { + const tAbs = Math.abs(t); + const h = Math.floor(tAbs / 3600); + const m = Math.floor((tAbs % 3600) / 60); + if (h == 0) return `${m}m`; + else if (m == 0) return `${h}h`; + else return `${h}:${m}h`; + } + } + } + // UPLOT PLUGIN // converts the legend into a simple tooltip function legendAsTooltipPlugin({ className, @@ -120,34 +136,48 @@ const plotSeries = [ { label: "JobID", + scale: "x", value: (u, ts, sidx, didx) => { - return xticks[didx] + ' (' + new Date(xtimes[didx] * 1000).toLocaleString() + ')'; + return xticks[didx]; }, + }, + { + label: "Starttime", + scale: "xst", + value: (u, ts, sidx, didx) => { + return new Date(ts * 1000).toLocaleString(); + }, + }, + { + label: "Duration", + 
scale: "xrt", + value: (u, ts, sidx, didx) => { + return formatTime(ts); + }, + }, + { + label: "Min", + scale: "y", + width: lineWidth, + stroke: cbmode ? "rgb(0,255,0)" : "red", + }, + { + label: "Avg", + scale: "y", + width: lineWidth, + stroke: "black", + }, + { + label: "Max", + scale: "y", + width: lineWidth, + stroke: cbmode ? "rgb(0,0,255)" : "green", } ]; - plotSeries.push({ - label: "min", - scale: "y", - width: lineWidth, - stroke: cbmode ? "rgb(0,255,0)" : "red", - }); - plotSeries.push({ - label: "avg", - scale: "y", - width: lineWidth, - stroke: "black", - }); - plotSeries.push({ - label: "max", - scale: "y", - width: lineWidth, - stroke: cbmode ? "rgb(0,0,255)" : "green", - }); - const plotBands = [ - { series: [3, 2], fill: cbmode ? "rgba(0,0,255,0.1)" : "rgba(0,255,0,0.1)" }, - { series: [2, 1], fill: cbmode ? "rgba(0,255,0,0.1)" : "rgba(255,0,0,0.1)" }, + { series: [5, 4], fill: cbmode ? "rgba(0,0,255,0.1)" : "rgba(0,255,0,0.1)" }, + { series: [4, 3], fill: cbmode ? "rgba(0,255,0,0.1)" : "rgba(255,0,0,0.1)" }, ]; const opts = { @@ -167,6 +197,14 @@ return splits.map(s => xticks[s]); } }, + { + scale: "xst", + show: false, + }, + { + scale: "xrt", + show: false, + }, { scale: "y", grid: { show: true }, @@ -180,8 +218,8 @@ draw: [ (u) => { // Draw plot type label: - let textl = "Jobs min/avg/max"; - let textr = ""; + let textl = "Metric Min/Avg/Max in Duration"; + let textr = "Earlier <- StartTime -> Later"; u.ctx.save(); u.ctx.textAlign = "start"; // 'end' u.ctx.fillStyle = "black"; @@ -216,6 +254,8 @@ }, scales: { x: { time: false }, + xst: { time: false }, + xrt: { time: false }, y: maxY ? { min: 0, max: (maxY * 1.1) } : {auto: true}, // Add some space to upper render limit }, legend: { From d3d752f90cae1bcd2bd1470c6e19d6cc07d5b426 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 6 May 2025 10:46:30 +0200 Subject: [PATCH 432/443] finalize compareplot prototype, move formattime to units.js --- web/frontend/src/generic/JobCompare.svelte | 4 ++- .../src/generic/plots/Comparogram.svelte | 35 +++++++------------ .../src/generic/plots/Histogram.svelte | 17 +-------- .../src/generic/plots/MetricPlot.svelte | 18 +--------- web/frontend/src/generic/units.js | 20 +++++++++++ 5 files changed, 37 insertions(+), 57 deletions(-) diff --git a/web/frontend/src/generic/JobCompare.svelte b/web/frontend/src/generic/JobCompare.svelte index cd37a07..ba40bcc 100644 --- a/web/frontend/src/generic/JobCompare.svelte +++ b/web/frontend/src/generic/JobCompare.svelte @@ -22,6 +22,7 @@ // mutationStore, } from "@urql/svelte"; import { Row, Col, Card, Spinner } from "@sveltestrap/sveltestrap"; + import { formatTime } from "./units.js"; import Comparogram from "./plots/Comparogram.svelte"; const ccconfig = getContext("cc-config"), @@ -195,7 +196,8 @@ {#each $compareData.data.jobsMetricStats as job, jindex (job.jobId)} {jindex}: {job.jobId} - {new Date(job.startTime * 1000)} + {new Date(job.startTime * 1000).toISOString()} + {formatTime(job.duration)} {#each job.stats as stat (stat.name)} {stat.name} Min {stat.data.min} diff --git a/web/frontend/src/generic/plots/Comparogram.svelte b/web/frontend/src/generic/plots/Comparogram.svelte index da31b5c..b4d8d77 100644 --- a/web/frontend/src/generic/plots/Comparogram.svelte +++ b/web/frontend/src/generic/plots/Comparogram.svelte @@ -14,7 +14,7 @@ @@ -80,7 +85,7 @@ - + + + + @@ -148,6 +162,7 @@ bind:sorting bind:matchedListJobs bind:showFootprint + bind:selectedJobs {filterBuffer} /> {:else} @@ -161,7 +176,7 @@ - + Math.floor(Date.parse(rfc3339) / 
1000); - let opts = []; if (filters.cluster) opts.push(`cluster=${filters.cluster}`); if (filters.node) opts.push(`node=${filters.node}`); @@ -196,6 +198,11 @@ if (filters.startTime.range) { opts.push(`startTime=${filters.startTime.range}`) } + if (filters.dbId.length != 0) { + for (let dbi of filters.dbId) { + opts.push(`dbId=${dbi}`); + } + } if (filters.jobId.length != 0) if (filters.jobIdMatch != "in") { opts.push(`jobId=${filters.jobId}`); diff --git a/web/frontend/src/generic/JobList.svelte b/web/frontend/src/generic/JobList.svelte index 03044f0..e6ae6f6 100644 --- a/web/frontend/src/generic/JobList.svelte +++ b/web/frontend/src/generic/JobList.svelte @@ -39,6 +39,7 @@ export let metrics = ccconfig.plot_list_selectedMetrics; export let showFootprint; export let filterBuffer = []; + export let selectedJobs = []; let usePaging = ccconfig.job_list_usePaging let itemsPerPage = usePaging ? ccconfig.plot_list_jobsPerPage : 10; @@ -285,7 +286,10 @@ {:else} {#each jobs as job (job)} - + selectedJobs = [...selectedJobs, detail]} + on:unselect-job={({detail}) => selectedJobs = selectedJobs.filter(item => item !== detail)} + /> {:else} No jobs found diff --git a/web/frontend/src/generic/joblist/JobInfo.svelte b/web/frontend/src/generic/joblist/JobInfo.svelte index 8917653..f5cb066 100644 --- a/web/frontend/src/generic/joblist/JobInfo.svelte +++ b/web/frontend/src/generic/joblist/JobInfo.svelte @@ -18,6 +18,8 @@ export let username = null; export let authlevel= null; export let roles = null; + export let isSelected = null; + export let showSelect = false; function formatDuration(duration) { const hours = Math.floor(duration / 3600); @@ -76,18 +78,39 @@ {job.jobId} ({job.cluster}) - + + { 'Add or Remove Job to/from Comparison Selection' } + {/if} - - - { displayCheck ? 'Copied!' : 'Copy Job ID to Clipboard' } - + + + { displayCheck ? 'Copied!' : 'Copy Job ID to Clipboard' } + + {#if job.metaData?.jobName} {#if job.metaData?.jobName.length <= 25} diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 82cf2ed..7d94943 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -12,7 +12,7 @@
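A closing note on the selection wiring above: JobInfo renders a checkmark
button when showSelect is set and reports clicks upward, while JobList folds
the resulting events into its bound selectedJobs array, presumably feeding the
new dbId filter for the compare view. A minimal sketch of the child side,
assuming the usual Svelte event dispatcher and a database-id payload:

    // JobInfo.svelte (sketch)
    import { createEventDispatcher } from "svelte";
    const dispatch = createEventDispatcher();
    function toggleSelect() {
      if (isSelected) dispatch("unselect-job", job.id);
      else dispatch("select-job", job.id);
    }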