From 0afaea95139480136135b2a3c47cd22256857e20 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 8 Aug 2024 12:28:36 +0200 Subject: [PATCH 01/32] initial commit with example event dispatch --- web/frontend/src/Job.root.svelte | 8 +++++++- web/frontend/src/Node.root.svelte | 2 +- web/frontend/src/job/Metric.svelte | 9 +++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index f991e4f..31ca6e7 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -75,7 +75,7 @@ duration, numNodes, numHWThreads, numAcc, SMT, exclusive, partition, subCluster, arrayJobId, monitoringStatus, state, walltime, - tags { id, type, name, scope }, + tags { id, type, name }, resources { hostname, hwthreads, accelerators }, metaData, userData { name, email }, @@ -229,6 +229,11 @@ $initq.data.job.subCluster, ), })); + + + const loadRes = ({ detail }) => { + console.log(">>> UPPER RES REQUEST", detail) + } @@ -358,6 +363,7 @@ gm.name == item.metric)?.unit} diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index ad6983b..2d58540 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -27,7 +27,7 @@ import { init, checkMetricDisabled, - } from "./utils.js"; + } from "./generic/utils.js"; import PlotTable from "./generic/PlotTable.svelte"; import MetricPlot from "./generic/plots/MetricPlot.svelte"; import TimeSelection from "./generic/select/TimeSelection.svelte"; diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 88c6da8..8184e50 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -40,11 +40,15 @@ fetching = false, error = null; let selectedScope = minScope(scopes); + let selectedResolution = 60 + $: dispatch("new-res", selectedResolution) let statsPattern = /(.*)-stat$/ let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) let selectedScopeIndex + const resolutions = [60, 240, 600] + $: availableScopes = scopes; $: patternMatches = statsPattern.exec(selectedScope) $: if (!patternMatches) { @@ -83,6 +87,11 @@ {/each} {/if} + {#key series} {#if fetching == true} From b1fd07cd30b61ab00808f2b47fdb95fa107f9880 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 16 Aug 2024 14:50:31 +0200 Subject: [PATCH 02/32] add single update gql queries to metric wrapper --- api/schema.graphqls | 2 +- internal/api/rest.go | 2 +- internal/graph/generated/generated.go | 19 ++++-- internal/graph/schema.resolvers.go | 8 ++- web/frontend/src/Job.root.svelte | 23 +++---- web/frontend/src/job/Metric.svelte | 98 +++++++++++++++++++++++---- 6 files changed, 114 insertions(+), 38 deletions(-) diff --git a/api/schema.graphqls b/api/schema.graphqls index 568c15d..8c27504 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -223,7 +223,7 @@ type Query { allocatedNodes(cluster: String!): [Count!]! job(id: ID!): Job - jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]! + jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! 
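For illustration only, the extended jobMetrics query above can be exercised with a request like the following sketch, which mirrors the frontend subQuery added later in this series; the concrete variable values (job id "255", metric "flops_any", node scope, 600s resolution) are example assumptions, not part of the patch:

query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!, $selectedResolution: Int) {
  # resolution is optional; per this patch the resolver falls back to a 600s default when it is null
  jobMetrics(id: $dbid, metrics: $selectedMetrics, scopes: $selectedScopes, resolution: $selectedResolution) {
    name
    scope
    metric {
      unit { prefix base }
      timestep
      series { hostname data }
    }
  }
}

with variables such as: { "dbid": "255", "selectedMetrics": ["flops_any"], "selectedScopes": ["node"], "selectedResolution": 600 }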
diff --git a/internal/api/rest.go b/internal/api/rest.go index c8f4e7a..7946ab7 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -1110,7 +1110,7 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) { } resolver := graph.GetResolverInstance() - data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes) + data, err := resolver.Query().JobMetrics(r.Context(), id, metrics, scopes, nil) if err != nil { json.NewEncoder(rw).Encode(Respone{ Error: &struct { diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index 9ca0a60..23d31e7 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -246,7 +246,7 @@ type ComplexityRoot struct { Clusters func(childComplexity int) int GlobalMetrics func(childComplexity int) int Job func(childComplexity int, id string) int - JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int + JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope, resolution *int) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int @@ -368,7 +368,7 @@ type QueryResolver interface { User(ctx context.Context, username string) (*model.User, error) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) Job(ctx context.Context, id string) (*schema.Job, error) - JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) + JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) @@ -1290,7 +1290,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true + return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope), args["resolution"].(*int)), true case "Query.jobs": if e.complexity.Query.Jobs == nil { @@ -2059,7 +2059,7 @@ type Query { allocatedNodes(cluster: String!): [Count!]! job(id: ID!): Job - jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]! + jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! 
@@ -2370,6 +2370,15 @@ func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, raw } } args["scopes"] = arg2 + var arg3 *int + if tmp, ok := rawArgs["resolution"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("resolution")) + arg3, err = ec.unmarshalOInt2ᚖint(ctx, tmp) + if err != nil { + return nil, err + } + } + args["resolution"] = arg3 return args, nil } @@ -8499,7 +8508,7 @@ func (ec *executionContext) _Query_jobMetrics(ctx context.Context, field graphql }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobMetrics(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope)) + return ec.resolvers.Query().JobMetrics(rctx, fc.Args["id"].(string), fc.Args["metrics"].([]string), fc.Args["scopes"].([]schema.MetricScope), fc.Args["resolution"].(*int)) }) if err != nil { ec.Error(ctx, err) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index f36e25a..9e7bd3d 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -224,13 +224,19 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) } // JobMetrics is the resolver for the jobMetrics field. -func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) { +func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) { + defaultRes := 600 + if resolution == nil { + resolution = &defaultRes + } + job, err := r.Query().Job(ctx, id) if err != nil { log.Warn("Error while querying job for metrics") return nil, err } + log.Debugf(">>>>> REQUEST DATA HERE FOR %v AT SCOPE %v WITH RESOLUTION OF %d", metrics, scopes, *resolution) data, err := metricdata.LoadData(job, metrics, scopes, ctx) if err != nil { log.Warn("Error while loading job data") diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 31ca6e7..d183920 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -122,14 +122,14 @@ variables: { dbid, selectedMetrics, selectedScopes }, }); - function loadAllScopes() { - selectedScopes = [...selectedScopes, "socket", "core"] - jobMetrics = queryStore({ - client: client, - query: query, - variables: { dbid, selectedMetrics, selectedScopes}, - }); - } + // function loadAllScopes() { + // selectedScopes = [...selectedScopes, "socket", "core"] + // jobMetrics = queryStore({ + // client: client, + // query: query, + // variables: { dbid, selectedMetrics, selectedScopes}, + // }); + // } // Handle Job Query on Init -> is not executed anymore getContext("on-init")(() => { @@ -229,11 +229,6 @@ $initq.data.job.subCluster, ), })); - - - const loadRes = ({ detail }) => { - console.log(">>> UPPER RES REQUEST", detail) - } @@ -362,8 +357,6 @@ {#if item.data} gm.name == item.metric)?.unit} diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 8184e50..d57fcd6 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -13,7 +13,12 @@ --> @@ -69,13 +132,13 @@ {metricName} ({unit}) @@ -87,14 +150,19 @@ {/each} {/if} - { + scopes = ["node"] + selectedScope = "node" + selectedScopes = [...scopes] + loadUpdate + }}> {#each 
resolutions as res} {/each} {#key series} - {#if fetching == true} + {#if $metricData?.fetching == true} {:else if error != null} {error.message} From b70de5a4be12f3d7bcc18e70700c9940b759d104 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 16 Aug 2024 16:35:17 +0200 Subject: [PATCH 03/32] Handle single update data --- web/frontend/src/job/Metric.svelte | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index d57fcd6..bfa3adc 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -25,7 +25,7 @@ Spinner, Card, } from "@sveltestrap/sveltestrap"; - import { minScope } from "../generic/utils"; + import { minScope } from "../generic/utils.js"; import Timeseries from "../generic/plots/MetricPlot.svelte"; export let job; @@ -87,20 +87,13 @@ const selectedMetrics = [metricName] function loadUpdate() { - - // useQuery('repoData', () => - // fetch('https://api.github.com/repos/SvelteStack/svelte-query').then(res => - // res.json() - // ) - + console.log('S> OLD DATA:', rawData) metricData = queryStore({ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, }); - console.log('S> OLD DATA:', rawData) - // rawData = {...$metricData?.data?.singleUpdate} }; $: if (selectedScope == "load-all") { @@ -121,10 +114,11 @@ (series) => selectedHost == null || series.hostname == selectedHost, ); - $: if ($metricData && !$metricData.fetching) console.log('S> NEW DATA:', rawData) - // $: console.log('Pattern', patternMatches) + $: if ($metricData && !$metricData.fetching) { + rawData = $metricData.data.singleUpdate.map((x) => x.metric) + console.log('S> NEW DATA:', rawData) + } $: console.log('SelectedScope', selectedScope) - $: console.log('ScopeIndex', selectedScopeIndex) @@ -154,7 +148,7 @@ scopes = ["node"] selectedScope = "node" selectedScopes = [...scopes] - loadUpdate + loadUpdate() }}> {#each resolutions as res} From e4f8022b7a16687ee6d87a78c3ceab95f1403a2b Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 11:39:19 +0200 Subject: [PATCH 04/32] change to one reactive metric data load on two variables --- web/frontend/src/job/Metric.svelte | 104 ++++++++++++++++++----------- 1 file changed, 65 insertions(+), 39 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index bfa3adc..f47a3c7 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -41,14 +41,16 @@ plot, error = null; let selectedScope = minScope(scopes); - let selectedResolution = 600 - let statsPattern = /(.*)-stat$/ - let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) - let selectedScopeIndex + let selectedResolution; + let pendingResolution = 600; + let selectedScopeIndex = scopes.findIndex((s) => s == minScope(scopes)); + const statsPattern = /(.*)-stat$/; + let patternMatches = false; + let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null); // const dispatch = createEventDispatcher(); - const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? metricUnit.base : "") - const resolutions = [600, 240, 60] + const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? 
metricUnit.base : ""); + const resolutions = [600, 240, 60] // DEV: Make configable const client = getContextClient(); const subQuery = gql` query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!, $selectedResolution: Int) { @@ -86,39 +88,68 @@ const dbid = job.id; const selectedMetrics = [metricName] - function loadUpdate() { - console.log('S> OLD DATA:', rawData) - metricData = queryStore({ - client: client, - query: subQuery, - variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - }); - - }; - - $: if (selectedScope == "load-all") { - scopes = [...scopes, "socket", "core"] - selectedScope = nativeScope - selectedScopes = [...scopes] - loadUpdate() - }; - - $: patternMatches = statsPattern.exec(selectedScope) - $: if (!patternMatches) { - selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); + $: if (selectedScope == "load-all" || pendingResolution) { + + if (selectedScope == "load-all") { + console.log('Triggered load-all') + selectedScopes = [...scopes, "socket", "core"] } else { - selectedScopeIndex = scopes.findIndex((s) => s == patternMatches[1]); + console.log("Triggered scope switch:", selectedScope, pendingResolution) } + + // What if accelerator scope / native core scopes? + if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { + selectedScope = String("node") + selectedScopes = ["node"] + console.log("New Resolution: Reset to node scope") + } else { + console.log("New Resolution: No change in Res or just node scope") + } + + if (!selectedResolution) { + selectedResolution = Number(pendingResolution) + } else { + selectedResolution = Number(pendingResolution) + + metricData = queryStore({ + client: client, + query: subQuery, + variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, + // requestPolicy: "network-only", + }); + + if ($metricData && !$metricData.fetching) { + console.log('Trigger Data Handling') + + rawData = $metricData.data.singleUpdate.map((x) => x.metric) + scopes = $metricData.data.singleUpdate.map((x) => x.scope) + statsSeries = rawData.map((data) => data?.statisticsSeries ? 
data.statisticsSeries : null) + + // Handle Selected Scope on load-all + if (selectedScope == "load-all") { + selectedScope = minScope(scopes) + console.log('Set New SelectedScope after Load-All', selectedScope, scopes) + } else { + console.log('Set New SelectedScope', selectedScope) + } + + patternMatches = statsPattern.exec(selectedScope) + if (!patternMatches) { + selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); + console.log("Selected Index # from Array", selectedScopeIndex, scopes) + } else { + selectedScopeIndex = scopes.findIndex((s) => s == patternMatches[1]); + console.log("Selected Stats Index # from Array", selectedScopeIndex, scopes) + } + } + } + } + $: data = rawData[selectedScopeIndex]; + $: series = data?.series.filter( (series) => selectedHost == null || series.hostname == selectedHost, ); - - $: if ($metricData && !$metricData.fetching) { - rawData = $metricData.data.singleUpdate.map((x) => x.metric) - console.log('S> NEW DATA:', rawData) - } - $: console.log('SelectedScope', selectedScope) @@ -144,12 +175,7 @@ {/each} {/if} - {#each resolutions as res} {/each} From 613e128cab900e4c4d9e2ab9338aee010c0adbca Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 11:51:38 +0200 Subject: [PATCH 05/32] cleanup dev logging --- web/frontend/src/job/Metric.svelte | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index f47a3c7..eb4c90b 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -88,25 +88,20 @@ const dbid = job.id; const selectedMetrics = [metricName] - $: if (selectedScope == "load-all" || pendingResolution) { + $: if (selectedScope || pendingResolution) { if (selectedScope == "load-all") { - console.log('Triggered load-all') selectedScopes = [...scopes, "socket", "core"] - } else { - console.log("Triggered scope switch:", selectedScope, pendingResolution) } // What if accelerator scope / native core scopes? if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { selectedScope = String("node") selectedScopes = ["node"] - console.log("New Resolution: Reset to node scope") - } else { - console.log("New Resolution: No change in Res or just node scope") } if (!selectedResolution) { + // Skips reactive data load on init selectedResolution = Number(pendingResolution) } else { selectedResolution = Number(pendingResolution) @@ -115,31 +110,25 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - // requestPolicy: "network-only", }); if ($metricData && !$metricData.fetching) { - console.log('Trigger Data Handling') rawData = $metricData.data.singleUpdate.map((x) => x.metric) scopes = $metricData.data.singleUpdate.map((x) => x.scope) statsSeries = rawData.map((data) => data?.statisticsSeries ? 
data.statisticsSeries : null) - // Handle Selected Scope on load-all + // Set selected scope to min of returned scopes if (selectedScope == "load-all") { selectedScope = minScope(scopes) - console.log('Set New SelectedScope after Load-All', selectedScope, scopes) - } else { - console.log('Set New SelectedScope', selectedScope) } patternMatches = statsPattern.exec(selectedScope) + if (!patternMatches) { selectedScopeIndex = scopes.findIndex((s) => s == selectedScope); - console.log("Selected Index # from Array", selectedScopeIndex, scopes) } else { selectedScopeIndex = scopes.findIndex((s) => s == patternMatches[1]); - console.log("Selected Stats Index # from Array", selectedScopeIndex, scopes) } } } From 599a36466a064102ba461195c4c30fdc69e019d8 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 14:52:13 +0200 Subject: [PATCH 06/32] fix new data reactivity for accelerators --- web/frontend/src/job/Metric.svelte | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index eb4c90b..5c5a87a 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -18,7 +18,6 @@ gql, getContextClient } from "@urql/svelte"; - // import { createEventDispatcher } from "svelte"; import { InputGroup, InputGroupText, @@ -44,11 +43,10 @@ let selectedResolution; let pendingResolution = 600; let selectedScopeIndex = scopes.findIndex((s) => s == minScope(scopes)); - const statsPattern = /(.*)-stat$/; let patternMatches = false; let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null); - // const dispatch = createEventDispatcher(); + const statsPattern = /(.*)-stat$/; const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? metricUnit.base : ""); const resolutions = [600, 240, 60] // DEV: Make configable const client = getContextClient(); @@ -89,21 +87,23 @@ const selectedMetrics = [metricName] $: if (selectedScope || pendingResolution) { - - if (selectedScope == "load-all") { - selectedScopes = [...scopes, "socket", "core"] - } - - // What if accelerator scope / native core scopes? - if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { - selectedScope = String("node") - selectedScopes = ["node"] - } - if (!selectedResolution) { // Skips reactive data load on init selectedResolution = Number(pendingResolution) + } else { + + if (selectedScope == "load-all") { + selectedScopes = [...scopes, "socket", "core", "accelerator"] + } + + if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { + selectedScope = String("node") + selectedScopes = ["node"] + // Instead of adding acc to load-all: always add by default if native is acc + // selectedScopes = nativeScope == "accelerator" ? 
["node", "accelerator"] : ["node"] + } + selectedResolution = Number(pendingResolution) metricData = queryStore({ From e74e506ffe6148076e6aa70621436e0712b71a5c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 20 Aug 2024 16:41:35 +0200 Subject: [PATCH 07/32] cleanup outdated code --- web/frontend/src/Job.root.svelte | 9 --------- 1 file changed, 9 deletions(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index d183920..5c1d004 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -122,15 +122,6 @@ variables: { dbid, selectedMetrics, selectedScopes }, }); - // function loadAllScopes() { - // selectedScopes = [...selectedScopes, "socket", "core"] - // jobMetrics = queryStore({ - // client: client, - // query: query, - // variables: { dbid, selectedMetrics, selectedScopes}, - // }); - // } - // Handle Job Query on Init -> is not executed anymore getContext("on-init")(() => { let job = $initq.data.job; From ceb3a095d80914c0af9793afc1b52b40025499f8 Mon Sep 17 00:00:00 2001 From: Aditya Ujeniya Date: Thu, 22 Aug 2024 14:29:51 +0200 Subject: [PATCH 08/32] Sampling Feature for archived and fresh data --- internal/api/api_test.go | 5 +- internal/api/rest.go | 18 ++- internal/graph/schema.resolvers.go | 2 +- internal/graph/util.go | 10 +- internal/metricdata/cc-metric-store.go | 175 ++++++++++++++----------- internal/metricdata/influxdb-v2.go | 3 +- internal/metricdata/metricdata.go | 39 +++++- internal/metricdata/prometheus.go | 3 +- internal/metricdata/utils.go | 45 ++++++- pkg/archive/json.go | 2 +- pkg/resampler/resampler.go | 113 ++++++++++++++++ pkg/resampler/util.go | 25 ++++ sample.txt | 12 ++ web/frontend/src/job/Metric.svelte | 1 + 14 files changed, 358 insertions(+), 95 deletions(-) create mode 100644 pkg/resampler/resampler.go create mode 100644 pkg/resampler/util.go create mode 100644 sample.txt diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 80a7e64..acf609f 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -172,7 +172,6 @@ func cleanup() { func TestRestApi(t *testing.T) { restapi := setup(t) t.Cleanup(cleanup) - testData := schema.JobData{ "load_one": map[schema.MetricScope]*schema.JobMetric{ schema.MetricScopeNode: { @@ -189,7 +188,7 @@ func TestRestApi(t *testing.T) { }, } - metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) { + metricdata.TestLoadDataCallback = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { return testData, nil } @@ -341,7 +340,7 @@ func TestRestApi(t *testing.T) { } t.Run("CheckArchive", func(t *testing.T) { - data, err := metricdata.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background()) + data, err := metricdata.LoadData(stoppedJob, []string{"load_one"}, []schema.MetricScope{schema.MetricScopeNode}, context.Background(), 60) if err != nil { t.Fatal(err) } diff --git a/internal/api/rest.go b/internal/api/rest.go index 7946ab7..1695c0f 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -514,8 +514,15 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) var data schema.JobData + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + + for _, mc := range metricConfigs { + resolution = max(resolution, mc.Timestep) + } + if 
r.URL.Query().Get("all-metrics") == "true" { - data, err = metricdata.LoadData(job, nil, scopes, r.Context()) + data, err = metricdata.LoadData(job, nil, scopes, r.Context(), resolution) if err != nil { log.Warn("Error while loading job data") return @@ -604,7 +611,14 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { scopes = []schema.MetricScope{"node"} } - data, err := metricdata.LoadData(job, metrics, scopes, r.Context()) + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + + for _, mc := range metricConfigs { + resolution = max(resolution, mc.Timestep) + } + + data, err := metricdata.LoadData(job, metrics, scopes, r.Context(), resolution) if err != nil { log.Warn("Error while loading job data") return diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 9e7bd3d..0eba013 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -237,7 +237,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str } log.Debugf(">>>>> REQUEST DATA HERE FOR %v AT SCOPE %v WITH RESOLUTION OF %d", metrics, scopes, *resolution) - data, err := metricdata.LoadData(job, metrics, scopes, ctx) + data, err := metricdata.LoadData(job, metrics, scopes, ctx, *resolution) if err != nil { log.Warn("Error while loading job data") return nil, err diff --git a/internal/graph/util.go b/internal/graph/util.go index 3e65b6c..29e282c 100644 --- a/internal/graph/util.go +++ b/internal/graph/util.go @@ -12,6 +12,7 @@ import ( "github.com/99designs/gqlgen/graphql" "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" // "github.com/ClusterCockpit/cc-backend/pkg/archive" @@ -47,7 +48,14 @@ func (r *queryResolver) rooflineHeatmap( continue } - jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx) + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + + for _, mc := range metricConfigs { + resolution = max(resolution, mc.Timestep) + } + + jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx, resolution) if err != nil { log.Errorf("Error while loading roofline metrics for job %d", job.ID) return nil, err diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index e564db6..53469f0 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -55,6 +55,7 @@ type ApiQuery struct { SubType *string `json:"subtype,omitempty"` Metric string `json:"metric"` Hostname string `json:"host"` + Resolution int `json:"resolution"` TypeIds []string `json:"type-ids,omitempty"` SubTypeIds []string `json:"subtype-ids,omitempty"` Aggregate bool `json:"aggreg"` @@ -66,13 +67,14 @@ type ApiQueryResponse struct { } type ApiMetricData struct { - Error *string `json:"error"` - Data []schema.Float `json:"data"` - From int64 `json:"from"` - To int64 `json:"to"` - Avg schema.Float `json:"avg"` - Min schema.Float `json:"min"` - Max schema.Float `json:"max"` + Error *string `json:"error"` + Data []schema.Float `json:"data"` + From int64 `json:"from"` + To int64 `json:"to"` + Resolution int `json:"resolution"` + Avg schema.Float `json:"avg"` + Min 
schema.Float `json:"min"` + Max schema.Float `json:"max"` } func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { @@ -83,7 +85,7 @@ func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { } ccms.url = config.Url - ccms.queryEndpoint = fmt.Sprintf("%s/api/query", config.Url) + ccms.queryEndpoint = fmt.Sprintf("%s/api/query/", config.Url) ccms.jwt = config.Token ccms.client = http.Client{ Timeout: 10 * time.Second, @@ -129,7 +131,7 @@ func (ccms *CCMetricStore) doRequest( return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, ccms.queryEndpoint, buf) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf) if err != nil { log.Warn("Error while building request body") return nil, err @@ -162,8 +164,9 @@ func (ccms *CCMetricStore) LoadData( metrics []string, scopes []schema.MetricScope, ctx context.Context, + resolution int, ) (schema.JobData, error) { - queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes) + queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution) if err != nil { log.Warn("Error while building queries") return nil, err @@ -196,10 +199,11 @@ func (ccms *CCMetricStore) LoadData( } jobMetric, ok := jobData[metric][scope] + if !ok { jobMetric = &schema.JobMetric{ Unit: mc.Unit, - Timestep: mc.Timestep, + Timestep: row[0].Resolution, Series: make([]schema.Series, 0), } jobData[metric][scope] = jobMetric @@ -251,7 +255,6 @@ func (ccms *CCMetricStore) LoadData( /* Returns list for "partial errors" */ return jobData, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", ")) } - return jobData, nil } @@ -267,6 +270,7 @@ func (ccms *CCMetricStore) buildQueries( job *schema.Job, metrics []string, scopes []schema.MetricScope, + resolution int, ) ([]ApiQuery, []schema.MetricScope, error) { queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) assignedScope := []schema.MetricScope{} @@ -318,11 +322,12 @@ func (ccms *CCMetricStore) buildQueries( } queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &acceleratorString, - TypeIds: host.Accelerators, + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &acceleratorString, + TypeIds: host.Accelerators, + Resolution: resolution, }) assignedScope = append(assignedScope, schema.MetricScopeAccelerator) continue @@ -335,11 +340,12 @@ func (ccms *CCMetricStore) buildQueries( } queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &acceleratorString, - TypeIds: host.Accelerators, + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &acceleratorString, + TypeIds: host.Accelerators, + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -348,11 +354,12 @@ func (ccms *CCMetricStore) buildQueries( // HWThread -> HWThead if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &hwthreadString, - TypeIds: intToStringSlice(hwthreads), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &hwthreadString, + TypeIds: intToStringSlice(hwthreads), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -363,11 +370,12 @@ func (ccms *CCMetricStore) buildQueries( cores, _ := topology.GetCoresFromHWThreads(hwthreads) for _, 
core := range cores { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Core[core]), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Core[core]), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) } @@ -379,11 +387,12 @@ func (ccms *CCMetricStore) buildQueries( sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) for _, socket := range sockets { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(topology.Socket[socket]), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(topology.Socket[socket]), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) } @@ -393,11 +402,12 @@ func (ccms *CCMetricStore) buildQueries( // HWThread -> Node if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &hwthreadString, - TypeIds: intToStringSlice(hwthreads), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &hwthreadString, + TypeIds: intToStringSlice(hwthreads), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -407,11 +417,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore { cores, _ := topology.GetCoresFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &coreString, - TypeIds: intToStringSlice(cores), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -421,11 +432,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode { cores, _ := topology.GetCoresFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &coreString, - TypeIds: intToStringSlice(cores), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &coreString, + TypeIds: intToStringSlice(cores), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -435,11 +447,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain { sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -449,11 +462,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode { sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: 
host.Hostname, - Aggregate: true, - Type: &memoryDomainString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &memoryDomainString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -463,11 +477,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket { sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: false, - Type: &socketString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: false, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -477,11 +492,12 @@ func (ccms *CCMetricStore) buildQueries( if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode { sockets, _ := topology.GetSocketsFromHWThreads(hwthreads) queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, - Aggregate: true, - Type: &socketString, - TypeIds: intToStringSlice(sockets), + Metric: remoteName, + Hostname: host.Hostname, + Aggregate: true, + Type: &socketString, + TypeIds: intToStringSlice(sockets), + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -490,8 +506,9 @@ func (ccms *CCMetricStore) buildQueries( // Node -> Node if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode { queries = append(queries, ApiQuery{ - Metric: remoteName, - Hostname: host.Hostname, + Metric: remoteName, + Hostname: host.Hostname, + Resolution: resolution, }) assignedScope = append(assignedScope, scope) continue @@ -510,7 +527,15 @@ func (ccms *CCMetricStore) LoadStats( metrics []string, ctx context.Context, ) (map[string]map[string]schema.MetricStatistics, error) { - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) // #166 Add scope shere for analysis view accelerator normalization? + + metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 9000 + + for _, mc := range metricConfigs { + resolution = min(resolution, mc.Timestep) + } + + queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, resolution) // #166 Add scope shere for analysis view accelerator normalization? 
if err != nil { log.Warn("Error while building query") return nil, err diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index b95f07e..b416fa5 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -60,7 +60,8 @@ func (idb *InfluxDBv2DataRepository) LoadData( job *schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.JobData, error) { + ctx context.Context, + resolution int) (schema.JobData, error) { measurementsConds := make([]string, 0, len(metrics)) for _, m := range metrics { diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index eba9dee..e79261b 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -15,6 +15,7 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/lrucache" + "github.com/ClusterCockpit/cc-backend/pkg/resampler" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -24,7 +25,7 @@ type MetricDataRepository interface { Init(rawConfig json.RawMessage) error // Return the JobData for the given job, only with the requested metrics. - LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) + LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) @@ -80,8 +81,9 @@ func LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, + resolution int, ) (schema.JobData, error) { - data := cache.Get(cacheKey(job, metrics, scopes), func() (_ interface{}, ttl time.Duration, size int) { + data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ interface{}, ttl time.Duration, size int) { var jd schema.JobData var err error @@ -106,7 +108,7 @@ func LoadData(job *schema.Job, } } - jd, err = repo.LoadData(job, metrics, scopes, ctx) + jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution) if err != nil { if len(jd) != 0 { log.Warnf("partial error: %s", err.Error()) @@ -118,12 +120,31 @@ func LoadData(job *schema.Job, } size = jd.Size() } else { - jd, err = archive.GetHandle().LoadJobData(job) + var jd_temp schema.JobData + jd_temp, err = archive.GetHandle().LoadJobData(job) if err != nil { log.Error("Error while loading job data from archive") return err, 0, 0 } + //Deep copy the cached arhive hashmap + jd = DeepCopy(jd_temp) + + //Resampling for archived data. + //Pass the resolution from frontend here. 
+ for _, v := range jd { + for _, v_ := range v { + timestep := 0 + for i := 0; i < len(v_.Series); i += 1 { + v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, v_.Timestep, resolution) + if err != nil { + return err, 0, 0 + } + } + v_.Timestep = timestep + } + } + // Avoid sending unrequested data to the client: if metrics != nil || scopes != nil { if metrics == nil { @@ -254,11 +275,12 @@ func cacheKey( job *schema.Job, metrics []string, scopes []schema.MetricScope, + resolution int, ) string { // Duration and StartTime do not need to be in the cache key as StartTime is less unique than // job.ID and the TTL of the cache entry makes sure it does not stay there forever. - return fmt.Sprintf("%d(%s):[%v],[%v]", - job.ID, job.State, metrics, scopes) + return fmt.Sprintf("%d(%s):[%v],[%v]-%d", + job.ID, job.State, metrics, scopes, resolution) } // For /monitoring/job/ and some other places, flops_any and mem_bw need @@ -297,8 +319,11 @@ func prepareJobData( func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { allMetrics := make([]string, 0) metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + resolution := 0 + for _, mc := range metricConfigs { allMetrics = append(allMetrics, mc.Name) + resolution = mc.Timestep } // TODO: Talk about this! What resolutions to store data at... @@ -311,7 +336,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { scopes = append(scopes, schema.MetricScopeAccelerator) } - jobData, err := LoadData(job, allMetrics, scopes, ctx) + jobData, err := LoadData(job, allMetrics, scopes, ctx, resolution) if err != nil { log.Error("Error wile loading job data for archiving") return nil, err diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index a8d9f39..0611824 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -265,6 +265,7 @@ func (pdb *PrometheusDataRepository) LoadData( metrics []string, scopes []schema.MetricScope, ctx context.Context, + resolution int, ) (schema.JobData, error) { // TODO respect requested scope if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) { @@ -356,7 +357,7 @@ func (pdb *PrometheusDataRepository) LoadStats( // map of metrics of nodes of stats stats := map[string]map[string]schema.MetricStatistics{} - data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx) + data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/) if err != nil { log.Warn("Error while loading job for stats") return nil, err diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index 6d490fe..f480e40 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -12,7 +12,7 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/schema" ) -var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) { +var TestLoadDataCallback func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) = func(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) { panic("TODO") } @@ -27,9 +27,10 @@ func (tmdr *TestMetricDataRepository) LoadData( job 
*schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.JobData, error) { + ctx context.Context, + resolution int) (schema.JobData, error) { - return TestLoadDataCallback(job, metrics, scopes, ctx) + return TestLoadDataCallback(job, metrics, scopes, ctx, resolution) } func (tmdr *TestMetricDataRepository) LoadStats( @@ -48,3 +49,41 @@ func (tmdr *TestMetricDataRepository) LoadNodeData( panic("TODO") } + +func DeepCopy(jd_temp schema.JobData) schema.JobData { + var jd schema.JobData + + jd = make(schema.JobData, len(jd_temp)) + for k, v := range jd_temp { + jd[k] = make(map[schema.MetricScope]*schema.JobMetric, len(jd_temp[k])) + for k_, v_ := range v { + jd[k][k_] = new(schema.JobMetric) + jd[k][k_].Series = make([]schema.Series, len(v_.Series)) + for i := 0; i < len(v_.Series); i += 1 { + jd[k][k_].Series[i].Data = make([]schema.Float, len(v_.Series[i].Data)) + copy(jd[k][k_].Series[i].Data, v_.Series[i].Data) + jd[k][k_].Series[i].Hostname = v_.Series[i].Hostname + jd[k][k_].Series[i].Id = v_.Series[i].Id + jd[k][k_].Series[i].Statistics.Avg = v_.Series[i].Statistics.Avg + jd[k][k_].Series[i].Statistics.Min = v_.Series[i].Statistics.Min + jd[k][k_].Series[i].Statistics.Max = v_.Series[i].Statistics.Max + } + jd[k][k_].Timestep = v_.Timestep + jd[k][k_].Unit.Base = v_.Unit.Base + jd[k][k_].Unit.Prefix = v_.Unit.Prefix + if v_.StatisticsSeries != nil { + jd[k][k_].StatisticsSeries = new(schema.StatsSeries) + copy(jd[k][k_].StatisticsSeries.Max, v_.StatisticsSeries.Max) + copy(jd[k][k_].StatisticsSeries.Min, v_.StatisticsSeries.Min) + copy(jd[k][k_].StatisticsSeries.Median, v_.StatisticsSeries.Median) + copy(jd[k][k_].StatisticsSeries.Mean, v_.StatisticsSeries.Mean) + for k__, v__ := range v_.StatisticsSeries.Percentiles { + jd[k][k_].StatisticsSeries.Percentiles[k__] = v__ + } + } else { + jd[k][k_].StatisticsSeries = v_.StatisticsSeries + } + } + } + return jd +} diff --git a/pkg/archive/json.go b/pkg/archive/json.go index ff2c6d9..1219658 100644 --- a/pkg/archive/json.go +++ b/pkg/archive/json.go @@ -9,8 +9,8 @@ import ( "io" "time" - "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" ) func DecodeJobData(r io.Reader, k string) (schema.JobData, error) { diff --git a/pkg/resampler/resampler.go b/pkg/resampler/resampler.go new file mode 100644 index 0000000..2c06b38 --- /dev/null +++ b/pkg/resampler/resampler.go @@ -0,0 +1,113 @@ +package resampler + +import ( + "errors" + "fmt" + "math" + + "github.com/ClusterCockpit/cc-backend/pkg/schema" +) + +func SimpleResampler(data []schema.Float, old_frequency int64, new_frequency int64) ([]schema.Float, error) { + if old_frequency == 0 || new_frequency == 0 { + return nil, errors.New("either old or new frequency is set to 0") + } + + if new_frequency%old_frequency != 0 { + return nil, errors.New("new sampling frequency should be multiple of the old frequency") + } + + var step int = int(new_frequency / old_frequency) + var new_data_length = len(data) / step + + if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) { + return data, nil + } + + new_data := make([]schema.Float, new_data_length) + + for i := 0; i < new_data_length; i++ { + new_data[i] = data[i*step] + } + + return new_data, nil +} + +// Inspired by one of the algorithms from https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf +// Adapted from https://github.com/haoel/downsampling/blob/master/core/lttb.go +func 
LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_frequency int) ([]schema.Float, int, error) { + + if old_frequency == 0 || new_frequency == 0 { + return data, old_frequency, nil + } + + if new_frequency%old_frequency != 0 { + return nil, 0, errors.New(fmt.Sprintf("new sampling frequency : %d should be multiple of the old frequency : %d", new_frequency, old_frequency)) + } + + var step int = int(new_frequency / old_frequency) + var new_data_length = len(data) / step + + if new_data_length == 0 || len(data) < 100 || new_data_length >= len(data) { + return data, old_frequency, nil + } + + new_data := make([]schema.Float, 0, new_data_length) + + // Bucket size. Leave room for start and end data points + bucketSize := float64(len(data)-2) / float64(new_data_length-2) + + new_data = append(new_data, data[0]) // Always add the first point + + // We have 3 pointers represent for + // > bucketLow - the current bucket's beginning location + // > bucketMiddle - the current bucket's ending location, + // also the beginning location of next bucket + // > bucketHight - the next bucket's ending location. + bucketLow := 1 + bucketMiddle := int(math.Floor(bucketSize)) + 1 + + var prevMaxAreaPoint int + + for i := 0; i < new_data_length-2; i++ { + + bucketHigh := int(math.Floor(float64(i+2)*bucketSize)) + 1 + if bucketHigh >= len(data)-1 { + bucketHigh = len(data) - 2 + } + + // Calculate point average for next bucket (containing c) + avgPointX, avgPointY := calculateAverageDataPoint(data[bucketMiddle:bucketHigh+1], int64(bucketMiddle)) + + // Get the range for current bucket + currBucketStart := bucketLow + currBucketEnd := bucketMiddle + + // Point a + pointX := prevMaxAreaPoint + pointY := data[prevMaxAreaPoint] + + maxArea := -1.0 + + var maxAreaPoint int + for ; currBucketStart < currBucketEnd; currBucketStart++ { + + area := calculateTriangleArea(schema.Float(pointX), pointY, avgPointX, avgPointY, schema.Float(currBucketStart), data[currBucketStart]) + if area > maxArea { + maxArea = area + maxAreaPoint = currBucketStart + } + } + + new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket + prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMAxAreaPoint + + //move to the next window + bucketLow = bucketMiddle + bucketMiddle = bucketHigh + } + + new_data = append(new_data, data[len(data)-1]) // Always add last + + return new_data, new_frequency, nil +} diff --git a/pkg/resampler/util.go b/pkg/resampler/util.go new file mode 100644 index 0000000..605f638 --- /dev/null +++ b/pkg/resampler/util.go @@ -0,0 +1,25 @@ +package resampler + +import ( + "math" + + "github.com/ClusterCockpit/cc-backend/pkg/schema" +) + +func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY schema.Float) float64 { + area := ((paX-pcX)*(pbY-paY) - (paX-pbX)*(pcY-paY)) * 0.5 + return math.Abs(float64(area)) +} + +func calculateAverageDataPoint(points []schema.Float, xStart int64) (avgX schema.Float, avgY schema.Float) { + + for _, point := range points { + avgX += schema.Float(xStart) + avgY += point + xStart++ + } + l := schema.Float(len(points)) + avgX /= l + avgY /= l + return avgX, avgY +} diff --git a/sample.txt b/sample.txt new file mode 100644 index 0000000..953def6 --- /dev/null +++ b/sample.txt @@ -0,0 +1,12 @@ +HTTP server listening at 127.0.0.1:8080...Key : "demo" +Loading data with res : 600 +Key : "255(completed):[[]],[[]]-600" +Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" +Key : "partitions:fritz" +Key : "partitions:alex" +Key : 
"metadata:255" +Key : "footprint:255" +Loading data with res : 600 +Key : "255(completed):[[flops_any mem_bw core_power acc_mem_used cpu_load mem_used acc_power cpu_power nv_sm_clock ipc cpu_user clock nv_mem_util nv_temp acc_utilization]],[[node accelerator socket core]]-600" +Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" +Existing key : "var/job-archive/alex/679/951/1675866122/data.json.gz" in cache with value diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 5c5a87a..c551750 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -110,6 +110,7 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, + requestPolicy:"network-only" }); if ($metricData && !$metricData.fetching) { From 708eaf4178f6eb739478baeabef146def7cec100 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 22 Aug 2024 17:55:21 +0200 Subject: [PATCH 09/32] fix dev leftovers --- sample.txt | 12 ------------ web/frontend/src/job/Metric.svelte | 2 +- 2 files changed, 1 insertion(+), 13 deletions(-) delete mode 100644 sample.txt diff --git a/sample.txt b/sample.txt deleted file mode 100644 index 953def6..0000000 --- a/sample.txt +++ /dev/null @@ -1,12 +0,0 @@ -HTTP server listening at 127.0.0.1:8080...Key : "demo" -Loading data with res : 600 -Key : "255(completed):[[]],[[]]-600" -Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" -Key : "partitions:fritz" -Key : "partitions:alex" -Key : "metadata:255" -Key : "footprint:255" -Loading data with res : 600 -Key : "255(completed):[[flops_any mem_bw core_power acc_mem_used cpu_load mem_used acc_power cpu_power nv_sm_clock ipc cpu_user clock nv_mem_util nv_temp acc_utilization]],[[node accelerator socket core]]-600" -Key : "var/job-archive/alex/679/951/1675866122/data.json.gz" -Existing key : "var/job-archive/alex/679/951/1675866122/data.json.gz" in cache with value diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index c551750..d3fe0d6 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -110,7 +110,7 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - requestPolicy:"network-only" + // requestPolicy:"network-only" }); if ($metricData && !$metricData.fetching) { From 01845a0cb71fec382df774afb44a41809a9c82ea Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 22 Aug 2024 18:33:18 +0200 Subject: [PATCH 10/32] add comment regarding metric data load --- web/frontend/src/job/Metric.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index d3fe0d6..f5e1851 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -110,7 +110,7 @@ client: client, query: subQuery, variables: { dbid, selectedMetrics, selectedScopes, selectedResolution }, - // requestPolicy:"network-only" + // Never user network-only: causes reactive load-loop! 
}); if ($metricData && !$metricData.fetching) { From 95fe36964841e538410063d2fe633d6b1283bed1 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 13:26:56 +0200 Subject: [PATCH 11/32] fix: add additionally loaded scopes to statsTable again --- web/frontend/src/Job.root.svelte | 1 + web/frontend/src/job/Metric.svelte | 11 ++++++++++- web/frontend/src/job/StatsTable.svelte | 7 +++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 5c1d004..57600b7 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -348,6 +348,7 @@ {#if item.data} statsTable.moreLoaded(detail)} job={$initq.data.job} metricName={item.metric} metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit} diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index f5e1851..e6d7af6 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -13,6 +13,9 @@ --> From adc3502b6b31535fd015defc5f046b3a8d87cb0d Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 13:37:42 +0200 Subject: [PATCH 12/32] cleanup dev logline --- internal/graph/schema.resolvers.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 0eba013..fc3ff42 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -236,7 +236,6 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str return nil, err } - log.Debugf(">>>>> REQUEST DATA HERE FOR %v AT SCOPE %v WITH RESOLUTION OF %d", metrics, scopes, *resolution) data, err := metricdata.LoadData(job, metrics, scopes, ctx, *resolution) if err != nil { log.Warn("Error while loading job data") From 9fe7cdca9215220a19930779a60c8afc910276a3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 13:53:15 +0200 Subject: [PATCH 13/32] fix: fix plot labeling if specific host selected, hide loadall if only node returned --- web/frontend/src/job/Metric.svelte | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index e6d7af6..10a533a 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -47,6 +47,7 @@ let pendingResolution = 600; let selectedScopeIndex = scopes.findIndex((s) => s == minScope(scopes)); let patternMatches = false; + let nodeOnly = false; // If, after load-all, still only node scope returned let statsSeries = rawData.map((data) => data?.statisticsSeries ? 
data.statisticsSeries : null); const dispatch = createEventDispatcher(); @@ -126,6 +127,7 @@ // Set selected scope to min of returned scopes if (selectedScope == "load-all") { selectedScope = minScope(scopes) + nodeOnly = (selectedScope == "node") // "node" still only scope after load-all } const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node") @@ -146,9 +148,14 @@ $: data = rawData[selectedScopeIndex]; - $: series = data?.series.filter( + $: series = data?.series?.filter( (series) => selectedHost == null || series.hostname == selectedHost, ); + + $: resources = job?.resources?.filter( + (resource) => selectedHost == null || resource.hostname == selectedHost, + ); + @@ -162,7 +169,7 @@ {/if} {/each} - {#if scopes.length == 1 && nativeScope != "node"} + {#if scopes.length == 1 && nativeScope != "node" && !nodeOnly} {/if} @@ -197,7 +204,7 @@ metric={metricName} {series} {isShared} - resources={job.resources} + {resources} /> {:else if statsSeries[selectedScopeIndex] != null && patternMatches} From d34e0d9348041d6856be6c777b84419bd046da54 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Fri, 23 Aug 2024 16:59:45 +0200 Subject: [PATCH 14/32] fix: omit resources prop from metricPlot, use series for legend instead --- web/frontend/src/Node.root.svelte | 1 - web/frontend/src/Systems.root.svelte | 1 - web/frontend/src/generic/joblist/JobListRow.svelte | 1 - web/frontend/src/generic/plots/MetricPlot.svelte | 9 +++------ web/frontend/src/job/Metric.svelte | 6 ------ 5 files changed, 3 insertions(+), 15 deletions(-) diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index 2d58540..0553035 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -207,7 +207,6 @@ cluster={clusters.find((c) => c.name == cluster)} subCluster={$nodeMetricsData.data.nodeMetrics[0].subCluster} series={item.metric.series} - resources={[{ hostname: hostname }]} forNode={true} /> {:else if item.disabled === true && item.metric} diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index c483401..0d5e70e 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -206,7 +206,6 @@ metric={item.data.name} cluster={clusters.find((c) => c.name == cluster)} subCluster={item.subCluster} - resources={[{ hostname: item.host }]} forNode={true} /> {:else if item.disabled === true && item.data} diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 1d8529e..274e4f0 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -169,7 +169,6 @@ {cluster} subCluster={job.subCluster} isShared={job.exclusive != 1} - resources={job.resources} numhwthreads={job.numHWThreads} numaccs={job.numAcc} /> diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 8dd1dbf..d092413 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -6,7 +6,6 @@ Properties: - `metric String`: The metric name - `scope String?`: Scope of the displayed data [Default: node] - - `resources [GraphQL.Resource]`: List of resources used for parent job - `width Number`: The plot width - `height Number`: The plot height - `timestep Number`: The timestep used for X-axis rendering @@ -16,7 +15,7 @@ - `cluster GraphQL.Cluster`: Cluster Object of the parent job - 
`subCluster String`: Name of the subCluster of the parent job - `isShared Bool?`: If this job used shared resources; will adapt threshold indicators accordingly [Default: false] - - `forNode Bool?`: If this plot is used for node data display; will render x-axis as negative time with $now as maximum [Default: false] - `numhwthreads Number?`: Number of job HWThreads [Default: 0] - `numaccs Number?`: Number of job Accelerators [Default: 0] --> @@ -118,7 +117,6 @@ export let metric; export let scope = "node"; - export let resources = []; export let width; export let height; export let timestep; @@ -363,9 +361,8 @@ plotSeries.push({ label: scope === "node" - ? resources[i].hostname - : // scope === 'accelerator' ? resources[0].accelerators[i] : - scope + " #" + (i + 1), + ? series[i].hostname + : scope + " #" + (i + 1), scale: "y", width: lineWidth, stroke: lineColor(i, series.length), diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 10a533a..71cf2e7 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -151,11 +151,6 @@ $: series = data?.series?.filter( (series) => selectedHost == null || series.hostname == selectedHost, ); - - $: resources = job?.resources?.filter( - (resource) => selectedHost == null || resource.hostname == selectedHost, - ); - @@ -204,7 +199,6 @@ metric={metricName} {series} {isShared} - {resources} /> {:else if statsSeries[selectedScopeIndex] != null && patternMatches} From 561fd41d5f9a25939dda9b5c0b6a6b4dc957af70 Mon Sep 17 00:00:00 2001 From: Aditya Ujeniya Date: Sun, 25 Aug 2024 16:13:43 +0200 Subject: [PATCH 15/32] Fix for resampler --- internal/metricdata/cc-metric-store.go | 12 ++++++------ pkg/resampler/resampler.go | 14 ++++++++++++-- pkg/resampler/util.go | 14 ++++++++++++-- 3 files changed, 30 insertions(+), 10 deletions(-) diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 53469f0..4a86352 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -528,14 +528,14 @@ func (ccms *CCMetricStore) LoadStats( ctx context.Context, ) (map[string]map[string]schema.MetricStatistics, error) { - metricConfigs := archive.GetCluster(job.Cluster).MetricConfig - resolution := 9000 + // metricConfigs := archive.GetCluster(job.Cluster).MetricConfig + // resolution := 9000 - for _, mc := range metricConfigs { - resolution = min(resolution, mc.Timestep) - } + // for _, mc := range metricConfigs { + // resolution = min(resolution, mc.Timestep) + // } - queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, resolution) // #166 Add scopes here for analysis view accelerator normalization? + queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scopes here for analysis view accelerator normalization?
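	// (A resolution of 0 acts as a sentinel for "finest available timestep":
	// later patches in this series note "0 Resolution-Value retrieves highest
	// res (60s)", and the LoadData path falls back to mc.Timestep when the
	// returned row reports a resolution of 0.)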
if err != nil { log.Warn("Error while building query") return nil, err diff --git a/pkg/resampler/resampler.go b/pkg/resampler/resampler.go index 2c06b38..26cead0 100644 --- a/pkg/resampler/resampler.go +++ b/pkg/resampler/resampler.go @@ -90,6 +90,7 @@ func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_freq maxArea := -1.0 var maxAreaPoint int + flag_ := 0 for ; currBucketStart < currBucketEnd; currBucketStart++ { area := calculateTriangleArea(schema.Float(pointX), pointY, avgPointX, avgPointY, schema.Float(currBucketStart), data[currBucketStart]) @@ -97,10 +98,19 @@ func LargestTriangleThreeBucket(data []schema.Float, old_frequency int, new_freq maxArea = area maxAreaPoint = currBucketStart } + if math.IsNaN(float64(avgPointY)) { + flag_ = 1 + + } } - new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket - prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMAxAreaPoint + if flag_ == 1 { + new_data = append(new_data, schema.NaN) // Pick this point from the bucket + + } else { + new_data = append(new_data, data[maxAreaPoint]) // Pick this point from the bucket + } + prevMaxAreaPoint = maxAreaPoint // This MaxArea point is the next's prevMAxAreaPoint //move to the next window bucketLow = bucketMiddle diff --git a/pkg/resampler/util.go b/pkg/resampler/util.go index 605f638..36d8bed 100644 --- a/pkg/resampler/util.go +++ b/pkg/resampler/util.go @@ -12,14 +12,24 @@ func calculateTriangleArea(paX, paY, pbX, pbY, pcX, pcY schema.Float) float64 { } func calculateAverageDataPoint(points []schema.Float, xStart int64) (avgX schema.Float, avgY schema.Float) { - + flag := 0 for _, point := range points { avgX += schema.Float(xStart) avgY += point xStart++ + if math.IsNaN(float64(point)) { + flag = 1 + } } + l := schema.Float(len(points)) + avgX /= l avgY /= l - return avgX, avgY + + if flag == 1 { + return avgX, schema.NaN + } else { + return avgX, avgY + } } From 55027cb63030ae255dbcefd6aba47b4bef074556 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 26 Aug 2024 09:55:33 +0200 Subject: [PATCH 16/32] fix: add resolution 60 default to ccms nodeData query --- internal/metricdata/cc-metric-store.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 53469f0..eedb601 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -613,8 +613,9 @@ func (ccms *CCMetricStore) LoadNodeData( for _, node := range nodes { for _, metric := range metrics { req.Queries = append(req.Queries, ApiQuery{ - Hostname: node, - Metric: ccms.toRemoteName(metric), + Hostname: node, + Metric: ccms.toRemoteName(metric), + Resolution: 60, // Default for Node Queries }) } } From a59df12595d0a7ae43a5a87d2f252746311fa958 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 26 Aug 2024 17:37:23 +0200 Subject: [PATCH 17/32] init basic proof of concept --- .../src/generic/joblist/JobListRow.svelte | 4 ++++ .../src/generic/plots/MetricPlot.svelte | 20 ++++++++++++++++++- web/frontend/src/job/Metric.svelte | 8 ++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 274e4f0..197b8b3 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -159,6 +159,10 @@ {#if metric.disabled == false && metric.data} { + // 
filterComponent.updateFilters(detail) + console.log("Upstream New Res:", detail) + }} width={plotWidth} height={plotHeight} timestep={metric.data.metric.timestep} diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index d092413..4fc1b77 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -112,7 +112,7 @@ {#each links as item} - {#if !item.perCluster} + {#if item.listOptions} + + + + {item.title} + + + + All Clusters + + + {#each clusters as cluster} + + + {cluster.name} + + + + Running Jobs + + + + {/each} + + + {:else if !item.perCluster} {item.title} From 54f3a261c5d4af0d313c54d1cae465762b6fe82b Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Sep 2024 18:20:32 +0200 Subject: [PATCH 22/32] Rewrite sqlite indices from scratch for v8 migration --- .../sqlite3/08_add-footprint.up.sql | 52 ++++++++++++++++++- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index bcd6494..c101c6e 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -1,5 +1,11 @@ -CREATE INDEX IF NOT EXISTS job_by_project ON job (project); -CREATE INDEX IF NOT EXISTS job_list_projects ON job (project, job_state); +DROP INDEX job_stats; +DROP INDEX job_by_user; +DROP INDEX job_by_starttime; +DROP INDEX job_by_job_id; +DROP INDEX job_list; +DROP INDEX job_list_user; +DROP INDEX job_list_users; +DROP INDEX job_list_users_start; ALTER TABLE job ADD COLUMN energy REAL NOT NULL DEFAULT 0.0; ALTER TABLE job ADD COLUMN energy_footprint TEXT DEFAULT NULL; @@ -24,3 +30,45 @@ ALTER TABLE job DROP net_bw_avg; ALTER TABLE job DROP net_data_vol_total; ALTER TABLE job DROP file_bw_avg; ALTER TABLE job DROP file_data_vol_total; + +CREATE INDEX jobs_cluster IF NOT EXISTS ON job (cluster); +CREATE INDEX jobs_cluster_starttime IF NOT EXISTS ON job (cluster, start_time); +CREATE INDEX jobs_cluster_user IF NOT EXISTS ON job (cluster, user); +CREATE INDEX jobs_cluster_project IF NOT EXISTS ON job (cluster, project); +CREATE INDEX jobs_cluster_subcluster IF NOT EXISTS ON job (cluster, subcluster); + +CREATE INDEX jobs_cluster_partition IF NOT EXISTS ON job (cluster, partition); +CREATE INDEX jobs_cluster_partition_starttime IF NOT EXISTS ON job (cluster, partition, start_time); +CREATE INDEX jobs_cluster_partition_jobstate IF NOT EXISTS ON job (cluster, partition, job_state); +CREATE INDEX jobs_cluster_partition_jobstate_user IF NOT EXISTS ON job (cluster, partition, job_state, user); +CREATE INDEX jobs_cluster_partition_jobstate_project IF NOT EXISTS ON job (cluster, partition, job_state, project); +CREATE INDEX jobs_cluster_partition_jobstate_starttime IF NOT EXISTS ON job (cluster, partition, job_state, start_time); + +CREATE INDEX jobs_cluster_jobstate IF NOT EXISTS ON job (cluster, job_state); +CREATE INDEX jobs_cluster_jobstate_starttime IF NOT EXISTS ON job (cluster, job_state, starttime); +CREATE INDEX jobs_cluster_jobstate_user IF NOT EXISTS ON job (cluster, job_state, user); +CREATE INDEX jobs_cluster_jobstate_project IF NOT EXISTS ON job (cluster, job_state, project); + +CREATE INDEX jobs_user IF NOT EXISTS ON job (user); +CREATE INDEX jobs_user_starttime IF NOT EXISTS ON job (user, start_time); + +CREATE INDEX jobs_project IF NOT EXISTS ON job (project); +CREATE 
INDEX jobs_project_starttime IF NOT EXISTS ON job (project, start_time); +CREATE INDEX jobs_project_user IF NOT EXISTS ON job (project, user); + +CREATE INDEX jobs_jobstate IF NOT EXISTS ON job (job_state); +CREATE INDEX jobs_jobstate_user IF NOT EXISTS ON job (job_state, user); +CREATE INDEX jobs_jobstate_project IF NOT EXISTS ON job (job_state, project); +CREATE INDEX jobs_jobstate_cluster IF NOT EXISTS ON job (job_state, cluster); +CREATE INDEX jobs_jobstate_starttime IF NOT EXISTS ON job (job_state, start_time); + +CREATE INDEX jobs_arrayjobid_starttime IF NOT EXISTS ON job (array_job_id, start_time); +CREATE INDEX jobs_cluster_arrayjobid_starttime IF NOT EXISTS ON job (cluster, array_job_id, start_time); + +CREATE INDEX jobs_starttime IF NOT EXISTS ON job (start_time); +CREATE INDEX jobs_duration IF NOT EXISTS ON job (duration); +CREATE INDEX jobs_numnodes IF NOT EXISTS ON job (num_nodes); +CREATE INDEX jobs_numhwthreads IF NOT EXISTS ON job (num_hwthreads); +CREATE INDEX jobs_numacc IF NOT EXISTS ON job (num_acc); + +PRAGMA optimize; From 7602641909c15576a4a626c3069ec020ac7badf7 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Sep 2024 18:22:34 +0200 Subject: [PATCH 23/32] feat: change to resolution increase on zoom --- .../src/generic/joblist/JobListRow.svelte | 29 ++++++++--- .../src/generic/plots/MetricPlot.svelte | 52 ++++++++++++++----- web/frontend/src/job/Metric.svelte | 50 ++++++++++-------- 3 files changed, 90 insertions(+), 41 deletions(-) diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 197b8b3..5581903 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -32,12 +32,14 @@ ? ["core", "accelerator"] : ["core"] : ["node"]; + let selectedResolution = 600; + let zoomStates = {}; const cluster = getContext("clusters").find((c) => c.name == job.cluster); const client = getContextClient(); const query = gql` - query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!) 
{ - jobMetrics(id: $id, metrics: $metrics, scopes: $scopes) { + query ($id: ID!, $metrics: [String!]!, $scopes: [MetricScope!]!, $selectedResolution: Int) { + jobMetrics(id: $id, metrics: $metrics, scopes: $scopes, resolution: $selectedResolution) { name scope metric { @@ -66,17 +68,30 @@ } `; + function handleZoom(detail, metric) { + if ( + (zoomStates[metric]?.x?.min !== detail?.lastZoomState?.x?.min) && + (zoomStates[metric]?.y?.max !== detail?.lastZoomState?.y?.max) + ) { + zoomStates[metric] = {...detail.lastZoomState} + } + + if (detail?.newRes) { // Triggers GQL + selectedResolution = detail.newRes + } + } + $: metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes }, + variables: { id, metrics, scopes, selectedResolution }, }); function refreshMetrics() { metricsQuery = queryStore({ client: client, query: query, - variables: { id, metrics, scopes }, + variables: { id, metrics, scopes, selectedResolution }, // requestPolicy: 'network-only' // use default cache-first for refresh }); } @@ -159,10 +174,7 @@ {#if metric.disabled == false && metric.data} { - // filterComponent.updateFilters(detail) - console.log("Upstream New Res:", detail) - }} + on:zoom={({detail}) => { handleZoom(detail, metric.data.name) }} width={plotWidth} height={plotHeight} timestep={metric.data.metric.timestep} @@ -175,6 +187,7 @@ isShared={job.exclusive != 1} numhwthreads={job.numHWThreads} numaccs={job.numAcc} + zoomState={zoomStates[metric.data.name]} /> {:else if metric.disabled == true && metric.data} { + u.over.addEventListener("dblclick", (e) => { + console.log('Dispatch Reset') + dispatch('zoom', { + lastZoomState: { + x: { time: false }, + y: { auto: true } + } + }); + }); + } + ], draw: [ (u) => { // Draw plot type label: @@ -437,17 +453,26 @@ setScale: [ (u, key) => { if (key === 'x') { - // Start - console.log('setScale X', key); - - // Decide which resolution to request - - // Dispatch request - const res = 1337; - dispatch('zoom-in', { - newres: res, - }); - + const numX = (u.series[0].idxs[1] - u.series[0].idxs[0]) + if (numX <= 20 && timestep !== 60) { // Zoom IN if not at MAX + console.log('Dispatch Zoom') + if (timestep == 600) { + dispatch('zoom', { + newRes: 240, + lastZoomState: u?.scales + }); + } else if (timestep === 240) { + dispatch('zoom', { + newRes: 60, + lastZoomState: u?.scales + }); + } + } else { + console.log('Dispatch Update') + dispatch('zoom', { + lastZoomState: u?.scales + }); + } }; } ] @@ -481,6 +506,10 @@ if (!uplot) { opts.width = width; opts.height = height; + if (zoomState) { + // console.log('Use last state for uPlot init:', metric, scope, zoomState) + opts.scales = {...zoomState} + } uplot = new uPlot(opts, plotData, plotWrapper); } else { uplot.setSize({ width, height }); @@ -489,7 +518,6 @@ function onSizeChange() { if (!uplot) return; - if (timeoutId != null) clearTimeout(timeoutId); timeoutId = setTimeout(() => { diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index 41e3046..ceacca5 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -27,7 +27,9 @@ Spinner, Card, } from "@sveltestrap/sveltestrap"; - import { minScope } from "../generic/utils.js"; + import { + minScope, + } from "../generic/utils.js"; import Timeseries from "../generic/plots/MetricPlot.svelte"; export let job; @@ -39,9 +41,8 @@ export let rawData; export let isShared = false; - let selectedHost = null, - plot, - error = null; + let selectedHost = null; + let error = null; let 
selectedScope = minScope(scopes); let selectedResolution; let pendingResolution = 600; @@ -49,11 +50,12 @@ let patternMatches = false; let nodeOnly = false; // If, after load-all, still only node scope returned let statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null); + let zoomState = null; + let pendingZoomState = null; const dispatch = createEventDispatcher(); const statsPattern = /(.*)-stat$/; const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? metricUnit.base : ""); - const resolutions = [600, 240, 60] // DEV: Make configable const client = getContextClient(); const subQuery = gql` query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!, $selectedResolution: Int) { @@ -86,6 +88,19 @@ } `; + function handleZoom(detail) { + if ( // States have to differ, causes deathloop if just set + (pendingZoomState?.x?.min !== detail?.lastZoomState?.x?.min) && + (pendingZoomState?.y?.max !== detail?.lastZoomState?.y?.max) + ) { + pendingZoomState = {...detail.lastZoomState} + } + + if (detail?.newRes) { // Triggers GQL + pendingResolution = detail.newRes + } + } + let metricData; let selectedScopes = [...scopes] const dbid = job.id; @@ -119,11 +134,15 @@ }); if ($metricData && !$metricData.fetching) { - rawData = $metricData.data.singleUpdate.map((x) => x.metric) scopes = $metricData.data.singleUpdate.map((x) => x.scope) statsSeries = rawData.map((data) => data?.statisticsSeries ? data.statisticsSeries : null) + // Keep Zoomlevel if ResChange By Zoom + if (pendingZoomState) { + zoomState = {...pendingZoomState} + } + // Set selected scope to min of returned scopes if (selectedScope == "load-all") { selectedScope = minScope(scopes) @@ -176,11 +195,6 @@ {/each} {/if} - {#key series} {#if $metricData?.fetching == true} @@ -189,11 +203,7 @@ {error.message} {:else if series != null && !patternMatches} { - // filterComponent.updateFilters(detail) - console.log("Upstream New Res:", detail) - }} + on:zoom={({detail}) => { handleZoom(detail) }} {width} height={300} cluster={job.cluster} @@ -203,14 +213,11 @@ metric={metricName} {series} {isShared} + {zoomState} /> {:else if statsSeries[selectedScopeIndex] != null && patternMatches} { - // filterComponent.updateFilters(detail) - console.log("Upstream New Res:", detail) - }} + on:zoom={({detail}) => { handleZoom(detail) }} {width} height={300} cluster={job.cluster} @@ -220,6 +227,7 @@ metric={metricName} {series} {isShared} + {zoomState} statisticsSeries={statsSeries[selectedScopeIndex]} useStatsSeries={!!statsSeries[selectedScopeIndex]} /> From 5eb6f7d307502c20459d480b4d58abd39c36743c Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 2 Sep 2024 18:45:33 +0200 Subject: [PATCH 24/32] fix: user name join not required for normal jobStats --- internal/repository/stats.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/repository/stats.go b/internal/repository/stats.go index ef1cf9e..aa38d29 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -89,7 +89,7 @@ func (r *JobRepository) buildStatsQuery( ).From("job").Join("user ON user.username = job.user").GroupBy(col) } else { // Scan columns: totalJobs, name, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours - query = sq.Select("COUNT(job.id)", "name", + query = sq.Select("COUNT(job.id)", fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as 
%s)`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s)`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s)`, time.Now().Unix(), castType), @@ -97,7 +97,7 @@ func (r *JobRepository) buildStatsQuery( fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s)`, time.Now().Unix(), castType), fmt.Sprintf(`CAST(SUM(job.num_acc) as %s)`, castType), fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s)`, time.Now().Unix(), castType), - ).From("job").Join("user ON user.username = job.user") + ).From("job") } for _, f := range filter { From 6443541a79a5a31fc4b2fac2dcc5c0d2764d11b0 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 3 Sep 2024 09:34:45 +0200 Subject: [PATCH 25/32] fix SQL migration syntax --- .../sqlite3/08_add-footprint.up.sql | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index c101c6e..59ab747 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -31,44 +31,44 @@ ALTER TABLE job DROP net_data_vol_total; ALTER TABLE job DROP file_bw_avg; ALTER TABLE job DROP file_data_vol_total; -CREATE INDEX jobs_cluster IF NOT EXISTS ON job (cluster); -CREATE INDEX jobs_cluster_starttime IF NOT EXISTS ON job (cluster, start_time); -CREATE INDEX jobs_cluster_user IF NOT EXISTS ON job (cluster, user); -CREATE INDEX jobs_cluster_project IF NOT EXISTS ON job (cluster, project); -CREATE INDEX jobs_cluster_subcluster IF NOT EXISTS ON job (cluster, subcluster); +CREATE INDEX IF NOT EXISTS jobs_cluster ON job (cluster); +CREATE INDEX IF NOT EXISTS jobs_cluster_starttime ON job (cluster, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_user ON job (cluster, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_project ON job (cluster, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_subcluster ON job (cluster, subcluster); -CREATE INDEX jobs_cluster_partition IF NOT EXISTS ON job (cluster, partition); -CREATE INDEX jobs_cluster_partition_starttime IF NOT EXISTS ON job (cluster, partition, start_time); -CREATE INDEX jobs_cluster_partition_jobstate IF NOT EXISTS ON job (cluster, partition, job_state); -CREATE INDEX jobs_cluster_partition_jobstate_user IF NOT EXISTS ON job (cluster, partition, job_state, user); -CREATE INDEX jobs_cluster_partition_jobstate_project IF NOT EXISTS ON job (cluster, partition, job_state, project); -CREATE INDEX jobs_cluster_partition_jobstate_starttime IF NOT EXISTS ON job (cluster, partition, job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition ON job (cluster, partition); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_starttime ON job (cluster, partition, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate ON job (cluster, partition, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_user ON job (cluster, partition, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (cluster, partition, job_state, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, 
partition, job_state, start_time); -CREATE INDEX jobs_cluster_jobstate IF NOT EXISTS ON job (cluster, job_state); -CREATE INDEX jobs_cluster_jobstate_starttime IF NOT EXISTS ON job (cluster, job_state, starttime); -CREATE INDEX jobs_cluster_jobstate_user IF NOT EXISTS ON job (cluster, job_state, user); -CREATE INDEX jobs_cluster_jobstate_project IF NOT EXISTS ON job (cluster, job_state, project); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, starttime); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); -CREATE INDEX jobs_user IF NOT EXISTS ON job (user); -CREATE INDEX jobs_user_starttime IF NOT EXISTS ON job (user, start_time); +CREATE INDEX IF NOT EXISTS jobs_user ON job (user); +CREATE INDEX IF NOT EXISTS jobs_user_starttime ON job (user, start_time); -CREATE INDEX jobs_project IF NOT EXISTS ON job (project); -CREATE INDEX jobs_project_starttime IF NOT EXISTS ON job (project, start_time); -CREATE INDEX jobs_project_user IF NOT EXISTS ON job (project, user); +CREATE INDEX IF NOT EXISTS jobs_project ON job (project); +CREATE INDEX IF NOT EXISTS jobs_project_starttime ON job (project, start_time); +CREATE INDEX IF NOT EXISTS jobs_project_user ON job (project, user); -CREATE INDEX jobs_jobstate IF NOT EXISTS ON job (job_state); -CREATE INDEX jobs_jobstate_user IF NOT EXISTS ON job (job_state, user); -CREATE INDEX jobs_jobstate_project IF NOT EXISTS ON job (job_state, project); -CREATE INDEX jobs_jobstate_cluster IF NOT EXISTS ON job (job_state, cluster); -CREATE INDEX jobs_jobstate_starttime IF NOT EXISTS ON job (job_state, start_time); +CREATE INDEX IF NOT EXISTS jobs_jobstate ON job (job_state); +CREATE INDEX IF NOT EXISTS jobs_jobstate_user ON job (job_state, user); +CREATE INDEX IF NOT EXISTS jobs_jobstate_project ON job (job_state, project); +CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); +CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); -CREATE INDEX jobs_arrayjobid_starttime IF NOT EXISTS ON job (array_job_id, start_time); -CREATE INDEX jobs_cluster_arrayjobid_starttime IF NOT EXISTS ON job (cluster, array_job_id, start_time); +CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time); +CREATE INDEX jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); -CREATE INDEX jobs_starttime IF NOT EXISTS ON job (start_time); -CREATE INDEX jobs_duration IF NOT EXISTS ON job (duration); -CREATE INDEX jobs_numnodes IF NOT EXISTS ON job (num_nodes); -CREATE INDEX jobs_numhwthreads IF NOT EXISTS ON job (num_hwthreads); -CREATE INDEX jobs_numacc IF NOT EXISTS ON job (num_acc); +CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); +CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); +CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); +CREATE INDEX IF NOT EXISTS jobs_numhwthreads ON job (num_hwthreads); +CREATE INDEX IF NOT EXISTS jobs_numacc ON job (num_acc); PRAGMA optimize; From 275a77807eac22df6db41464bf94d7aa463cdd03 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 3 Sep 2024 09:40:00 +0200 Subject: [PATCH 26/32] fix typo in migration --- internal/repository/migrations/sqlite3/08_add-footprint.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 59ab747..4fb5e94 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -45,7 +45,7 @@ CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_project ON job (clust CREATE INDEX IF NOT EXISTS jobs_cluster_partition_jobstate_starttime ON job (cluster, partition, job_state, start_time); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate ON job (cluster, job_state); -CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, starttime); +CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_starttime ON job (cluster, job_state, start_time); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_user ON job (cluster, job_state, user); CREATE INDEX IF NOT EXISTS jobs_cluster_jobstate_project ON job (cluster, job_state, project); From 4b1b34d8a7efc5fbd8570c28fe7f8828ee370e82 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 3 Sep 2024 13:10:44 +0200 Subject: [PATCH 27/32] remove logging, remove forced change to node scope --- web/frontend/src/generic/plots/MetricPlot.svelte | 6 +++--- web/frontend/src/job/Metric.svelte | 7 ------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/web/frontend/src/generic/plots/MetricPlot.svelte b/web/frontend/src/generic/plots/MetricPlot.svelte index 05ebc52..a73f993 100644 --- a/web/frontend/src/generic/plots/MetricPlot.svelte +++ b/web/frontend/src/generic/plots/MetricPlot.svelte @@ -399,7 +399,7 @@ init: [ (u) => { u.over.addEventListener("dblclick", (e) => { - console.log('Dispatch Reset') + // console.log('Dispatch Reset') dispatch('zoom', { lastZoomState: { x: { time: false }, @@ -455,7 +455,7 @@ if (key === 'x') { const numX = (u.series[0].idxs[1] - u.series[0].idxs[0]) if (numX <= 20 && timestep !== 60) { // Zoom IN if not at MAX - console.log('Dispatch Zoom') + // console.log('Dispatch Zoom') if (timestep == 600) { dispatch('zoom', { newRes: 240, @@ -468,7 +468,7 @@ }); } } else { - console.log('Dispatch Update') + // console.log('Dispatch Update') dispatch('zoom', { lastZoomState: u?.scales }); diff --git a/web/frontend/src/job/Metric.svelte b/web/frontend/src/job/Metric.svelte index ceacca5..cb41f2b 100644 --- a/web/frontend/src/job/Metric.svelte +++ b/web/frontend/src/job/Metric.svelte @@ -117,13 +117,6 @@ selectedScopes = [...scopes, "socket", "core", "accelerator"] } - if ((selectedResolution !== pendingResolution) && selectedScopes.length >= 2) { - selectedScope = String("node") - selectedScopes = ["node"] - // Instead of adding acc to load-all: always add by default if native is acc - // selectedScopes = nativeScope == "accelerator" ? 
["node", "accelerator"] : ["node"] - } - selectedResolution = Number(pendingResolution) metricData = queryStore({ From 193bee5ac8f4718bc42de4efe63d68a7eb5a9842 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 3 Sep 2024 14:16:16 +0200 Subject: [PATCH 28/32] fix: prevent addition of existing scopes to table --- web/frontend/src/job/StatsTable.svelte | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/web/frontend/src/job/StatsTable.svelte b/web/frontend/src/job/StatsTable.svelte index e6a03e3..aa5aa5f 100644 --- a/web/frontend/src/job/StatsTable.svelte +++ b/web/frontend/src/job/StatsTable.svelte @@ -25,8 +25,8 @@ export let job; export let jobMetrics; - const allMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort(), - scopesForMetric = (metric) => + const allMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort() + const scopesForMetric = (metric) => jobMetrics.filter((jm) => jm.name == metric).map((jm) => jm.scope); let hosts = job.resources.map((r) => r.hostname).sort(), @@ -87,8 +87,12 @@ } export function moreLoaded(moreJobMetrics) { - jobMetrics = [...jobMetrics, ...moreJobMetrics] - } + moreJobMetrics.forEach(function (newMetric) { + if (!jobMetrics.some((m) => m.scope == newMetric.scope)) { + jobMetrics = [...jobMetrics, newMetric] + } + }); + };
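The NaN handling that PATCH 15 added to pkg/resampler is worth spelling out before the migration fixes continue: if a bucket feeding the largest-triangle-three-bucket (LTTB) selection contains even one NaN sample, the resampled output must carry a NaN instead of an invented value, or measurement gaps silently vanish when archived data is downsampled. Below is a minimal, self-contained Go sketch of that bucket-averaging idea; bucketAverage is an illustrative stand-in for the package's calculateAverageDataPoint and uses plain float64 instead of schema.Float.

```go
package main

import (
	"fmt"
	"math"
)

// bucketAverage mirrors the fix in pkg/resampler/util.go: average a bucket
// of samples, but flag the result as NaN as soon as any sample is NaN so
// that measurement gaps survive downsampling.
func bucketAverage(points []float64, xStart int) (avgX, avgY float64) {
	sawNaN := false
	for i, y := range points {
		avgX += float64(xStart + i)
		avgY += y
		if math.IsNaN(y) {
			sawNaN = true
		}
	}
	n := float64(len(points))
	avgX /= n
	avgY /= n
	if sawNaN {
		// Arithmetically avgY is already NaN here; the explicit flag makes
		// the intent visible, matching the patch.
		return avgX, math.NaN()
	}
	return avgX, avgY
}

func main() {
	// A bucket containing a gap: the averaged point must report NaN, so the
	// LTTB point picker can emit NaN for this bucket instead of a real value.
	x, y := bucketAverage([]float64{1.0, math.NaN(), 3.0}, 10)
	fmt.Println(x, math.IsNaN(y)) // 11 true
}
```

The same flag drives the picker in LargestTriangleThreeBucket: when the look-ahead average is NaN, the point appended for that bucket is NaN rather than the max-area sample.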
From b04bf6a9517f3f7d2b326a2e13e4a798033bb8fb Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 5 Sep 2024 15:00:43 +0200 Subject: [PATCH 29/32] fix missing condition in migration --- internal/repository/migrations/sqlite3/08_add-footprint.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql index 4fb5e94..7f0d578 100644 --- a/internal/repository/migrations/sqlite3/08_add-footprint.up.sql +++ b/internal/repository/migrations/sqlite3/08_add-footprint.up.sql @@ -63,7 +63,7 @@ CREATE INDEX IF NOT EXISTS jobs_jobstate_cluster ON job (job_state, cluster); CREATE INDEX IF NOT EXISTS jobs_jobstate_starttime ON job (job_state, start_time); CREATE INDEX IF NOT EXISTS jobs_arrayjobid_starttime ON job (array_job_id, start_time); -CREATE INDEX jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); +CREATE INDEX IF NOT EXISTS jobs_cluster_arrayjobid_starttime ON job (cluster, array_job_id, start_time); CREATE INDEX IF NOT EXISTS jobs_starttime ON job (start_time); CREATE INDEX IF NOT EXISTS jobs_duration ON job (duration); CREATE INDEX IF NOT EXISTS jobs_numnodes ON job (num_nodes); From 7400273b0a42f536b9869340fcf21a0f5edb5a3f Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Thu, 5 Sep 2024 17:27:18 +0200 Subject: [PATCH 30/32] Manual merge changes not staged last time ... --- internal/archiver/archiver.go | 2 +- internal/metricDataDispatcher/dataLoader.go | 33 ++++++++++++++++--- .../taskManager/updateFootprintService.go | 2 +- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index de84cf0..1c4a3ec 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -34,7 +34,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { scopes = append(scopes, schema.MetricScopeAccelerator) } - jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx) + jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, ctx, 0) // 0 Resolution-Value retrieves highest res (60s) if err != nil { log.Error("Error while loading job data for archiving") return nil, err } diff --git a/internal/metricDataDispatcher/dataLoader.go b/internal/metricDataDispatcher/dataLoader.go index 2c7cfa6..121fbf4 100644 --- a/internal/metricDataDispatcher/dataLoader.go +++ b/internal/metricDataDispatcher/dataLoader.go @@ -14,6 +14,7 @@ import ( "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/lrucache" + "github.com/ClusterCockpit/cc-backend/pkg/resampler" "github.com/ClusterCockpit/cc-backend/pkg/schema" ) @@ -23,11 +24,12 @@ func cacheKey( job *schema.Job, metrics []string, scopes []schema.MetricScope, + resolution int, ) string { // Duration and StartTime do not need to be in the cache key as StartTime is less unique than // job.ID and the TTL of the cache entry makes sure it does not stay there forever. - return fmt.Sprintf("%d(%s):[%v],[%v]", - job.ID, job.State, metrics, scopes) + return fmt.Sprintf("%d(%s):[%v],[%v]-%d", + job.ID, job.State, metrics, scopes, resolution) } // Fetches the metric data for a job. 
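The cacheKey hunk just above is the linchpin of this patch: once the same job can be served at several resolutions, the resolution must be part of the LRU cache key, otherwise a 60s request issued after a 600s request would be answered from the stale coarse entry. A standalone sketch follows; the helper name is ours, but the format string is the one from the hunk.

```go
package main

import "fmt"

// cacheKeyFor reproduces the key layout from the hunk above; note the
// trailing "-<resolution>" that keeps per-resolution entries apart.
func cacheKeyFor(jobID int64, state string, metrics, scopes []string, resolution int) string {
	return fmt.Sprintf("%d(%s):[%v],[%v]-%d", jobID, state, metrics, scopes, resolution)
}

func main() {
	coarse := cacheKeyFor(255, "completed", []string{"flops_any"}, []string{"node"}, 600)
	fine := cacheKeyFor(255, "completed", []string{"flops_any"}, []string{"node"}, 60)
	// Matches the "Key : ..." lines from the deleted sample.txt log.
	fmt.Println(coarse)         // 255(completed):[[flops_any]],[[node]]-600
	fmt.Println(coarse == fine) // false: no cross-resolution cache hits
}
```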
@@ -35,8 +37,9 @@ func LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, + resolution int, ) (schema.JobData, error) { - data := cache.Get(cacheKey(job, metrics, scopes), func() (_ interface{}, ttl time.Duration, size int) { + data := cache.Get(cacheKey(job, metrics, scopes, resolution), func() (_ interface{}, ttl time.Duration, size int) { var jd schema.JobData var err error @@ -60,7 +63,7 @@ func LoadData(job *schema.Job, } } - jd, err = repo.LoadData(job, metrics, scopes, ctx) + jd, err = repo.LoadData(job, metrics, scopes, ctx, resolution) if err != nil { if len(jd) != 0 { log.Warnf("partial error: %s", err.Error()) @@ -72,12 +75,31 @@ func LoadData(job *schema.Job, } size = jd.Size() } else { - jd, err = archive.GetHandle().LoadJobData(job) + var jd_temp schema.JobData + jd_temp, err = archive.GetHandle().LoadJobData(job) if err != nil { log.Error("Error while loading job data from archive") return err, 0, 0 } + //Deep copy the cached archive hashmap + jd = metricdata.DeepCopy(jd_temp) + + //Resampling for archived data. + //Pass the resolution from frontend here. + for _, v := range jd { + for _, v_ := range v { + timestep := 0 + for i := 0; i < len(v_.Series); i += 1 { + v_.Series[i].Data, timestep, err = resampler.LargestTriangleThreeBucket(v_.Series[i].Data, v_.Timestep, resolution) + if err != nil { + return err, 0, 0 + } + } + v_.Timestep = timestep + } + } + // Avoid sending unrequested data to the client: if metrics != nil || scopes != nil { if metrics == nil { @@ -117,6 +139,7 @@ func LoadData(job *schema.Job, } // FIXME: Review: Is this really necessary or correct. + // Note: Lines 142-170 formerly known as prepareJobData(jobData, scopes) // For /monitoring/job/ and some other places, flops_any and mem_bw need // to be available at the scope 'node'. If a job has a lot of nodes, // statisticsSeries should be available so that a min/median/max Graph can be diff --git a/internal/taskManager/updateFootprintService.go b/internal/taskManager/updateFootprintService.go index 2434fd1..2de3bd4 100644 --- a/internal/taskManager/updateFootprintService.go +++ b/internal/taskManager/updateFootprintService.go @@ -47,7 +47,7 @@ func RegisterFootprintWorker() { scopes = append(scopes, schema.MetricScopeAccelerator) for _, job := range jobs { - jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, context.Background()) + jobData, err := metricDataDispatcher.LoadData(job, allMetrics, scopes, context.Background(), 0) // 0 Resolution-Value retrieves highest res (60s) if err != nil { log.Error("Error while loading job data for footprint update") continue From f1893c596e41aaf270f3438ac2e26b1d38238042 Mon Sep 17 00:00:00 2001 From: Aditya Ujeniya Date: Tue, 17 Sep 2024 14:36:42 +0200 Subject: [PATCH 31/32] Versioning to query endpoint --- internal/metricdata/cc-metric-store.go | 16 ++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 1f3300e..f2853e3 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -140,6 +140,13 @@ func (ccms *CCMetricStore) doRequest( req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) } + // versioning the cc-metric-store query API. 
+ // v2 = data with resampling + // v1 = data without resampling + q := req.URL.Query() + q.Add("version", "v2") + req.URL.RawQuery = q.Encode() + res, err := ccms.client.Do(req) if err != nil { log.Error("Error while performing request") @@ -198,12 +205,17 @@ func (ccms *CCMetricStore) LoadData( jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric) } + res := row[0].Resolution + if res == 0 { + res = mc.Timestep + } + jobMetric, ok := jobData[metric][scope] if !ok { jobMetric = &schema.JobMetric{ Unit: mc.Unit, - Timestep: row[0].Resolution, + Timestep: res, Series: make([]schema.Series, 0), } jobData[metric][scope] = jobMetric @@ -623,7 +635,7 @@ func (ccms *CCMetricStore) LoadNodeData( resBody, err := ccms.doRequest(ctx, &req) if err != nil { - log.Error("Error while performing request") + log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) return nil, err } From 21e4870e4cf173b6c0019064015a1da92a8caba3 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Tue, 24 Sep 2024 11:13:39 +0200 Subject: [PATCH 32/32] feat: add configurability to frontend plot zoom --- internal/graph/schema.resolvers.go | 19 ++-- internal/routerConfig/routes.go | 14 +-- pkg/schema/config.go | 10 ++ pkg/schema/schemas/config.schema.json | 21 ++++ web/frontend/src/config.entrypoint.js | 3 +- web/frontend/src/config/AdminSettings.svelte | 4 +- web/frontend/src/config/admin/Options.svelte | 46 ++++++--- .../src/generic/joblist/JobListRow.svelte | 9 +- .../src/generic/plots/MetricPlot.svelte | 99 +++++++++++-------- .../src/generic/select/TimeSelection.svelte | 1 + web/frontend/src/job.entrypoint.js | 3 +- web/frontend/src/job/Metric.svelte | 13 ++- web/frontend/src/jobs.entrypoint.js | 3 +- web/frontend/src/user.entrypoint.js | 3 +- web/templates/config.tmpl | 1 + web/templates/monitoring/job.tmpl | 1 + web/templates/monitoring/jobs.tmpl | 1 + web/templates/monitoring/user.tmpl | 1 + web/web.go | 1 + 19 files changed, 165 insertions(+), 88 deletions(-) diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index ff7d62c..56b71e1 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "slices" "strconv" "strings" "time" @@ -225,8 +226,8 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) // JobMetrics is the resolver for the jobMetrics field. func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope, resolution *int) ([]*model.JobMetricWithName, error) { - defaultRes := 600 - if resolution == nil { + if resolution == nil && config.Keys.EnableResampling != nil { + defaultRes := slices.Max(config.Keys.EnableResampling.Resolutions) resolution = &defaultRes } @@ -445,11 +446,9 @@ func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } // SubCluster returns generated.SubClusterResolver implementation. 
func (r *Resolver) SubCluster() generated.SubClusterResolver { return &subClusterResolver{r} } -type ( - clusterResolver struct{ *Resolver } - jobResolver struct{ *Resolver } - metricValueResolver struct{ *Resolver } - mutationResolver struct{ *Resolver } - queryResolver struct{ *Resolver } - subClusterResolver struct{ *Resolver } -) +type clusterResolver struct{ *Resolver } +type jobResolver struct{ *Resolver } +type metricValueResolver struct{ *Resolver } +type mutationResolver struct{ *Resolver } +type queryResolver struct{ *Resolver } +type subClusterResolver struct{ *Resolver } diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 1dd6dee..e101fbd 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/util" @@ -272,12 +273,13 @@ func SetupRoutes(router *mux.Router, buildInfo web.Build) { availableRoles, _ := schema.GetValidRolesMap(user) page := web.Page{ - Title: title, - User: *user, - Roles: availableRoles, - Build: buildInfo, - Config: conf, - Infos: infos, + Title: title, + User: *user, + Roles: availableRoles, + Build: buildInfo, + Config: conf, + Resampling: config.Keys.EnableResampling, + Infos: infos, } if route.Filter { diff --git a/pkg/schema/config.go b/pkg/schema/config.go index 28fa53a..e2cb28c 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -76,6 +76,13 @@ type Retention struct { IncludeDB bool `json:"includeDB"` } +type ResampleConfig struct { + // Trigger next zoom level at less than this many visible datapoints + Trigger int `json:"trigger"` + // Array of resampling target resolutions, in seconds; Example: [600,300,60] + Resolutions []int `json:"resolutions"` +} + // Format of the configuration (file). See below for the defaults. type ProgramConfig struct { // Address where the http (or https) server will listen on (for example: 'localhost:80'). @@ -133,6 +140,9 @@ type ProgramConfig struct { // be provided! Most options here can be overwritten by the user. 
UiDefaults map[string]interface{} `json:"ui-defaults"` + // If exists, will enable dynamic zoom in frontend metric plots using the configured values + EnableResampling *ResampleConfig `json:"enable-resampling"` + // Where to store MachineState files MachineStateDir string `json:"machine-state-dir"` diff --git a/pkg/schema/schemas/config.schema.json b/pkg/schema/schemas/config.schema.json index ee64b5a..cc6c553 100644 --- a/pkg/schema/schemas/config.schema.json +++ b/pkg/schema/schemas/config.schema.json @@ -424,6 +424,27 @@ "plot_general_colorscheme", "plot_list_selectedMetrics" ] + }, + "enable-resampling": { + "description": "Enable dynamic zoom in frontend metric plots.", + "type": "object", + "properties": { + "trigger": { + "description": "Trigger next zoom level at less than this many visible datapoints.", + "type": "integer" + }, + "resolutions": { + "description": "Array of resampling target resolutions, in seconds.", + "type": "array", + "items": { + "type": "integer" + } + } + }, + "required": [ + "trigger", + "resolutions" + ] } }, "required": [ diff --git a/web/frontend/src/config.entrypoint.js b/web/frontend/src/config.entrypoint.js index 2978c8c..345056b 100644 --- a/web/frontend/src/config.entrypoint.js +++ b/web/frontend/src/config.entrypoint.js @@ -9,6 +9,7 @@ new Config({ username: username }, context: new Map([ - ['cc-config', clusterCockpitConfig] + ['cc-config', clusterCockpitConfig], + ['resampling', resampleConfig] ]) }) diff --git a/web/frontend/src/config/AdminSettings.svelte b/web/frontend/src/config/AdminSettings.svelte index d959c3b..9d3abf2 100644 --- a/web/frontend/src/config/AdminSettings.svelte +++ b/web/frontend/src/config/AdminSettings.svelte @@ -51,7 +51,5 @@ - - - + diff --git a/web/frontend/src/config/admin/Options.svelte b/web/frontend/src/config/admin/Options.svelte index 2a4e11c..a1fe307 100644 --- a/web/frontend/src/config/admin/Options.svelte +++ b/web/frontend/src/config/admin/Options.svelte @@ -3,11 +3,13 @@ --> - - - Scramble Names / Presentation Mode - - Active? - - ++ + + Scramble Names / Presentation Mode + + Active? + + + + +{#if resampleConfig} + + + + Metric Plot Resampling +
+ Triggered at {resampleConfig.trigger} datapoints.
+ Configured resolutions: {resampleConfig.resolutions}
+ +{/if} diff --git a/web/frontend/src/generic/joblist/JobListRow.svelte b/web/frontend/src/generic/joblist/JobListRow.svelte index 5581903..b1e1511 100644 --- a/web/frontend/src/generic/joblist/JobListRow.svelte +++ b/web/frontend/src/generic/joblist/JobListRow.svelte @@ -26,13 +26,16 @@ export let showFootprint; export let triggerMetricRefresh = false; + const resampleConfig = getContext("resampling") || null; + const resampleDefault = resampleConfig ? Math.max(...resampleConfig.resolutions) : 0; + let { id } = job; let scopes = job.numNodes == 1 ? job.numAcc >= 1 ? ["core", "accelerator"] : ["core"] : ["node"]; - let selectedResolution = 600; + let selectedResolution = resampleDefault; let zoomStates = {}; const cluster = getContext("clusters").find((c) => c.name == job.cluster); @@ -69,7 +72,7 @@ `; function handleZoom(detail, metric) { - if ( + if ( // States have to differ, causes deathloop if just set (zoomStates[metric]?.x?.min !== detail?.lastZoomState?.x?.min) && (zoomStates[metric]?.y?.max !== detail?.lastZoomState?.y?.max) ) { @@ -187,7 +190,7 @@ isShared={job.exclusive != 1} numhwthreads={job.numHWThreads} numaccs={job.numAcc} - zoomState={zoomStates[metric.data.name]} + zoomState={zoomStates[metric.data.name] || null} /> {:else if metric.disabled == true && metric.data} {{end}} \ No newline at end of file diff --git a/web/templates/monitoring/job.tmpl b/web/templates/monitoring/job.tmpl index 9b344f9..648c4e5 100644 --- a/web/templates/monitoring/job.tmpl +++ b/web/templates/monitoring/job.tmpl @@ -13,6 +13,7 @@ const clusterCockpitConfig = {{ .Config }}; const authlevel = {{ .User.GetAuthLevel }}; const roles = {{ .Roles }}; + const resampleConfig = {{ .Resampling }}; {{end}} diff --git a/web/templates/monitoring/jobs.tmpl b/web/templates/monitoring/jobs.tmpl index 6ea05d5..4248471 100644 --- a/web/templates/monitoring/jobs.tmpl +++ b/web/templates/monitoring/jobs.tmpl @@ -12,6 +12,7 @@ const clusterCockpitConfig = {{ .Config }}; const authlevel = {{ .User.GetAuthLevel }}; const roles = {{ .Roles }}; + const resampleConfig = {{ .Resampling }}; {{end}} diff --git a/web/templates/monitoring/user.tmpl b/web/templates/monitoring/user.tmpl index 693ae61..8b4cf44 100644 --- a/web/templates/monitoring/user.tmpl +++ b/web/templates/monitoring/user.tmpl @@ -10,6 +10,7 @@ const userInfos = {{ .Infos }}; const filterPresets = {{ .FilterPresets }}; const clusterCockpitConfig = {{ .Config }}; + const resampleConfig = {{ .Resampling }}; {{end}} diff --git a/web/web.go b/web/web.go index 99008b5..45ca9e3 100644 --- a/web/web.go +++ b/web/web.go @@ -98,6 +98,7 @@ type Page struct { FilterPresets map[string]interface{} // For pages with the Filter component, this can be used to set initial filters. Infos map[string]interface{} // For generic use (e.g. username for /monitoring/user/, job id for /monitoring/job/) Config map[string]interface{} // UI settings for the currently logged in user (e.g. line width, ...) + Resampling *schema.ResampleConfig // If not nil, defines resampling trigger and resolutions } func RenderTemplate(rw http.ResponseWriter, file string, page *Page) {
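To round off the new enable-resampling option: below is a configuration fragment that satisfies the schema added above, decoded into a struct shaped like pkg/schema.ResampleConfig. The concrete trigger and resolution values are made-up examples, not recommended defaults.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Shaped like pkg/schema.ResampleConfig from this patch series.
type ResampleConfig struct {
	// Trigger next zoom level at less than this many visible datapoints.
	Trigger int `json:"trigger"`
	// Resampling target resolutions, in seconds.
	Resolutions []int `json:"resolutions"`
}

func main() {
	// Hypothetical values; the schema only requires both fields to exist.
	raw := []byte(`{"trigger": 30, "resolutions": [600, 240, 60]}`)

	var cfg ResampleConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// The frontend picks the coarsest resolution as its initial default,
	// mirroring resampleDefault = Math.max(...resampleConfig.resolutions).
	def := cfg.Resolutions[0]
	for _, r := range cfg.Resolutions[1:] {
		if r > def {
			def = r
		}
	}
	fmt.Printf("default: %ds, zoom-in below %d visible points\n", def, cfg.Trigger)
}
```

Starting at the coarsest resolution keeps the initial query cheap; the MetricPlot setScale hook then requests the next finer resolution only once fewer than the configured trigger count of points is visible.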