split statsTable data from jobMetrics query, initial commit

- mainly backend changes
- statsTable changes only for prototyping
Christoph Kluge 2025-03-13 17:33:55 +01:00
parent d0af933b35
commit f5f36427a4
19 changed files with 1471 additions and 426 deletions


@ -137,11 +137,6 @@ type JobMetricWithName {
metric: JobMetric!
}
type JobMetricStatWithName {
name: String!
stats: MetricStatistics!
}
type JobMetric {
unit: Unit
timestep: Int!
@ -156,6 +151,30 @@ type Series {
data: [NullableFloat!]!
}
type StatsSeries {
mean: [NullableFloat!]!
median: [NullableFloat!]!
min: [NullableFloat!]!
max: [NullableFloat!]!
}
type JobStatsWithScope {
name: String!
scope: MetricScope!
stats: [ScopedStats!]!
}
type ScopedStats {
hostname: String!
id: String
data: MetricStatistics!
}
type JobStats {
name: String!
stats: MetricStatistics!
}
type Unit {
base: String!
prefix: String
@ -167,13 +186,6 @@ type MetricStatistics {
max: Float!
}
type StatsSeries {
mean: [NullableFloat!]!
median: [NullableFloat!]!
min: [NullableFloat!]!
max: [NullableFloat!]!
}
type MetricFootprints {
metric: String!
data: [NullableFloat!]!
@ -247,7 +259,8 @@ type Query {
job(id: ID!): Job
jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]!
jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]!
jobStats(id: ID!, metrics: [String!]): [JobStats!]!
scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]!
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!

File diff suppressed because it is too large


@ -81,11 +81,6 @@ type JobLinkResultList struct {
Count *int `json:"count,omitempty"`
}
type JobMetricStatWithName struct {
Name string `json:"name"`
Stats *schema.MetricStatistics `json:"stats"`
}
type JobMetricWithName struct {
Name string `json:"name"`
Scope schema.MetricScope `json:"scope"`
@ -100,6 +95,17 @@ type JobResultList struct {
HasNextPage *bool `json:"hasNextPage,omitempty"`
}
type JobStats struct {
Name string `json:"name"`
Stats *schema.MetricStatistics `json:"stats"`
}
type JobStatsWithScope struct {
Name string `json:"name"`
Scope schema.MetricScope `json:"scope"`
Stats []*ScopedStats `json:"stats"`
}
type JobsStatistics struct {
ID string `json:"id"`
Name string `json:"name"`
@ -173,6 +179,12 @@ type PageRequest struct {
Page int `json:"page"`
}
type ScopedStats struct {
Hostname string `json:"hostname"`
ID *string `json:"id,omitempty"`
Data *schema.MetricStatistics `json:"data"`
}
type StringInput struct {
Eq *string `json:"eq,omitempty"`
Neq *string `json:"neq,omitempty"`


@ -301,24 +301,23 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
return res, err
}
// JobMetricStats is the resolver for the jobMetricStats field.
func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) {
// JobStats is the resolver for the jobStats field.
func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {
log.Warn("Error while querying job for metrics")
log.Warnf("Error while querying job %s for metrics", id)
return nil, err
}
data, err := metricDataDispatcher.LoadStatData(job, metrics, ctx)
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil {
log.Warn("Error while loading job stat data")
log.Warnf("Error while loading job stat data for job id %s", id)
return nil, err
}
res := []*model.JobMetricStatWithName{}
res := []*model.JobStats{}
for name, md := range data {
res = append(res, &model.JobMetricStatWithName{
res = append(res, &model.JobStats{
Name: name,
Stats: &md,
})
@ -327,6 +326,47 @@ func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics [
return res, err
}
// ScopedJobStats is the resolver for the scopedJobStats field.
func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {
log.Warnf("Error while querying job %s for metrics", id)
return nil, err
}
data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
if err != nil {
log.Warnf("Error while loading scoped job stat data for job id %s", id)
return nil, err
}
res := make([]*model.JobStatsWithScope, 0)
for name, scoped := range data {
for scope, stats := range scoped {
// log.Debugf("HANDLE >>>>> %s @ %s -> First Array Value %#v", name, scope, *stats[0])
mdlStats := make([]*model.ScopedStats, 0)
for _, stat := range stats {
// log.Debugf("CONVERT >>>>> >>>>> %s -> %v -> %#v", stat.Hostname, stat.Id, stat.Data)
mdlStats = append(mdlStats, &model.ScopedStats{
Hostname: stat.Hostname,
ID: stat.Id,
Data: stat.Data,
})
}
// log.Debugf("APPEND >>>>> >>>>> %#v", mdlStats)
res = append(res, &model.JobStatsWithScope{
Name: name,
Scope: scope,
Stats: mdlStats,
})
}
}
return res, nil
}
// JobsFootprints is the resolver for the jobsFootprints field.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
// NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column!


@ -224,8 +224,34 @@ func LoadAverages(
return nil
}
// Used for polar plots in frontend
func LoadStatData(
// Used for statsTable in frontend: Return scoped statistics by metric.
func LoadScopedJobStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
}
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil {
return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
}
scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
if err != nil {
log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return nil, err
}
return scopedStats, nil
}
// Used for polar plots in frontend: Aggregates statistics over all nodes into a single value per metric for the job.
func LoadJobStats(
job *schema.Job,
metrics []string,
ctx context.Context,
@ -237,12 +263,12 @@ func LoadStatData(
data := make(map[string]schema.MetricStatistics, len(metrics))
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil {
return data, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
}
stats, err := repo.LoadStats(job, metrics, ctx)
if err != nil {
log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return data, err
}
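Note: the two renamed loaders now serve different frontend consumers and return different shapes. Below is a minimal caller sketch, assuming cc-backend's package layout as shown in this diff; the metric names ("flops_any", "mem_bw") are placeholders.

package sketch // illustrative only, not part of this commit

import (
	"context"

	"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func dumpJobStats(job *schema.Job, ctx context.Context) {
	// Polar plot: one aggregated min/avg/max triple per metric.
	perJob, err := metricDataDispatcher.LoadJobStats(job, []string{"flops_any", "mem_bw"}, ctx)
	if err == nil {
		for metric, stats := range perJob { // map[string]schema.MetricStatistics
			log.Debugf("%s: min=%f avg=%f max=%f", metric, stats.Min, stats.Avg, stats.Max)
		}
	}

	// StatsTable: per-metric, per-scope, per-host entries.
	scoped, err := metricDataDispatcher.LoadScopedJobStats(job, []string{"flops_any"}, []schema.MetricScope{"node"}, ctx)
	if err == nil {
		for metric, byScope := range scoped { // schema.ScopedJobStats
			for scope, entry := range byScope {
				for _, e := range entry {
					log.Debugf("%s@%s host=%s avg=%f", metric, scope, e.Hostname, e.Data.Avg)
				}
			}
		}
	}
}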


@ -618,7 +618,98 @@ func (ccms *CCMetricStore) LoadStats(
return stats, nil
}
// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known!
// Scoped Stats: Basically LoadData, but without a resolution and with the data query flag disabled (WithStats only).
func (ccms *CCMetricStore) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
if err != nil {
log.Warn("Error while building queries")
return nil, err
}
req := ApiQueryRequest{
Cluster: job.Cluster,
From: job.StartTime.Unix(),
To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(),
Queries: queries,
WithStats: true,
WithData: false,
}
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Error("Error while performing request")
return nil, err
}
var errors []string
scopedJobStats := make(schema.ScopedJobStats)
for i, row := range resBody.Results {
query := req.Queries[i]
metric := ccms.toLocalName(query.Metric)
scope := assignedScope[i]
if _, ok := scopedJobStats[metric]; !ok {
scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
}
if _, ok := scopedJobStats[metric][scope]; !ok {
scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
}
for ndx, res := range row {
if res.Error != nil {
/* Build list for "partial errors", if any */
errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
continue
}
id := (*string)(nil)
if query.Type != nil {
id = new(string)
*id = query.TypeIds[ndx]
}
if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() {
// "schema.Float()" because regular float64 can not be JSONed when NaN.
res.Avg = schema.Float(0)
res.Min = schema.Float(0)
res.Max = schema.Float(0)
}
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: query.Hostname,
Id: id,
Data: &schema.MetricStatistics{
Avg: float64(res.Avg),
Min: float64(res.Min),
Max: float64(res.Max),
},
})
}
// Remove from map if empty, so that len(scopedJobStats[metric][scope]) can be checked later.
if len(scopedJobStats[metric][scope]) == 0 {
delete(scopedJobStats[metric], scope)
if len(scopedJobStats[metric]) == 0 {
delete(scopedJobStats, metric)
}
}
}
if len(errors) != 0 {
/* Returns list for "partial errors" */
return scopedJobStats, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
}
return scopedJobStats, nil
}
// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known! (Is this TODO outdated with NodeListData?)
func (ccms *CCMetricStore) LoadNodeData(
cluster string,
metrics, nodes []string,


@ -301,6 +301,18 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
return stats, nil
}
func (idb *InfluxDBv2DataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.ScopedJobStats, error) {
// TODO : Implement to be used in JobView Stats Table
log.Infof("LoadScopedStats unimplemented for InfluxDBv2DataRepository, Args: Job-ID %d, metrics %v, scopes %v", job.JobID, metrics, scopes)
return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
}
func (idb *InfluxDBv2DataRepository) LoadNodeData(
cluster string,
metrics, nodes []string,


@ -24,9 +24,12 @@ type MetricDataRepository interface {
// Return the JobData for the given job, only with the requested metrics.
LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error)
// Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now.
// Return a map of metrics to a map of nodes to the metric statistics of the job. Node scope only.
LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error)
// Return a map of metrics to a map of scopes to the scoped metric statistics of the job.
LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error)
// Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node.
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
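For backends that only deliver node-level statistics (see the InfluxDB and Prometheus stubs further down, which simply return an error), one conceivable fallback would be to adapt the existing LoadStats result to the new return type. A hedged sketch, not part of this commit; package and import paths follow the layout shown in this diff:

package metricdata // sketch: same package as the interface above

import (
	"context"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// nodeScopedStatsFromLoadStats is a hypothetical adapter: it lifts the
// map[metric]map[host]MetricStatistics returned by LoadStats into a
// node-scoped schema.ScopedJobStats.
func nodeScopedStatsFromLoadStats(
	repo MetricDataRepository,
	job *schema.Job,
	metrics []string,
	ctx context.Context,
) (schema.ScopedJobStats, error) {
	nodeStats, err := repo.LoadStats(job, metrics, ctx)
	if err != nil {
		return nil, err
	}

	out := make(schema.ScopedJobStats)
	for metric, byHost := range nodeStats {
		entries := make([]*schema.ScopedStats, 0, len(byHost))
		for host, stats := range byHost {
			s := stats // copy; each entry needs its own address
			entries = append(entries, &schema.ScopedStats{
				Hostname: host,
				Data:     &s,
			})
		}
		out[metric] = map[schema.MetricScope][]*schema.ScopedStats{
			"node": entries,
		}
	}
	return out, nil
}

The commit itself leaves those backends unimplemented, so only cc-metric-store and the job archive can feed the statsTable for now.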


@ -448,6 +448,18 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
return data, nil
}
func (pdb *PrometheusDataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.ScopedJobStats, error) {
// TODO : Implement to be used in Job-View StatsTable
log.Infof("LoadScopedStats unimplemented for PrometheusDataRepository, Args: job-id %v, metrics %v, scopes %v", job.JobID, metrics, scopes)
return nil, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository")
}
func (pdb *PrometheusDataRepository) LoadNodeListData(
cluster, subCluster, nodeFilter string,
metrics []string,
@ -463,5 +475,5 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
// TODO : Implement to be used in NodeList-View
log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository")
return nil, totalNodes, hasNextPage, errors.New("METRICDATA/PROMETHEUS > unimplemented for PrometheusDataRepository")
}


@ -36,7 +36,17 @@ func (tmdr *TestMetricDataRepository) LoadData(
func (tmdr *TestMetricDataRepository) LoadStats(
job *schema.Job,
metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
metrics []string,
ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
panic("TODO")
}
func (tmdr *TestMetricDataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.ScopedJobStats, error) {
panic("TODO")
}


@ -27,6 +27,8 @@ type ArchiveBackend interface {
LoadJobData(job *schema.Job) (schema.JobData, error)
LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error)
LoadClusterCfg(name string) (*schema.Cluster, error)
StoreJobMeta(jobMeta *schema.JobMeta) error
@ -125,7 +127,7 @@ func LoadAveragesFromArchive(
return nil
}
// Helper to metricdataloader.LoadStatData().
// Helper to metricDataDispatcher.LoadJobStats().
func LoadStatsFromArchive(
job *schema.Job,
metrics []string,
@ -154,6 +156,22 @@ func LoadStatsFromArchive(
return data, nil
}
// Helper to metricDataDispatcher.LoadScopedJobStats().
func LoadScopedStatsFromArchive(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
) (schema.ScopedJobStats, error) {
data, err := ar.LoadJobStats(job)
if err != nil {
log.Warn("Error while loading job metadata from archiveBackend")
return nil, err
}
return data, nil
}
func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
metaFile, err := ar.LoadJobMeta(job)
if err != nil {


@ -115,6 +115,40 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
}
}
func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
f, err := os.Open(filename)
if err != nil {
log.Errorf("fsBackend LoadJobStats()- %v", err)
return nil, err
}
defer f.Close()
if isCompressed {
r, err := gzip.NewReader(f)
if err != nil {
log.Errorf(" %v", err)
return nil, err
}
defer r.Close()
if config.Keys.Validate {
if err := schema.Validate(schema.Data, r); err != nil {
return nil, fmt.Errorf("validate job data: %v", err)
}
}
return DecodeJobStats(r, filename)
} else {
if config.Keys.Validate {
if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil {
return nil, fmt.Errorf("validate job data: %v", err)
}
}
return DecodeJobStats(bufio.NewReader(f), filename)
}
}
func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
var config FsArchiveConfig
@ -389,6 +423,18 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
return loadJobData(filename, isCompressed)
}
func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) {
var isCompressed bool = true
filename := getPath(job, fsa.path, "data.json.gz")
if !util.CheckFileExists(filename) {
filename = getPath(job, fsa.path, "data.json")
isCompressed = false
}
return loadJobStats(filename, isCompressed)
}
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
filename := getPath(job, fsa.path, "meta.json")
return loadJobMeta(filename)


@ -32,6 +32,43 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
return data.(schema.JobData), nil
}
func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) {
jobData, err := DecodeJobData(r, k)
// Convert schema.JobData to schema.ScopedJobStats
if jobData != nil {
scopedJobStats := make(schema.ScopedJobStats)
for metric, metricData := range jobData {
if _, ok := scopedJobStats[metric]; !ok {
scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
}
for scope, jobMetric := range metricData {
if _, ok := scopedJobStats[metric][scope]; !ok {
scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
}
for _, series := range jobMetric.Series {
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: series.Hostname,
Id: series.Id,
Data: &series.Statistics,
})
}
// Remove from map if empty, so that len(scopedJobStats[metric][scope]) can be checked later.
if len(scopedJobStats[metric][scope]) == 0 {
delete(scopedJobStats[metric], scope)
if len(scopedJobStats[metric]) == 0 {
delete(scopedJobStats, metric)
}
}
}
}
return scopedJobStats, nil
}
return nil, err
}
func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
var d schema.JobMeta
if err := json.NewDecoder(r).Decode(&d); err != nil {


@ -15,6 +15,7 @@ import (
)
type JobData map[string]map[MetricScope]*JobMetric
type ScopedJobStats map[string]map[MetricScope][]*ScopedStats
type JobMetric struct {
StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"`
@ -30,6 +31,12 @@ type Series struct {
Statistics MetricStatistics `json:"statistics"`
}
type ScopedStats struct {
Hostname string `json:"hostname"`
Id *string `json:"id,omitempty"`
Data *MetricStatistics `json:"data"`
}
type MetricStatistics struct {
Avg float64 `json:"avg"`
Min float64 `json:"min"`
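The new ScopedJobStats type nests metric name -> scope -> per-host entries, each carrying an optional hardware-thread/accelerator id. A hand-built value (hypothetical metric, host, and id strings) to illustrate the shape:

package sketch // illustrative only, not part of this commit

import "github.com/ClusterCockpit/cc-backend/pkg/schema"

var coreID = "0" // example id; only set for sub-node scopes

var exampleStats = schema.ScopedJobStats{
	"flops_any": { // metric name (example)
		"node": []*schema.ScopedStats{
			{Hostname: "host0001", Data: &schema.MetricStatistics{Min: 10, Avg: 42, Max: 80}},
		},
		"core": []*schema.ScopedStats{
			{Hostname: "host0001", Id: &coreID, Data: &schema.MetricStatistics{Min: 5, Avg: 21, Max: 40}},
		},
	},
}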


@ -127,28 +127,17 @@
let job = $initq.data.job;
if (!job) return;
const pendingMetrics = [
...(
(
ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
ccconfig[`job_view_selectedMetrics:${job.cluster}`]
) ||
$initq.data.globalMetrics
.reduce((names, gm) => {
if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) {
names.push(gm.name);
}
return names;
}, [])
),
...(
(
ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`]
) ||
ccconfig[`job_view_nodestats_selectedMetrics`]
),
];
const pendingMetrics = (
ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
ccconfig[`job_view_selectedMetrics:${job.cluster}`]
) ||
$initq.data.globalMetrics
.reduce((names, gm) => {
if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) {
names.push(gm.name);
}
return names;
}, []);
// Select default Scopes to load: Check before if any metric has accelerator scope by default
const accScopeDefault = [...pendingMetrics].some(function (m) {
@ -343,7 +332,6 @@
{#if item.data}
<Metric
bind:this={plots[item.metric]}
on:more-loaded={({ detail }) => statsTable.moreLoaded(detail)}
job={$initq.data.job}
metricName={item.metric}
metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit}
@ -404,15 +392,7 @@
class="overflow-x-auto"
active={!somethingMissing}
>
{#if $jobMetrics?.data?.jobMetrics}
{#key $jobMetrics.data.jobMetrics}
<StatsTable
bind:this={statsTable}
job={$initq.data.job}
jobMetrics={$jobMetrics.data.jobMetrics}
/>
{/key}
{/if}
<StatsTable job={$initq.data.job}/>
</TabPane>
<TabPane tabId="job-script" tab="Job Script">
<div class="pre-wrapper">


@ -150,11 +150,6 @@
// On additional scope request
if (selectedScope == "load-all") {
// Push scope to statsTable (Needs to be in this case, else newly selected 'Metric.svelte' renders cause statsTable race condition)
const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node")
if (statsTableData.length > 0) {
dispatch("more-loaded", statsTableData);
}
// Set selected scope to min of returned scopes
selectedScope = minScope(scopes)
nodeOnly = (selectedScope == "node") // "node" still only scope after load-all


@ -3,13 +3,14 @@
Properties:
- `job Object`: The job object
- `jobMetrics [Object]`: The jobs metricdata
Exported:
- `moreLoaded`: Adds additional scopes requested from Metric.svelte in Job-View
-->
<script>
import {
queryStore,
gql,
getContextClient
} from "@urql/svelte";
import { getContext } from "svelte";
import {
Button,
@ -26,11 +27,6 @@
import MetricSelection from "../generic/select/MetricSelection.svelte";
export let job;
export let jobMetrics;
const sortedJobMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort()
const scopesForMetric = (metric) =>
jobMetrics.filter((jm) => jm.name == metric).map((jm) => jm.scope);
let hosts = job.resources.map((r) => r.hostname).sort(),
selectedScopes = {},
@ -42,29 +38,63 @@
getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`]
) || getContext("cc-config")["job_view_nodestats_selectedMetrics"];
for (let metric of sortedJobMetrics) {
// Not Exclusive or Multi-Node: get maxScope directly (mostly: node)
// -> Else: Load smallest available granularity as default as per availability
const availableScopes = scopesForMetric(metric);
if (job.exclusive != 1 || job.numNodes == 1) {
if (availableScopes.includes("accelerator")) {
selectedScopes[metric] = "accelerator";
} else if (availableScopes.includes("core")) {
selectedScopes[metric] = "core";
} else if (availableScopes.includes("socket")) {
selectedScopes[metric] = "socket";
} else {
selectedScopes[metric] = "node";
const client = getContextClient();
const query = gql`
query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!) {
scopedJobStats(id: $dbid, metrics: $selectedMetrics, scopes: $selectedScopes) {
name
scope
stats {
hostname
id
data {
min
avg
max
}
}
}
} else {
selectedScopes[metric] = maxScope(availableScopes);
}
`;
sorting[metric] = {
min: { dir: "up", active: false },
avg: { dir: "up", active: false },
max: { dir: "up", active: false },
};
$: scopedStats = queryStore({
client: client,
query: query,
variables: { dbid: job.id, selectedMetrics, selectedScopes: ["node"] },
});
$: console.log(">>>> RESULT:", $scopedStats?.data?.scopedJobStats)
$: jobMetrics = $scopedStats?.data?.scopedJobStats || [];
const scopesForMetric = (metric) =>
jobMetrics.filter((jm) => jm.name == metric).map((jm) => jm.scope);
$: if ($scopedStats?.data) {
for (let metric of selectedMetrics) {
// Shared or single-node job: load the smallest available granularity by default
// -> Exclusive multi-node job: use maxScope directly (mostly: node)
const availableScopes = scopesForMetric(metric);
if (job.exclusive != 1 || job.numNodes == 1) {
if (availableScopes.includes("accelerator")) {
selectedScopes[metric] = "accelerator";
} else if (availableScopes.includes("core")) {
selectedScopes[metric] = "core";
} else if (availableScopes.includes("socket")) {
selectedScopes[metric] = "socket";
} else {
selectedScopes[metric] = "node";
}
} else {
selectedScopes[metric] = maxScope(availableScopes);
}
sorting[metric] = {
min: { dir: "up", active: false },
avg: { dir: "up", active: false },
max: { dir: "up", active: false },
};
}
}
function sortBy(metric, stat) {
@ -90,13 +120,6 @@
});
}
export function moreLoaded(moreJobMetrics) {
moreJobMetrics.forEach(function (newMetric) {
if (!jobMetrics.some((m) => m.scope == newMetric.scope)) {
jobMetrics = [...jobMetrics, newMetric]
}
});
};
</script>
<Row>


@ -37,8 +37,8 @@
return s.dir != "up" ? a[field] - b[field] : b[field] - a[field];
} else {
return s.dir != "up"
? a.statistics[field] - b.statistics[field]
: b.statistics[field] - a.statistics[field];
? a.data[field] - b.data[field]
: b.data[field] - a.data[field];
}
});
}
@ -52,7 +52,7 @@
$: series = jobMetrics
.find((jm) => jm.name == metric && jm.scope == scope)
?.metric.series.filter((s) => s.hostname == host && s.statistics != null)
?.stats.filter((s) => s.hostname == host && s.data != null)
?.sort(compareNumbers);
</script>
@ -60,13 +60,13 @@
<td colspan={scope == "node" ? 3 : 4}><i>No data</i></td>
{:else if series.length == 1 && scope == "node"}
<td>
{series[0].statistics.min}
{series[0].data.min}
</td>
<td>
{series[0].statistics.avg}
{series[0].data.avg}
</td>
<td>
{series[0].statistics.max}
{series[0].data.max}
</td>
{:else}
<td colspan="4">
@ -86,9 +86,9 @@
{#each series as s, i}
<tr>
<th>{s.id ?? i}</th>
<td>{s.statistics.min}</td>
<td>{s.statistics.avg}</td>
<td>{s.statistics.max}</td>
<td>{s.data.min}</td>
<td>{s.data.avg}</td>
<td>{s.data.max}</td>
</tr>
{/each}
</table>


@ -40,14 +40,14 @@
const client = getContextClient();
const polarQuery = gql`
query ($dbid: ID!, $selectedMetrics: [String!]!) {
jobMetricStats(id: $dbid, metrics: $selectedMetrics) {
jobStats(id: $dbid, metrics: $selectedMetrics) {
name
stats {
min
avg
max
}
min
avg
max
}
}
}
`;
@ -66,7 +66,7 @@
{:else}
<Polar
{polarMetrics}
polarData={$polarData.data.jobMetricStats}
polarData={$polarData.data.jobStats}
/>
{/if}
</CardBody>