diff --git a/api/schema.graphqls b/api/schema.graphqls
index eb3e270..303ede5 100644
--- a/api/schema.graphqls
+++ b/api/schema.graphqls
@@ -167,7 +167,7 @@ type TimeWeights {
 }
 
 enum Aggregate { USER, PROJECT, CLUSTER }
-enum SortByAggregate { WALLTIME, NODEHOURS, COREHOURS, ACCHOURS }
+enum SortByAggregate { WALLTIME, TOTALNODES, NODEHOURS, TOTALCORES, COREHOURS, TOTALACCS, ACCHOURS }
 
 type NodeMetrics {
   host: String!
@@ -293,8 +293,11 @@ type JobsStatistics {
   runningJobs:    Int!  # Number of running jobs
   shortJobs:      Int!  # Number of jobs with a duration of less than duration
   totalWalltime:  Int!  # Sum of the duration of all matched jobs in hours
+  totalNodes:     Int!  # Sum of the nodes of all matched jobs
   totalNodeHours: Int!  # Sum of the node hours of all matched jobs
+  totalCores:     Int!  # Sum of the cores of all matched jobs
   totalCoreHours: Int!  # Sum of the core hours of all matched jobs
+  totalAccs:      Int!  # Sum of the accs of all matched jobs
   totalAccHours:  Int!  # Sum of the gpu hours of all matched jobs
   histDuration:   [HistoPoint!]!  # value: hour, count: number of jobs with a rounded duration of value
   histNumNodes:   [HistoPoint!]!  # value: number of nodes, count: number of jobs with that number of nodes
diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go
index 355b25a..f06698c 100644
--- a/internal/graph/generated/generated.go
+++ b/internal/graph/generated/generated.go
@@ -147,9 +147,12 @@ type ComplexityRoot struct {
 		RunningJobs    func(childComplexity int) int
 		ShortJobs      func(childComplexity int) int
 		TotalAccHours  func(childComplexity int) int
+		TotalAccs      func(childComplexity int) int
 		TotalCoreHours func(childComplexity int) int
+		TotalCores     func(childComplexity int) int
 		TotalJobs      func(childComplexity int) int
 		TotalNodeHours func(childComplexity int) int
+		TotalNodes     func(childComplexity int) int
 		TotalWalltime  func(childComplexity int) int
 	}
 
@@ -767,6 +770,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.JobsStatistics.TotalAccHours(childComplexity), true
 
+	case "JobsStatistics.totalAccs":
+		if e.complexity.JobsStatistics.TotalAccs == nil {
+			break
+		}
+
+		return e.complexity.JobsStatistics.TotalAccs(childComplexity), true
+
 	case "JobsStatistics.totalCoreHours":
 		if e.complexity.JobsStatistics.TotalCoreHours == nil {
 			break
@@ -774,6 +784,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.JobsStatistics.TotalCoreHours(childComplexity), true
 
+	case "JobsStatistics.totalCores":
+		if e.complexity.JobsStatistics.TotalCores == nil {
+			break
+		}
+
+		return e.complexity.JobsStatistics.TotalCores(childComplexity), true
+
 	case "JobsStatistics.totalJobs":
 		if e.complexity.JobsStatistics.TotalJobs == nil {
 			break
@@ -788,6 +805,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.JobsStatistics.TotalNodeHours(childComplexity), true
 
+	case "JobsStatistics.totalNodes":
+		if e.complexity.JobsStatistics.TotalNodes == nil {
+			break
+		}
+
+		return e.complexity.JobsStatistics.TotalNodes(childComplexity), true
+
 	case "JobsStatistics.totalWalltime":
 		if e.complexity.JobsStatistics.TotalWalltime == nil {
 			break
@@ -1727,7 +1751,7 @@ type TimeWeights {
 }
 
 enum Aggregate { USER, PROJECT, CLUSTER }
-enum SortByAggregate { WALLTIME, NODEHOURS, COREHOURS, ACCHOURS }
+enum SortByAggregate { WALLTIME, TOTALNODES, NODEHOURS, TOTALCORES, COREHOURS, TOTALACCS, ACCHOURS }
 
 type NodeMetrics {
   host: String!
@@ -1853,8 +1877,11 @@ type JobsStatistics {
   runningJobs:    Int!  # Number of running jobs
   shortJobs:      Int!  # Number of jobs with a duration of less than duration
   totalWalltime:  Int!  # Sum of the duration of all matched jobs in hours
+  totalNodes:     Int!  # Sum of the nodes of all matched jobs
   totalNodeHours: Int!  # Sum of the node hours of all matched jobs
+  totalCores:     Int!  # Sum of the cores of all matched jobs
   totalCoreHours: Int!  # Sum of the core hours of all matched jobs
+  totalAccs:      Int!  # Sum of the accs of all matched jobs
   totalAccHours:  Int!  # Sum of the gpu hours of all matched jobs
   histDuration:   [HistoPoint!]!  # value: hour, count: number of jobs with a rounded duration of value
   histNumNodes:   [HistoPoint!]!  # value: number of nodes, count: number of jobs with that number of nodes
@@ -5131,6 +5158,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_totalWalltime(ctx contex
 	return fc, nil
 }
 
+func (ec *executionContext) _JobsStatistics_totalNodes(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_JobsStatistics_totalNodes(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.TotalNodes, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int)
+	fc.Result = res
+	return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_totalNodes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "JobsStatistics",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _JobsStatistics_totalNodeHours(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_JobsStatistics_totalNodeHours(ctx, field)
 	if err != nil {
@@ -5175,6 +5246,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_totalNodeHours(ctx conte
 	return fc, nil
 }
 
+func (ec *executionContext) _JobsStatistics_totalCores(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_JobsStatistics_totalCores(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.TotalCores, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int)
+	fc.Result = res
+	return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_totalCores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "JobsStatistics",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _JobsStatistics_totalCoreHours(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_JobsStatistics_totalCoreHours(ctx, field)
 	if err != nil {
@@ -5219,6 +5334,50 @@ func (ec *executionContext) fieldContext_JobsStatistics_totalCoreHours(ctx conte
 	return fc, nil
 }
 
+func (ec *executionContext) _JobsStatistics_totalAccs(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_JobsStatistics_totalAccs(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.TotalAccs, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int)
+	fc.Result = res
+	return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_JobsStatistics_totalAccs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "JobsStatistics",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _JobsStatistics_totalAccHours(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_JobsStatistics_totalAccHours(ctx, field)
 	if err != nil {
@@ -7134,10 +7293,16 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex
 				return ec.fieldContext_JobsStatistics_shortJobs(ctx, field)
 			case "totalWalltime":
 				return ec.fieldContext_JobsStatistics_totalWalltime(ctx, field)
+			case "totalNodes":
+				return ec.fieldContext_JobsStatistics_totalNodes(ctx, field)
 			case "totalNodeHours":
 				return ec.fieldContext_JobsStatistics_totalNodeHours(ctx, field)
+			case "totalCores":
+				return ec.fieldContext_JobsStatistics_totalCores(ctx, field)
 			case "totalCoreHours":
 				return ec.fieldContext_JobsStatistics_totalCoreHours(ctx, field)
+			case "totalAccs":
+				return ec.fieldContext_JobsStatistics_totalAccs(ctx, field)
 			case "totalAccHours":
 				return ec.fieldContext_JobsStatistics_totalAccHours(ctx, field)
 			case "histDuration":
@@ -12573,16 +12738,31 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
+		case "totalNodes":
+			out.Values[i] = ec._JobsStatistics_totalNodes(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		case "totalNodeHours":
 			out.Values[i] = ec._JobsStatistics_totalNodeHours(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
+		case "totalCores":
+			out.Values[i] = ec._JobsStatistics_totalCores(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		case "totalCoreHours":
 			out.Values[i] = ec._JobsStatistics_totalCoreHours(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
+		case "totalAccs":
+			out.Values[i] = ec._JobsStatistics_totalAccs(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
 		case "totalAccHours":
 			out.Values[i] = ec._JobsStatistics_totalAccHours(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				out.Invalids++
 			}
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go
index 609e6a4..3997b2d 100644
--- a/internal/graph/model/models_gen.go
+++ b/internal/graph/model/models_gen.go
@@ -91,8 +91,11 @@ type JobsStatistics struct {
 	RunningJobs    int           `json:"runningJobs"`
 	ShortJobs      int           `json:"shortJobs"`
 	TotalWalltime  int           `json:"totalWalltime"`
+	TotalNodes     int           `json:"totalNodes"`
 	TotalNodeHours int           `json:"totalNodeHours"`
+	TotalCores     int           `json:"totalCores"`
 	TotalCoreHours int           `json:"totalCoreHours"`
+	TotalAccs      int           `json:"totalAccs"`
 	TotalAccHours  int           `json:"totalAccHours"`
 	HistDuration   []*HistoPoint `json:"histDuration"`
 	HistNumNodes   []*HistoPoint `json:"histNumNodes"`
@@ -191,22 +194,28 @@ func (e Aggregate) MarshalGQL(w io.Writer) {
 type SortByAggregate string
 
 const (
-	SortByAggregateWalltime  SortByAggregate = "WALLTIME"
-	SortByAggregateNodehours SortByAggregate = "NODEHOURS"
-	SortByAggregateCorehours SortByAggregate = "COREHOURS"
-	SortByAggregateAcchours  SortByAggregate = "ACCHOURS"
+	SortByAggregateWalltime   SortByAggregate = "WALLTIME"
+	SortByAggregateTotalnodes SortByAggregate = "TOTALNODES"
+	SortByAggregateNodehours  SortByAggregate = "NODEHOURS"
+	SortByAggregateTotalcores SortByAggregate = "TOTALCORES"
+	SortByAggregateCorehours  SortByAggregate = "COREHOURS"
+	SortByAggregateTotalaccs  SortByAggregate = "TOTALACCS"
+	SortByAggregateAcchours   SortByAggregate = "ACCHOURS"
 )
 
 var AllSortByAggregate = []SortByAggregate{
 	SortByAggregateWalltime,
+	SortByAggregateTotalnodes,
 	SortByAggregateNodehours,
+	SortByAggregateTotalcores,
 	SortByAggregateCorehours,
+	SortByAggregateTotalaccs,
 	SortByAggregateAcchours,
 }
 
 func (e SortByAggregate) IsValid() bool {
 	switch e {
-	case SortByAggregateWalltime, SortByAggregateNodehours, SortByAggregateCorehours, SortByAggregateAcchours:
+	case SortByAggregateWalltime, SortByAggregateTotalnodes, SortByAggregateNodehours, SortByAggregateTotalcores, SortByAggregateCorehours, SortByAggregateTotalaccs, SortByAggregateAcchours:
 		return true
 	}
 	return false
diff --git a/internal/repository/stats.go b/internal/repository/stats.go
index 18d495b..905d74b 100644
--- a/internal/repository/stats.go
+++ b/internal/repository/stats.go
@@ -24,10 +24,13 @@ var groupBy2column = map[model.Aggregate]string{
 }
 
 var sortBy2column = map[model.SortByAggregate]string{
-	model.SortByAggregateWalltime:  "totalWalltime",
-	model.SortByAggregateNodehours: "totalNodeHours",
-	model.SortByAggregateCorehours: "totalCoreHours",
-	model.SortByAggregateAcchours:  "totalAccHours",
+	model.SortByAggregateWalltime:   "totalWalltime",
+	model.SortByAggregateTotalnodes: "totalNodes",
+	model.SortByAggregateNodehours:  "totalNodeHours",
+	model.SortByAggregateTotalcores: "totalCores",
+	model.SortByAggregateCorehours:  "totalCoreHours",
+	model.SortByAggregateTotalaccs:  "totalAccs",
+	model.SortByAggregateAcchours:   "totalAccHours",
 }
 
 func (r *JobRepository) buildCountQuery(
@@ -67,20 +70,26 @@ func (r *JobRepository) buildStatsQuery(
 	castType := r.getCastType()
 
 	if col != "" {
-		// Scan columns: id, totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours
+		// Scan columns: id, totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
 		query = sq.Select(col, "COUNT(job.id)",
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s) as totalWalltime", castType),
+			fmt.Sprintf("CAST(SUM(job.num_nodes) as %s) as totalNodes", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s) as totalNodeHours", castType),
+			fmt.Sprintf("CAST(SUM(job.num_hwthreads) as %s) as totalCores", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s) as totalCoreHours", castType),
+			fmt.Sprintf("CAST(SUM(job.num_acc) as %s) as totalAccs", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s) as totalAccHours", castType),
 		).From("job").GroupBy(col)
 	} else {
-		// Scan columns: totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours
+		// Scan columns: totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
 		query = sq.Select("COUNT(job.id)",
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
+			fmt.Sprintf("CAST(SUM(job.num_nodes) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType),
+			fmt.Sprintf("CAST(SUM(job.num_hwthreads) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType),
+			fmt.Sprintf("CAST(SUM(job.num_acc) as %s)", castType),
 			fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType),
 		).From("job")
 	}
@@ -152,14 +161,21 @@ func (r *JobRepository) JobsStatsGrouped(
 	for rows.Next() {
 		var id sql.NullString
-		var jobs, walltime, nodeHours, coreHours, accHours sql.NullInt64
-		if err := rows.Scan(&id, &jobs, &walltime, &nodeHours, &coreHours, &accHours); err != nil {
+		var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
+		if err := rows.Scan(&id, &jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
 			log.Warn("Error while scanning rows")
 			return nil, err
 		}
 
 		if id.Valid {
-			var totalCoreHours, totalAccHours int
+			var totalCores, totalCoreHours, totalAccs, totalAccHours int
+
+			if cores.Valid {
+				totalCores = int(cores.Int64)
+			}
+			if accs.Valid {
+				totalAccs = int(accs.Int64)
+			}
 
 			if coreHours.Valid {
 				totalCoreHours = int(coreHours.Int64)
@@ -176,7 +192,9 @@ func (r *JobRepository) JobsStatsGrouped(
 					Name:           name,
 					TotalJobs:      int(jobs.Int64),
 					TotalWalltime:  int(walltime.Int64),
+					TotalCores:     totalCores,
 					TotalCoreHours: totalCoreHours,
+					TotalAccs:      totalAccs,
 					TotalAccHours:  totalAccHours})
 			} else {
 				stats = append(stats,
@@ -184,7 +202,9 @@ func (r *JobRepository) JobsStatsGrouped(
 					ID:             id.String,
 					TotalJobs:      int(jobs.Int64),
 					TotalWalltime:  int(walltime.Int64),
+					TotalCores:     totalCores,
 					TotalCoreHours: totalCoreHours,
+					TotalAccs:      totalAccs,
 					TotalAccHours:  totalAccHours})
 			}
 		}
diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte
index f60b2b9..4a87600 100644
--- a/web/frontend/src/Analysis.root.svelte
+++ b/web/frontend/src/Analysis.root.svelte
@@ -66,13 +66,26 @@
                 histDuration { count, value }
                 histNumNodes { count, value }
             }
-
-            topUsers: jobsCount(filter: $jobFilters, groupBy: USER, weight: CORE_HOURS, limit: 5) { name, count }
         }
     `,
         variables: { jobFilters }
     })
 
+    const paging = { itemsPerPage: 5, page: 1 }; // Top 5
+    // const sorting = { field: "totalCoreHours", order: "DESC" };
+    $: topQuery = queryStore({
+        client: client,
+        query: gql`
+            query($jobFilters: [JobFilter!]!, $paging: PageRequest!) {
+                jobsStatistics(filter: $jobFilters, page: $paging, sortBy: TOTALCOREHOURS, groupBy: USER) {
+                    id
+                    totalCoreHours
+                }
+            }
+        `,
+        variables: { jobFilters, paging }
+    })
+
     $: footprintsQuery = queryStore({
         client: client,
         query: gql`
@@ -164,8 +177,8 @@
                 <Pie
                     size={colWidth1}
-                    quantities={$statsQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.count)}
-                    entities={$statsQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.name)}
+                    quantities={$topQuery.data.jobsStatistics.map((tu) => tu.totalCoreHours)}
+                    entities={$topQuery.data.jobsStatistics.map((tu) => tu.id)}
                 />
             {/key}
@@ -173,11 +186,11 @@
         <Table>
             <tr><th>Legend</th><th>User Name</th><th>Core Hours</th></tr>
-            {#each $statsQuery.data.topUsers.sort((a, b) => b.count - a.count) as { name, count }, i}
+            {#each $topQuery.data.jobsStatistics as { id, totalCoreHours }, i}
                 <tr>
                     <td><Icon name="circle-fill" style="color: {colors[i]};"/></td>
-                    <td>{name}</td>
-                    <td>{count}</td>
+                    <td>{id}</td>
+                    <td>{totalCoreHours}</td>
                 </tr>
             {/each}
         </Table>
diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte
index 244862c..06a1893 100644
--- a/web/frontend/src/Status.root.svelte
+++ b/web/frontend/src/Status.root.svelte
@@ -38,9 +38,7 @@
                 histNumNodes { count, value }
             }
 
-            allocatedNodes(cluster: $cluster) { name, count }
-            topUsers: jobsCount(filter: $filter, groupBy: USER, weight: NODE_COUNT, limit: 10) { name, count }
-            topProjects: jobsCount(filter: $filter, groupBy: PROJECT, weight: NODE_COUNT, limit: 10) { name, count }
+            allocatedNodes(cluster: $cluster) { name, count }
         }`,
         variables: {
             cluster: cluster, metrics: ['flops_any', 'mem_bw'], from: from.toISOString(), to: to.toISOString(),
@@ -48,6 +46,36 @@
         }
     })
 
+    const paging = { itemsPerPage: 10, page: 1 }; // Top 10
+    // const sorting = { field: "totalCores", order: "DESC" };
+    $: topUserQuery = queryStore({
+        client: client,
+        query: gql`
+            query($filter: [JobFilter!]!, $paging: PageRequest!) {
+                topUser: jobsStatistics(filter: $filter, page: $paging, sortBy: TOTALCORES, groupBy: USER) {
+                    id
+                    totalNodes
+                    totalCores
+                }
+            }
+        `,
+        variables: { filter: [{ state: ['running'] }, { cluster: { eq: cluster } }], paging }
+    })
+
+    $: topProjectQuery = queryStore({
+        client: client,
+        query: gql`
+            query($filter: [JobFilter!]!, $paging: PageRequest!) {
+                topProjects: jobsStatistics(filter: $filter, page: $paging, sortBy: TOTALCORES, groupBy: PROJECT) {
+                    id
+                    totalNodes
+                    totalCores
+                }
+            }
+        `,
+        variables: { filter: [{ state: ['running'] }, { cluster: { eq: cluster } }], paging }
+    })
+
     const sumUp = (data, subcluster, metric) => data.reduce((sum, node) => node.subCluster == subcluster
         ? sum + (node.metrics.find(m => m.name == metric)?.metric.series.reduce((sum, series) => sum + series.data[series.data.length - 1], 0) || 0)
         : sum, 0)
@@ -161,48 +189,47 @@
         <Col>
             <h4>Top Users</h4>
-            {#key $mainQuery.data}
+            {#key $topUserQuery.data}
                 <Pie
                     size={colWidth1}
-                    quantities={$mainQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.count)}
-                    entities={$mainQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.name)}
-
+                    quantities={$topUserQuery.data.topUser.map((tu) => tu.totalCores)}
+                    entities={$topUserQuery.data.topUser.map((tu) => tu.id)}
                 />
             {/key}
         </Col>
         <Col>
             <Table>
-                <tr><th>Legend</th><th>User Name</th><th>Number of Nodes</th></tr>
-                {#each $mainQuery.data.topUsers.sort((a, b) => b.count - a.count) as { name, count }, i}
+                <tr><th>Legend</th><th>User Name</th><th>Number of Cores</th></tr>
+                {#each $topUserQuery.data.topUser as { id, totalCores, totalNodes }, i}
                     <tr>
                         <td><Icon name="circle-fill" style="color: {colors[i]};"/></td>
-                        <td>{name}</td>
-                        <td>{count}</td>
+                        <td>{id}</td>
+                        <td>{totalCores}</td>
                     </tr>
                 {/each}
             </Table>
         </Col>
         <Col>
             <h4>Top Projects</h4>
-            {#key $mainQuery.data}
+            {#key $topProjectQuery.data}
                 <Pie
                     size={colWidth1}
-                    quantities={$mainQuery.data.topProjects.sort((a, b) => b.count - a.count).map((tp) => tp.count)}
-                    entities={$mainQuery.data.topProjects.sort((a, b) => b.count - a.count).map((tp) => tp.name)}
+                    quantities={$topProjectQuery.data.topProjects.map((tp) => tp.totalCores)}
+                    entities={$topProjectQuery.data.topProjects.map((tp) => tp.id)}
                 />
             {/key}
         </Col>
        <Col>
             <Table>
-                <tr><th>Legend</th><th>Project Code</th><th>Number of Nodes</th></tr>
-                {#each $mainQuery.data.topProjects.sort((a, b) => b.count - a.count) as { name, count }, i}
+                <tr><th>Legend</th><th>Project Code</th><th>Number of Cores</th></tr>
+                {#each $topProjectQuery.data.topProjects as { id, totalCores, totalNodes }, i}
                     <tr>
                         <td><Icon name="circle-fill" style="color: {colors[i]};"/></td>
-                        <td>{name}</td>
-                        <td>{count}</td>
+                        <td>{id}</td>
+                        <td>{totalCores}</td>
                     </tr>
                 {/each}
             </Table>
        </Col>
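
Taken together, these changes let an API client group, page, and sort jobsStatistics by the new count totals and read them alongside the existing hour-based sums. A minimal illustrative query, not part of this patch, using only fields and enum values introduced or shown above:

    query ($filter: [JobFilter!]!, $paging: PageRequest!) {
      jobsStatistics(filter: $filter, page: $paging, sortBy: TOTALCORES, groupBy: USER) {
        id
        totalJobs
        totalWalltime
        totalNodes
        totalNodeHours
        totalCores
        totalCoreHours
        totalAccs
        totalAccHours
      }
    }

The two Svelte views above follow the same pattern but request only the subset of fields they actually display.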