Merge pull request #510 from ClusterCockpit/dev

Dev
This commit is contained in:
Jan Eitzinger
2026-03-04 17:53:52 +01:00
committed by GitHub
22 changed files with 948 additions and 1034 deletions

View File

@@ -1,3 +1,4 @@
version: 2
before: before:
hooks: hooks:
- go mod tidy - go mod tidy
@@ -34,6 +35,19 @@ builds:
main: ./tools/archive-manager main: ./tools/archive-manager
tags: tags:
- static_build - static_build
- env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- amd64
goamd64:
- v3
id: "archive-migration"
binary: archive-migration
main: ./tools/archive-migration
tags:
- static_build
- env: - env:
- CGO_ENABLED=0 - CGO_ENABLED=0
goos: goos:
@@ -48,7 +62,7 @@ builds:
tags: tags:
- static_build - static_build
archives: archives:
- format: tar.gz - formats: tar.gz
# this name template makes the OS and Arch compatible with the results of uname. # this name template makes the OS and Arch compatible with the results of uname.
name_template: >- name_template: >-
{{ .ProjectName }}_ {{ .ProjectName }}_
@@ -59,7 +73,7 @@ archives:
checksum: checksum:
name_template: "checksums.txt" name_template: "checksums.txt"
snapshot: snapshot:
name_template: "{{ incpatch .Version }}-next" version_template: "{{ incpatch .Version }}-next"
changelog: changelog:
sort: asc sort: asc
filters: filters:
@@ -87,7 +101,7 @@ changelog:
release: release:
draft: false draft: false
footer: | footer: |
Supports job archive version 2 and database version 8. Supports job archive version 3 and database version 10.
Please check out the [Release Notes](https://github.com/ClusterCockpit/cc-backend/blob/master/ReleaseNotes.md) for further details on breaking changes. Please check out the [Release Notes](https://github.com/ClusterCockpit/cc-backend/blob/master/ReleaseNotes.md) for further details on breaking changes.
# vim: set ts=2 sw=2 tw=0 fo=cnqoj # vim: set ts=2 sw=2 tw=0 fo=cnqoj

View File

@@ -16,6 +16,6 @@
"expr": "mem_used.max / mem_used.limits.peak * 100.0" "expr": "mem_used.max / mem_used.limits.peak * 100.0"
} }
], ],
"rule": "mem_used.max > memory_used.limits.alert", "rule": "mem_used.max > mem_used.limits.alert",
"hint": "This job used high memory: peak memory usage {{.mem_used.max}} GB ({{.memory_usage_pct}}% of {{.mem_used.limits.peak}} GB node capacity), exceeding the {{.highmemoryusage_threshold_factor}} utilization threshold. Risk of out-of-memory conditions." "hint": "This job used high memory: peak memory usage {{.mem_used.max}} GB ({{.memory_usage_pct}}% of {{.mem_used.limits.peak}} GB node capacity), exceeding the {{.highmemoryusage_threshold_factor}} utilization threshold. Risk of out-of-memory conditions."
} }

3
go.mod
View File

@@ -9,7 +9,7 @@ tool (
require ( require (
github.com/99designs/gqlgen v0.17.87 github.com/99designs/gqlgen v0.17.87
github.com/ClusterCockpit/cc-lib/v2 v2.7.0 github.com/ClusterCockpit/cc-lib/v2 v2.8.0
github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0 github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0
github.com/Masterminds/squirrel v1.5.4 github.com/Masterminds/squirrel v1.5.4
github.com/aws/aws-sdk-go-v2 v1.41.2 github.com/aws/aws-sdk-go-v2 v1.41.2
@@ -111,7 +111,6 @@ require (
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa // indirect
golang.org/x/mod v0.33.0 // indirect golang.org/x/mod v0.33.0 // indirect
golang.org/x/net v0.51.0 // indirect golang.org/x/net v0.51.0 // indirect
golang.org/x/sync v0.19.0 // indirect golang.org/x/sync v0.19.0 // indirect

6
go.sum
View File

@@ -4,8 +4,8 @@ github.com/99designs/gqlgen v0.17.87 h1:pSnCIMhBQezAE8bc1GNmfdLXFmnWtWl1GRDFEE/n
github.com/99designs/gqlgen v0.17.87/go.mod h1:fK05f1RqSNfQpd4CfW5qk/810Tqi4/56Wf6Nem0khAg= github.com/99designs/gqlgen v0.17.87/go.mod h1:fK05f1RqSNfQpd4CfW5qk/810Tqi4/56Wf6Nem0khAg=
github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A= github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=
github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk= github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
github.com/ClusterCockpit/cc-lib/v2 v2.7.0 h1:EMTShk6rMTR1wlfmQ8SVCawH1OdltUbD3kVQmaW+5pE= github.com/ClusterCockpit/cc-lib/v2 v2.8.0 h1:ROduRzRuusi+6kLB991AAu3Pp2AHOasQJFJc7JU/n/E=
github.com/ClusterCockpit/cc-lib/v2 v2.7.0/go.mod h1:0Etx8WMs0lYZ4tiOQizY18CQop+2i3WROvU9rMUxHA4= github.com/ClusterCockpit/cc-lib/v2 v2.8.0/go.mod h1:FwD8vnTIbBM3ngeLNKmCvp9FoSjQZm7xnuaVxEKR23o=
github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0 h1:hIzxgTBWcmCIHtoDKDkSCsKCOCOwUC34sFsbD2wcW0Q= github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0 h1:hIzxgTBWcmCIHtoDKDkSCsKCOCOwUC34sFsbD2wcW0Q=
github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0/go.mod h1:y42qUu+YFmu5fdNuUAS4VbbIKxVjxCvbVqFdpdh8ahY= github.com/ClusterCockpit/cc-line-protocol/v2 v2.4.0/go.mod h1:y42qUu+YFmu5fdNuUAS4VbbIKxVjxCvbVqFdpdh8ahY=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
@@ -307,8 +307,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDMEV06GpzjG2jrqW+QTE0=
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=

View File

@@ -37,23 +37,13 @@ package metricstoreclient
import ( import (
"fmt" "fmt"
"strconv"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/metricstore"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema" "github.com/ClusterCockpit/cc-lib/v2/schema"
) )
// Scope string constants used in API queries.
// Pre-converted to avoid repeated allocations during query building.
var (
hwthreadString = string(schema.MetricScopeHWThread)
coreString = string(schema.MetricScopeCore)
memoryDomainString = string(schema.MetricScopeMemoryDomain)
socketString = string(schema.MetricScopeSocket)
acceleratorString = string(schema.MetricScopeAccelerator)
)
// buildQueries constructs API queries for job-specific metric data. // buildQueries constructs API queries for job-specific metric data.
// It iterates through metrics, scopes, and job resources to build the complete query set. // It iterates through metrics, scopes, and job resources to build the complete query set.
// //
@@ -89,18 +79,9 @@ func (ccms *CCMetricStore) buildQueries(
} }
// Skip if metric is removed for subcluster // Skip if metric is removed for subcluster
if len(mc.SubClusters) != 0 { if len(mc.SubClusters) != 0 && metricstore.IsMetricRemovedForSubCluster(mc, job.SubCluster) {
isRemoved := false
for _, scConfig := range mc.SubClusters {
if scConfig.Name == job.SubCluster && scConfig.Remove {
isRemoved = true
break
}
}
if isRemoved {
continue continue
} }
}
// Avoid duplicates... // Avoid duplicates...
handledScopes := make([]schema.MetricScope, 0, 3) handledScopes := make([]schema.MetricScope, 0, 3)
@@ -126,21 +107,27 @@ func (ccms *CCMetricStore) buildQueries(
hwthreads = topology.Node hwthreads = topology.Node
} }
// Note: Expected exceptions will return as empty slices -> Continue scopeResults, ok := metricstore.BuildScopeQueries(
hostQueries, hostScopes := buildScopeQueries(
nativeScope, requestedScope, nativeScope, requestedScope,
remoteName, host.Hostname, remoteName, host.Hostname,
topology, hwthreads, host.Accelerators, topology, hwthreads, host.Accelerators,
resolution,
) )
// Note: Unexpected errors, such as unhandled cases, will return as nils -> Error if !ok {
if hostQueries == nil && hostScopes == nil { return nil, nil, fmt.Errorf("METRICDATA/EXTERNAL-CCMS > unsupported scope transformation: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
return nil, nil, fmt.Errorf("METRICDATA/EXTERNAL-CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
} }
queries = append(queries, hostQueries...) for _, sr := range scopeResults {
assignedScope = append(assignedScope, hostScopes...) queries = append(queries, APIQuery{
Metric: sr.Metric,
Hostname: sr.Hostname,
Aggregate: sr.Aggregate,
Type: sr.Type,
TypeIds: sr.TypeIds,
Resolution: resolution,
})
assignedScope = append(assignedScope, sr.Scope)
}
} }
} }
} }
@@ -179,18 +166,9 @@ func (ccms *CCMetricStore) buildNodeQueries(
} }
// Skip if metric is removed for subcluster // Skip if metric is removed for subcluster
if mc.SubClusters != nil { if mc.SubClusters != nil && metricstore.IsMetricRemovedForSubCluster(mc, subCluster) {
isRemoved := false
for _, scConfig := range mc.SubClusters {
if scConfig.Name == subCluster && scConfig.Remove {
isRemoved = true
break
}
}
if isRemoved {
continue continue
} }
}
// Avoid duplicates... // Avoid duplicates...
handledScopes := make([]schema.MetricScope, 0, 3) handledScopes := make([]schema.MetricScope, 0, 3)
@@ -231,19 +209,27 @@ func (ccms *CCMetricStore) buildNodeQueries(
continue scopesLoop continue scopesLoop
} }
nodeQueries, nodeScopes := buildScopeQueries( scopeResults, ok := metricstore.BuildScopeQueries(
nativeScope, requestedScope, nativeScope, requestedScope,
remoteName, hostname, remoteName, hostname,
topology, topology.Node, acceleratorIds, topology, topology.Node, acceleratorIds,
resolution,
) )
if len(nodeQueries) == 0 && len(nodeScopes) == 0 { if !ok {
return nil, nil, fmt.Errorf("METRICDATA/EXTERNAL-CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope) return nil, nil, fmt.Errorf("METRICDATA/EXTERNAL-CCMS > unsupported scope transformation: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
} }
queries = append(queries, nodeQueries...) for _, sr := range scopeResults {
assignedScope = append(assignedScope, nodeScopes...) queries = append(queries, APIQuery{
Metric: sr.Metric,
Hostname: sr.Hostname,
Aggregate: sr.Aggregate,
Type: sr.Type,
TypeIds: sr.TypeIds,
Resolution: resolution,
})
assignedScope = append(assignedScope, sr.Scope)
}
} }
} }
} }
@@ -251,277 +237,3 @@ func (ccms *CCMetricStore) buildNodeQueries(
return queries, assignedScope, nil return queries, assignedScope, nil
} }
// buildScopeQueries generates API queries for a given scope transformation.
// It returns a slice of queries and corresponding assigned scopes.
// Some transformations (e.g., HWThread -> Core/Socket) may generate multiple queries.
func buildScopeQueries(
nativeScope, requestedScope schema.MetricScope,
metric, hostname string,
topology *schema.Topology,
hwthreads []int,
accelerators []string,
resolution int,
) ([]APIQuery, []schema.MetricScope) {
scope := nativeScope.Max(requestedScope)
queries := []APIQuery{}
scopes := []schema.MetricScope{}
hwthreadsStr := intToStringSlice(hwthreads)
// Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node)
if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) {
if scope != schema.MetricScopeAccelerator {
// Expected Exception -> Continue -> Return Empty Slices
return queries, scopes
}
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &acceleratorString,
TypeIds: accelerators,
Resolution: resolution,
})
scopes = append(scopes, schema.MetricScopeAccelerator)
return queries, scopes
}
// Accelerator -> Node
if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode {
if len(accelerators) == 0 {
// Expected Exception -> Continue -> Return Empty Slices
return queries, scopes
}
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &acceleratorString,
TypeIds: accelerators,
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &hwthreadString,
TypeIds: hwthreadsStr,
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// HWThread -> Core
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
for _, core := range cores {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Core[core]),
Resolution: resolution,
})
scopes = append(scopes, scope)
}
return queries, scopes
}
// HWThread -> Socket
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
for _, socket := range sockets {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Socket[socket]),
Resolution: resolution,
})
scopes = append(scopes, scope)
}
return queries, scopes
}
// HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: hwthreadsStr,
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// Core -> Socket
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(hwthreads)
for _, socket := range sockets {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &coreString,
TypeIds: intToStringSlice(topology.Socket[socket]),
Resolution: resolution,
})
scopes = append(scopes, scope)
}
return queries, scopes
}
// Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
memDomains, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &memoryDomainString,
TypeIds: intToStringSlice(memDomains),
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
memDomains, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &memoryDomainString,
TypeIds: intToStringSlice(memDomains),
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// MemoryDomain -> Socket
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeSocket {
memDomains, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
socketToDomains, err := topology.GetMemoryDomainsBySocket(memDomains)
if err != nil {
cclog.Errorf("Error mapping memory domains to sockets, return unchanged: %v", err)
// Rare Error Case -> Still Continue -> Return Empty Slices
return queries, scopes
}
// Create a query for each socket
for _, domains := range socketToDomains {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &memoryDomainString,
TypeIds: intToStringSlice(domains),
Resolution: resolution,
})
// Add scope for each query, not just once
scopes = append(scopes, scope)
}
return queries, scopes
}
// Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Resolution: resolution,
})
scopes = append(scopes, scope)
return queries, scopes
}
// Unhandled Case -> Error -> Return nils
return nil, nil
}
// intToStringSlice converts a slice of integers to a slice of strings.
// Used to convert hardware IDs (core IDs, socket IDs, etc.) to the string format required by the API.
func intToStringSlice(is []int) []string {
ss := make([]string, len(is))
for i, x := range is {
ss[i] = strconv.Itoa(x)
}
return ss
}

View File

@@ -63,7 +63,7 @@ import (
"time" "time"
"github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/metricstore" ms "github.com/ClusterCockpit/cc-backend/pkg/metricstore"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger" cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema" "github.com/ClusterCockpit/cc-lib/v2/schema"
) )
@@ -275,13 +275,6 @@ func (ccms *CCMetricStore) LoadData(
} }
for i, row := range resBody.Results { for i, row := range resBody.Results {
// Safety check to prevent index out of range errors
if i >= len(req.Queries) || i >= len(assignedScope) {
cclog.Warnf("Index out of range prevented: i=%d, queries=%d, assignedScope=%d",
i, len(req.Queries), len(assignedScope))
continue
}
query := req.Queries[i] query := req.Queries[i]
metric := query.Metric metric := query.Metric
scope := assignedScope[i] scope := assignedScope[i]
@@ -318,20 +311,9 @@ func (ccms *CCMetricStore) LoadData(
continue continue
} }
id := (*string)(nil) id := ms.ExtractTypeID(query.Type, query.TypeIds, ndx, query.Metric, query.Hostname)
if query.Type != nil {
// Check if ndx is within the bounds of TypeIds slice
if ndx < len(query.TypeIds) {
id = new(string)
*id = query.TypeIds[ndx]
} else {
// Log the error but continue processing
cclog.Warnf("TypeIds index out of range: %d with length %d for metric %s on host %s",
ndx, len(query.TypeIds), query.Metric, query.Hostname)
}
}
sanitizeStats(&res.Avg, &res.Min, &res.Max) ms.SanitizeStats(&res.Avg, &res.Min, &res.Max)
jobMetric.Series = append(jobMetric.Series, schema.Series{ jobMetric.Series = append(jobMetric.Series, schema.Series{
Hostname: query.Hostname, Hostname: query.Hostname,
@@ -393,6 +375,10 @@ func (ccms *CCMetricStore) LoadStats(
stats := make(map[string]map[string]schema.MetricStatistics, len(metrics)) stats := make(map[string]map[string]schema.MetricStatistics, len(metrics))
for i, res := range resBody.Results { for i, res := range resBody.Results {
if i >= len(req.Queries) {
cclog.Warnf("LoadStats: result index %d exceeds queries length %d", i, len(req.Queries))
break
}
if len(res) == 0 { if len(res) == 0 {
// No Data Found For Metric, Logged in FetchData to Warn // No Data Found For Metric, Logged in FetchData to Warn
continue continue
@@ -481,20 +467,9 @@ func (ccms *CCMetricStore) LoadScopedStats(
continue continue
} }
id := (*string)(nil) id := ms.ExtractTypeID(query.Type, query.TypeIds, ndx, query.Metric, query.Hostname)
if query.Type != nil {
// Check if ndx is within the bounds of TypeIds slice
if ndx < len(query.TypeIds) {
id = new(string)
*id = query.TypeIds[ndx]
} else {
// Log the error but continue processing
cclog.Warnf("TypeIds index out of range: %d with length %d for metric %s on host %s",
ndx, len(query.TypeIds), query.Metric, query.Hostname)
}
}
sanitizeStats(&res.Avg, &res.Min, &res.Max) ms.SanitizeStats(&res.Avg, &res.Min, &res.Max)
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: query.Hostname, Hostname: query.Hostname,
@@ -582,9 +557,16 @@ func (ccms *CCMetricStore) LoadNodeData(
qdata := res[0] qdata := res[0]
if qdata.Error != nil { if qdata.Error != nil {
errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error)) errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error))
continue
} }
sanitizeStats(&qdata.Avg, &qdata.Min, &qdata.Max) mc := archive.GetMetricConfig(cluster, metric)
if mc == nil {
cclog.Warnf("Metric config not found for %s on cluster %s", metric, cluster)
continue
}
ms.SanitizeStats(&qdata.Avg, &qdata.Min, &qdata.Max)
hostdata, ok := data[query.Hostname] hostdata, ok := data[query.Hostname]
if !ok { if !ok {
@@ -592,7 +574,6 @@ func (ccms *CCMetricStore) LoadNodeData(
data[query.Hostname] = hostdata data[query.Hostname] = hostdata
} }
mc := archive.GetMetricConfig(cluster, metric)
hostdata[metric] = append(hostdata[metric], &schema.JobMetric{ hostdata[metric] = append(hostdata[metric], &schema.JobMetric{
Unit: mc.Unit, Unit: mc.Unit,
Timestep: mc.Timestep, Timestep: mc.Timestep,
@@ -680,13 +661,6 @@ func (ccms *CCMetricStore) LoadNodeListData(
} }
for i, row := range resBody.Results { for i, row := range resBody.Results {
// Safety check to prevent index out of range errors
if i >= len(req.Queries) || i >= len(assignedScope) {
cclog.Warnf("Index out of range prevented: i=%d, queries=%d, assignedScope=%d",
i, len(req.Queries), len(assignedScope))
continue
}
var query APIQuery var query APIQuery
if resBody.Queries != nil { if resBody.Queries != nil {
if i < len(resBody.Queries) { if i < len(resBody.Queries) {
@@ -743,20 +717,9 @@ func (ccms *CCMetricStore) LoadNodeListData(
continue continue
} }
id := (*string)(nil) id := ms.ExtractTypeID(query.Type, query.TypeIds, ndx, query.Metric, query.Hostname)
if query.Type != nil {
// Check if ndx is within the bounds of TypeIds slice
if ndx < len(query.TypeIds) {
id = new(string)
*id = query.TypeIds[ndx]
} else {
// Log the error but continue processing
cclog.Warnf("TypeIds index out of range: %d with length %d for metric %s on host %s",
ndx, len(query.TypeIds), query.Metric, query.Hostname)
}
}
sanitizeStats(&res.Avg, &res.Min, &res.Max) ms.SanitizeStats(&res.Avg, &res.Min, &res.Max)
scopeData.Series = append(scopeData.Series, schema.Series{ scopeData.Series = append(scopeData.Series, schema.Series{
Hostname: query.Hostname, Hostname: query.Hostname,
@@ -784,8 +747,8 @@ func (ccms *CCMetricStore) LoadNodeListData(
// returns the per-node health check results. // returns the per-node health check results.
func (ccms *CCMetricStore) HealthCheck(cluster string, func (ccms *CCMetricStore) HealthCheck(cluster string,
nodes []string, metrics []string, nodes []string, metrics []string,
) (map[string]metricstore.HealthCheckResult, error) { ) (map[string]ms.HealthCheckResult, error) {
req := metricstore.HealthCheckReq{ req := ms.HealthCheckReq{
Cluster: cluster, Cluster: cluster,
Nodes: nodes, Nodes: nodes,
MetricNames: metrics, MetricNames: metrics,
@@ -818,7 +781,7 @@ func (ccms *CCMetricStore) HealthCheck(cluster string,
return nil, fmt.Errorf("'%s': HTTP Status: %s", endpoint, res.Status) return nil, fmt.Errorf("'%s': HTTP Status: %s", endpoint, res.Status)
} }
var results map[string]metricstore.HealthCheckResult var results map[string]ms.HealthCheckResult
if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&results); err != nil { if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&results); err != nil {
cclog.Errorf("Error while decoding health check response: %s", err.Error()) cclog.Errorf("Error while decoding health check response: %s", err.Error())
return nil, err return nil, err
@@ -827,16 +790,6 @@ func (ccms *CCMetricStore) HealthCheck(cluster string,
return results, nil return results, nil
} }
// sanitizeStats replaces NaN values in statistics with 0 to enable JSON marshaling.
// Regular float64 values cannot be JSONed when NaN.
func sanitizeStats(avg, min, max *schema.Float) {
if avg.IsNaN() || min.IsNaN() || max.IsNaN() {
*avg = schema.Float(0)
*min = schema.Float(0)
*max = schema.Float(0)
}
}
// hasNaNStats returns true if any of the statistics contain NaN values. // hasNaNStats returns true if any of the statistics contain NaN values.
func hasNaNStats(avg, min, max schema.Float) bool { func hasNaNStats(avg, min, max schema.Float) bool {
return avg.IsNaN() || min.IsNaN() || max.IsNaN() return avg.IsNaN() || min.IsNaN() || max.IsNaN()

View File

@@ -405,6 +405,17 @@ func buildFilterPresets(query url.Values) map[string]any {
if query.Get("energy") != "" { if query.Get("energy") != "" {
parts := strings.Split(query.Get("energy"), "-") parts := strings.Split(query.Get("energy"), "-")
if len(parts) == 2 { if len(parts) == 2 {
if parts[0] == "lessthan" {
lt, lte := strconv.Atoi(parts[1])
if lte == nil {
filterPresets["energy"] = map[string]int{"from": 1, "to": lt}
}
} else if parts[0] == "morethan" {
mt, mte := strconv.Atoi(parts[1])
if mte == nil {
filterPresets["energy"] = map[string]int{"from": mt, "to": 0}
}
} else {
a, e1 := strconv.Atoi(parts[0]) a, e1 := strconv.Atoi(parts[0])
b, e2 := strconv.Atoi(parts[1]) b, e2 := strconv.Atoi(parts[1])
if e1 == nil && e2 == nil { if e1 == nil && e2 == nil {
@@ -412,11 +423,33 @@ func buildFilterPresets(query url.Values) map[string]any {
} }
} }
} }
}
if len(query["stat"]) != 0 { if len(query["stat"]) != 0 {
statList := make([]map[string]any, 0) statList := make([]map[string]any, 0)
for _, statEntry := range query["stat"] { for _, statEntry := range query["stat"] {
parts := strings.Split(statEntry, "-") parts := strings.Split(statEntry, "-")
if len(parts) == 3 { // Metric Footprint Stat Field, from - to if len(parts) == 3 { // Metric Footprint Stat Field, from - to
if parts[1] == "lessthan" {
lt, lte := strconv.ParseInt(parts[2], 10, 64)
if lte == nil {
statEntry := map[string]any{
"field": parts[0],
"from": 1,
"to": lt,
}
statList = append(statList, statEntry)
}
} else if parts[1] == "morethan" {
mt, mte := strconv.ParseInt(parts[2], 10, 64)
if mte == nil {
statEntry := map[string]any{
"field": parts[0],
"from": mt,
"to": 0,
}
statList = append(statList, statEntry)
}
} else {
a, e1 := strconv.ParseInt(parts[1], 10, 64) a, e1 := strconv.ParseInt(parts[1], 10, 64)
b, e2 := strconv.ParseInt(parts[2], 10, 64) b, e2 := strconv.ParseInt(parts[2], 10, 64)
if e1 == nil && e2 == nil { if e1 == nil && e2 == nil {
@@ -429,6 +462,7 @@ func buildFilterPresets(query url.Values) map[string]any {
} }
} }
} }
}
filterPresets["stats"] = statList filterPresets["stats"] = statList
} }
return filterPresets return filterPresets

View File

@@ -309,7 +309,7 @@ func (t *JobClassTagger) Register() error {
func (t *JobClassTagger) Match(job *schema.Job) { func (t *JobClassTagger) Match(job *schema.Job) {
jobStats, err := t.getStatistics(job) jobStats, err := t.getStatistics(job)
metricsList := t.getMetricConfig(job.Cluster, job.SubCluster) metricsList := t.getMetricConfig(job.Cluster, job.SubCluster)
cclog.Infof("Enter match rule with %d rules for job %d", len(t.rules), job.JobID) cclog.Debugf("Enter match rule with %d rules for job %d", len(t.rules), job.JobID)
if err != nil { if err != nil {
cclog.Errorf("job classification failed for job %d: %#v", job.JobID, err) cclog.Errorf("job classification failed for job %d: %#v", job.JobID, err)
return return
@@ -321,7 +321,7 @@ func (t *JobClassTagger) Match(job *schema.Job) {
for tag, ri := range t.rules { for tag, ri := range t.rules {
env := make(map[string]any) env := make(map[string]any)
maps.Copy(env, ri.env) maps.Copy(env, ri.env)
cclog.Infof("Try to match rule %s for job %d", tag, job.JobID) cclog.Debugf("Try to match rule %s for job %d", tag, job.JobID)
// Initialize environment // Initialize environment
env["job"] = map[string]any{ env["job"] = map[string]any{
@@ -369,7 +369,7 @@ func (t *JobClassTagger) Match(job *schema.Job) {
break break
} }
if !ok.(bool) { if !ok.(bool) {
cclog.Infof("requirement for rule %s not met", tag) cclog.Debugf("requirement for rule %s not met", tag)
requirementsMet = false requirementsMet = false
break break
} }
@@ -399,7 +399,6 @@ func (t *JobClassTagger) Match(job *schema.Job) {
continue continue
} }
if match.(bool) { if match.(bool) {
cclog.Info("Rule matches!")
if !t.repo.HasTag(id, t.tagType, tag) { if !t.repo.HasTag(id, t.tagType, tag) {
if _, err := t.repo.AddTagOrCreateDirect(id, t.tagType, tag); err != nil { if _, err := t.repo.AddTagOrCreateDirect(id, t.tagType, tag); err != nil {
cclog.Errorf("failed to add tag '%s' to job %d: %v", tag, id, err) cclog.Errorf("failed to add tag '%s' to job %d: %v", tag, id, err)
@@ -414,8 +413,6 @@ func (t *JobClassTagger) Match(job *schema.Job) {
continue continue
} }
messages = append(messages, msg.String()) messages = append(messages, msg.String())
} else {
cclog.Info("Rule does not match!")
} }
} }

View File

@@ -178,24 +178,24 @@ func (t *AppTagger) Match(job *schema.Job) {
metadata, err := r.FetchMetadata(job) metadata, err := r.FetchMetadata(job)
if err != nil { if err != nil {
cclog.Infof("AppTagger: cannot fetch metadata for job %d on %s: %v", job.JobID, job.Cluster, err) cclog.Debugf("AppTagger: cannot fetch metadata for job %d on %s: %v", job.JobID, job.Cluster, err)
return return
} }
if metadata == nil { if metadata == nil {
cclog.Infof("AppTagger: metadata is nil for job %d on %s", job.JobID, job.Cluster) cclog.Debugf("AppTagger: metadata is nil for job %d on %s", job.JobID, job.Cluster)
return return
} }
jobscript, ok := metadata["jobScript"] jobscript, ok := metadata["jobScript"]
if !ok { if !ok {
cclog.Infof("AppTagger: no 'jobScript' key in metadata for job %d on %s (keys: %v)", cclog.Debugf("AppTagger: no 'jobScript' key in metadata for job %d on %s (keys: %v)",
job.JobID, job.Cluster, metadataKeys(metadata)) job.JobID, job.Cluster, metadataKeys(metadata))
return return
} }
if len(jobscript) == 0 { if len(jobscript) == 0 {
cclog.Infof("AppTagger: empty jobScript for job %d on %s", job.JobID, job.Cluster) cclog.Debugf("AppTagger: empty jobScript for job %d on %s", job.JobID, job.Cluster)
return return
} }
@@ -210,7 +210,7 @@ func (t *AppTagger) Match(job *schema.Job) {
if r.HasTag(id, t.tagType, a.tag) { if r.HasTag(id, t.tagType, a.tag) {
cclog.Debugf("AppTagger: job %d already has tag %s:%s, skipping", id, t.tagType, a.tag) cclog.Debugf("AppTagger: job %d already has tag %s:%s, skipping", id, t.tagType, a.tag)
} else { } else {
cclog.Infof("AppTagger: pattern '%s' matched for app '%s' on job %d", re.String(), a.tag, id) cclog.Debugf("AppTagger: pattern '%s' matched for app '%s' on job %d", re.String(), a.tag, id)
if _, err := r.AddTagOrCreateDirect(id, t.tagType, a.tag); err != nil { if _, err := r.AddTagOrCreateDirect(id, t.tagType, a.tag); err != nil {
cclog.Errorf("AppTagger: failed to add tag '%s' to job %d: %v", a.tag, id, err) cclog.Errorf("AppTagger: failed to add tag '%s' to job %d: %v", a.tag, id, err)
} }

View File

@@ -126,6 +126,9 @@ func initClusterConfig() error {
if newMetric.Energy != "" { if newMetric.Energy != "" {
sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name) sc.EnergyFootprint = append(sc.EnergyFootprint, newMetric.Name)
} }
// Init Topology Lookup Maps Once Per Subcluster
sc.Topology.InitTopologyMaps()
} }
item := metricLookup[mc.Name] item := metricLookup[mc.Name]

View File

@@ -166,10 +166,10 @@ func (m *MemoryStore) HealthCheck(cluster string,
healthyCount := len(expectedMetrics) - degradedCount - missingCount healthyCount := len(expectedMetrics) - degradedCount - missingCount
if degradedCount > 0 { if degradedCount > 0 {
cclog.ComponentInfo("metricstore", "HealthCheck: node ", hostname, "degraded metrics:", degradedList) cclog.ComponentDebug("metricstore", "HealthCheck: node ", hostname, "degraded metrics:", degradedList)
} }
if missingCount > 0 { if missingCount > 0 {
cclog.ComponentInfo("metricstore", "HealthCheck: node ", hostname, "missing metrics:", missingList) cclog.ComponentDebug("metricstore", "HealthCheck: node ", hostname, "missing metrics:", missingList)
} }
var state schema.MonitoringState var state schema.MonitoringState

View File

@@ -235,7 +235,7 @@ func InitMetrics(metrics map[string]MetricConfig) {
// This function is safe for concurrent use after initialization. // This function is safe for concurrent use after initialization.
func GetMemoryStore() *MemoryStore { func GetMemoryStore() *MemoryStore {
if msInstance == nil { if msInstance == nil {
cclog.Fatalf("[METRICSTORE]> MemoryStore not initialized!") cclog.Warnf("[METRICSTORE]> MemoryStore not initialized!")
} }
return msInstance return msInstance

View File

@@ -29,7 +29,6 @@ package metricstore
import ( import (
"context" "context"
"fmt" "fmt"
"strconv"
"strings" "strings"
"time" "time"
@@ -130,13 +129,6 @@ func (ccms *InternalMetricStore) LoadData(
} }
for i, row := range resBody.Results { for i, row := range resBody.Results {
// Safety check to prevent index out of range errors
if i >= len(req.Queries) || i >= len(assignedScope) {
cclog.Warnf("Index out of range prevented: i=%d, queries=%d, assignedScope=%d",
i, len(req.Queries), len(assignedScope))
continue
}
query := req.Queries[i] query := req.Queries[i]
metric := query.Metric metric := query.Metric
scope := assignedScope[i] scope := assignedScope[i]
@@ -173,20 +165,9 @@ func (ccms *InternalMetricStore) LoadData(
continue continue
} }
id := (*string)(nil) id := ExtractTypeID(query.Type, query.TypeIds, ndx, query.Metric, query.Hostname)
if query.Type != nil {
// Check if ndx is within the bounds of TypeIds slice
if ndx < len(query.TypeIds) {
id = new(string)
*id = query.TypeIds[ndx]
} else {
// Log the error but continue processing
cclog.Warnf("TypeIds index out of range: %d with length %d for metric %s on host %s",
ndx, len(query.TypeIds), query.Metric, query.Hostname)
}
}
sanitizeStats(&res) SanitizeStats(&res.Avg, &res.Min, &res.Max)
jobMetric.Series = append(jobMetric.Series, schema.Series{ jobMetric.Series = append(jobMetric.Series, schema.Series{
Hostname: query.Hostname, Hostname: query.Hostname,
@@ -216,18 +197,6 @@ func (ccms *InternalMetricStore) LoadData(
return jobData, nil return jobData, nil
} }
// Pre-converted scope strings avoid repeated string(MetricScope) allocations during
// query construction. These are used in APIQuery.Type field throughout buildQueries
// and buildNodeQueries functions. Converting once at package initialization improves
// performance for high-volume query building.
var (
hwthreadString = string(schema.MetricScopeHWThread)
coreString = string(schema.MetricScopeCore)
memoryDomainString = string(schema.MetricScopeMemoryDomain)
socketString = string(schema.MetricScopeSocket)
acceleratorString = string(schema.MetricScopeAccelerator)
)
// buildQueries constructs APIQuery structures with automatic scope transformation for a job. // buildQueries constructs APIQuery structures with automatic scope transformation for a job.
// //
// This function implements the core scope transformation logic, handling all combinations of // This function implements the core scope transformation logic, handling all combinations of
@@ -264,7 +233,7 @@ func buildQueries(
} }
queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
assignedScope := []schema.MetricScope{} assignedScope := make([]schema.MetricScope, 0, len(metrics)*len(scopes)*len(job.Resources))
subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster) subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
if scerr != nil { if scerr != nil {
@@ -280,22 +249,14 @@ func buildQueries(
} }
// Skip if metric is removed for subcluster // Skip if metric is removed for subcluster
if len(mc.SubClusters) != 0 { if len(mc.SubClusters) != 0 && IsMetricRemovedForSubCluster(mc, job.SubCluster) {
isRemoved := false
for _, scConfig := range mc.SubClusters {
if scConfig.Name == job.SubCluster && scConfig.Remove {
isRemoved = true
break
}
}
if isRemoved {
continue continue
} }
}
// Avoid duplicates using map for O(1) lookup // Avoid duplicates...
handledScopes := make(map[schema.MetricScope]bool, 3) handledScopes := make([]schema.MetricScope, 0, 3)
scopesLoop:
for _, requestedScope := range scopes { for _, requestedScope := range scopes {
nativeScope := mc.Scope nativeScope := mc.Scope
if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == 0 { if nativeScope == schema.MetricScopeAccelerator && job.NumAcc == 0 {
@@ -303,10 +264,12 @@ func buildQueries(
} }
scope := nativeScope.Max(requestedScope) scope := nativeScope.Max(requestedScope)
if handledScopes[scope] { for _, s := range handledScopes {
continue if scope == s {
continue scopesLoop
} }
handledScopes[scope] = true }
handledScopes = append(handledScopes, scope)
for _, host := range job.Resources { for _, host := range job.Resources {
hwthreads := host.HWThreads hwthreads := host.HWThreads
@@ -314,224 +277,27 @@ func buildQueries(
hwthreads = topology.Node hwthreads = topology.Node
} }
// Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node) scopeResults, ok := BuildScopeQueries(
if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) { nativeScope, requestedScope,
if scope != schema.MetricScopeAccelerator { metric, host.Hostname,
// Skip all other catched cases &topology, hwthreads, host.Accelerators,
continue )
if !ok {
return nil, nil, fmt.Errorf("METRICDATA/INTERNAL-CCMS > unsupported scope transformation: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
} }
for _, sr := range scopeResults {
queries = append(queries, APIQuery{ queries = append(queries, APIQuery{
Metric: metric, Metric: sr.Metric,
Hostname: host.Hostname, Hostname: sr.Hostname,
Aggregate: false, Aggregate: sr.Aggregate,
Type: &acceleratorString, Type: sr.Type,
TypeIds: host.Accelerators, TypeIds: sr.TypeIds,
Resolution: resolution, Resolution: resolution,
}) })
assignedScope = append(assignedScope, schema.MetricScopeAccelerator) assignedScope = append(assignedScope, sr.Scope)
continue
} }
// Accelerator -> Node
if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode {
if len(host.Accelerators) == 0 {
continue
}
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &acceleratorString,
TypeIds: host.Accelerators,
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: false,
Type: &hwthreadString,
TypeIds: intToStringSlice(hwthreads),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// HWThread -> Core
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
for _, core := range cores {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Core[core]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// HWThread -> Socket
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
for _, socket := range sockets {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Socket[socket]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(hwthreads),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: false,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Core -> Socket
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(hwthreads)
for _, socket := range sockets {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &coreString,
TypeIds: intToStringSlice(topology.Socket[socket]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: false,
Type: &memoryDomainString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &memoryDomainString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: false,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Aggregate: true,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: host.Hostname,
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
return nil, nil, fmt.Errorf("METRICDATA/INTERNAL-CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
} }
} }
} }
@@ -581,6 +347,10 @@ func (ccms *InternalMetricStore) LoadStats(
stats := make(map[string]map[string]schema.MetricStatistics, len(metrics)) stats := make(map[string]map[string]schema.MetricStatistics, len(metrics))
for i, res := range resBody.Results { for i, res := range resBody.Results {
if i >= len(req.Queries) {
cclog.Warnf("LoadStats: result index %d exceeds queries length %d", i, len(req.Queries))
break
}
if len(res) == 0 { if len(res) == 0 {
// No Data Found For Metric, Logged in FetchData to Warn // No Data Found For Metric, Logged in FetchData to Warn
continue continue
@@ -682,20 +452,9 @@ func (ccms *InternalMetricStore) LoadScopedStats(
continue continue
} }
id := (*string)(nil) id := ExtractTypeID(query.Type, query.TypeIds, ndx, query.Metric, query.Hostname)
if query.Type != nil {
// Check if ndx is within the bounds of TypeIds slice
if ndx < len(query.TypeIds) {
id = new(string)
*id = query.TypeIds[ndx]
} else {
// Log the error but continue processing
cclog.Warnf("TypeIds index out of range: %d with length %d for metric %s on host %s",
ndx, len(query.TypeIds), query.Metric, query.Hostname)
}
}
sanitizeStats(&res) SanitizeStats(&res.Avg, &res.Min, &res.Max)
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{ scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: query.Hostname, Hostname: query.Hostname,
@@ -794,9 +553,16 @@ func (ccms *InternalMetricStore) LoadNodeData(
qdata := res[0] qdata := res[0]
if qdata.Error != nil { if qdata.Error != nil {
errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error)) errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error))
continue
} }
sanitizeStats(&qdata) mc := archive.GetMetricConfig(cluster, metric)
if mc == nil {
cclog.Warnf("Metric config not found for %s on cluster %s", metric, cluster)
continue
}
SanitizeStats(&qdata.Avg, &qdata.Min, &qdata.Max)
hostdata, ok := data[query.Hostname] hostdata, ok := data[query.Hostname]
if !ok { if !ok {
@@ -804,7 +570,6 @@ func (ccms *InternalMetricStore) LoadNodeData(
data[query.Hostname] = hostdata data[query.Hostname] = hostdata
} }
mc := archive.GetMetricConfig(cluster, metric)
hostdata[metric] = append(hostdata[metric], &schema.JobMetric{ hostdata[metric] = append(hostdata[metric], &schema.JobMetric{
Unit: mc.Unit, Unit: mc.Unit,
Timestep: mc.Timestep, Timestep: mc.Timestep,
@@ -901,13 +666,6 @@ func (ccms *InternalMetricStore) LoadNodeListData(
} }
for i, row := range resBody.Results { for i, row := range resBody.Results {
// Safety check to prevent index out of range errors
if i >= len(req.Queries) || i >= len(assignedScope) {
cclog.Warnf("Index out of range prevented: i=%d, queries=%d, assignedScope=%d",
i, len(req.Queries), len(assignedScope))
continue
}
var query APIQuery var query APIQuery
if resBody.Queries != nil { if resBody.Queries != nil {
if i < len(resBody.Queries) { if i < len(resBody.Queries) {
@@ -964,20 +722,9 @@ func (ccms *InternalMetricStore) LoadNodeListData(
continue continue
} }
id := (*string)(nil) id := ExtractTypeID(query.Type, query.TypeIds, ndx, query.Metric, query.Hostname)
if query.Type != nil {
// Check if ndx is within the bounds of TypeIds slice
if ndx < len(query.TypeIds) {
id = new(string)
*id = query.TypeIds[ndx]
} else {
// Log the error but continue processing
cclog.Warnf("TypeIds index out of range: %d with length %d for metric %s on host %s",
ndx, len(query.TypeIds), query.Metric, query.Hostname)
}
}
sanitizeStats(&res) SanitizeStats(&res.Avg, &res.Min, &res.Max)
scopeData.Series = append(scopeData.Series, schema.Series{ scopeData.Series = append(scopeData.Series, schema.Series{
Hostname: query.Hostname, Hostname: query.Hostname,
@@ -1026,7 +773,7 @@ func buildNodeQueries(
resolution int64, resolution int64,
) ([]APIQuery, []schema.MetricScope, error) { ) ([]APIQuery, []schema.MetricScope, error) {
queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(nodes)) queries := make([]APIQuery, 0, len(metrics)*len(scopes)*len(nodes))
assignedScope := []schema.MetricScope{} assignedScope := make([]schema.MetricScope, 0, len(metrics)*len(scopes)*len(nodes))
// Get Topol before loop if subCluster given // Get Topol before loop if subCluster given
var subClusterTopol *schema.SubCluster var subClusterTopol *schema.SubCluster
@@ -1047,30 +794,24 @@ func buildNodeQueries(
} }
// Skip if metric is removed for subcluster // Skip if metric is removed for subcluster
if mc.SubClusters != nil { if mc.SubClusters != nil && IsMetricRemovedForSubCluster(mc, subCluster) {
isRemoved := false
for _, scConfig := range mc.SubClusters {
if scConfig.Name == subCluster && scConfig.Remove {
isRemoved = true
break
}
}
if isRemoved {
continue continue
} }
}
// Avoid duplicates using map for O(1) lookup // Avoid duplicates...
handledScopes := make(map[schema.MetricScope]bool, 3) handledScopes := make([]schema.MetricScope, 0, 3)
nodeScopesLoop:
for _, requestedScope := range scopes { for _, requestedScope := range scopes {
nativeScope := mc.Scope nativeScope := mc.Scope
scope := nativeScope.Max(requestedScope) scope := nativeScope.Max(requestedScope)
if handledScopes[scope] { for _, s := range handledScopes {
continue if scope == s {
continue nodeScopesLoop
} }
handledScopes[scope] = true }
handledScopes = append(handledScopes, scope)
for _, hostname := range nodes { for _, hostname := range nodes {
@@ -1086,8 +827,7 @@ func buildNodeQueries(
} }
} }
// Always full node hwthread id list, no partial queries expected -> Use "topology.Node" directly where applicable // Always full node hwthread id list, no partial queries expected
// Always full accelerator id list, no partial queries expected -> Use "acceleratorIds" directly where applicable
topology := subClusterTopol.Topology topology := subClusterTopol.Topology
acceleratorIds := topology.GetAcceleratorIDs() acceleratorIds := topology.GetAcceleratorIDs()
@@ -1096,262 +836,30 @@ func buildNodeQueries(
continue continue
} }
// Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node) scopeResults, ok := BuildScopeQueries(
if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) { nativeScope, requestedScope,
if scope != schema.MetricScopeAccelerator { metric, hostname,
// Skip all other catched cases &topology, topology.Node, acceleratorIds,
continue )
if !ok {
return nil, nil, fmt.Errorf("METRICDATA/INTERNAL-CCMS > unsupported scope transformation: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
} }
for _, sr := range scopeResults {
queries = append(queries, APIQuery{ queries = append(queries, APIQuery{
Metric: metric, Metric: sr.Metric,
Hostname: hostname, Hostname: sr.Hostname,
Aggregate: false, Aggregate: sr.Aggregate,
Type: &acceleratorString, Type: sr.Type,
TypeIds: acceleratorIds, TypeIds: sr.TypeIds,
Resolution: resolution, Resolution: resolution,
}) })
assignedScope = append(assignedScope, schema.MetricScopeAccelerator) assignedScope = append(assignedScope, sr.Scope)
continue
} }
// Accelerator -> Node
if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode {
if len(acceleratorIds) == 0 {
continue
}
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &acceleratorString,
TypeIds: acceleratorIds,
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Node),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// HWThread -> Core
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
for _, core := range cores {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Core[core]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// HWThread -> Socket
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
for _, socket := range sockets {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Socket[socket]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &hwthreadString,
TypeIds: intToStringSlice(topology.Node),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Core -> Socket
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(topology.Node)
for _, socket := range sockets {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &coreString,
TypeIds: intToStringSlice(topology.Socket[socket]),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
}
continue
}
// Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(topology.Node)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &coreString,
TypeIds: intToStringSlice(cores),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &memoryDomainString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
sockets, _ := topology.GetMemoryDomainsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &memoryDomainString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(topology.Node)
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &socketString,
TypeIds: intToStringSlice(sockets),
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
// Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
queries = append(queries, APIQuery{
Metric: metric,
Hostname: hostname,
Resolution: resolution,
})
assignedScope = append(assignedScope, scope)
continue
}
return nil, nil, fmt.Errorf("METRICDATA/INTERNAL-CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
} }
} }
} }
return queries, assignedScope, nil return queries, assignedScope, nil
} }
// sanitizeStats converts NaN statistics to zero for JSON compatibility.
//
// schema.Float with NaN values cannot be properly JSON-encoded, so we convert
// NaN to 0. This loses the distinction between "no data" and "zero value",
// but maintains API compatibility.
func sanitizeStats(data *APIMetricData) {
if data.Avg.IsNaN() {
data.Avg = schema.Float(0)
}
if data.Min.IsNaN() {
data.Min = schema.Float(0)
}
if data.Max.IsNaN() {
data.Max = schema.Float(0)
}
}
// intToStringSlice converts a slice of integers to a slice of strings.
// Used to convert hardware thread/core/socket IDs from topology (int) to APIQuery TypeIds (string).
//
// Optimized to reuse a byte buffer for string conversion, reducing allocations.
func intToStringSlice(is []int) []string {
if len(is) == 0 {
return nil
}
ss := make([]string, len(is))
buf := make([]byte, 0, 16) // Reusable buffer for integer conversion
for i, x := range is {
buf = strconv.AppendInt(buf[:0], int64(x), 10)
ss[i] = string(buf)
}
return ss
}

View File

@@ -0,0 +1,341 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// This file contains shared scope transformation logic used by both the internal
// metric store (pkg/metricstore) and the external cc-metric-store client
// (internal/metricstoreclient). It extracts the common algorithm for mapping
// between native metric scopes and requested scopes based on cluster topology.
package metricstore
import (
"strconv"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema"
)
// Pre-converted scope strings avoid repeated string(MetricScope) allocations
// during query construction. Used in ScopeQueryResult.Type field.
var (
HWThreadString = string(schema.MetricScopeHWThread)
CoreString = string(schema.MetricScopeCore)
MemoryDomainString = string(schema.MetricScopeMemoryDomain)
SocketString = string(schema.MetricScopeSocket)
AcceleratorString = string(schema.MetricScopeAccelerator)
)
// ScopeQueryResult is a package-independent intermediate type returned by
// BuildScopeQueries. Each consumer converts it to their own APIQuery type
// (adding Resolution and any other package-specific fields).
type ScopeQueryResult struct {
Type *string
Metric string
Hostname string
TypeIds []string
Scope schema.MetricScope
Aggregate bool
}
// BuildScopeQueries generates scope query results for a given scope transformation.
// It returns a slice of results and a boolean indicating success.
// An empty slice means an expected exception (skip this combination).
// ok=false means an unhandled case (caller should return an error).
func BuildScopeQueries(
nativeScope, requestedScope schema.MetricScope,
metric, hostname string,
topology *schema.Topology,
hwthreads []int,
accelerators []string,
) ([]ScopeQueryResult, bool) {
scope := nativeScope.Max(requestedScope)
results := []ScopeQueryResult{}
hwthreadsStr := IntToStringSlice(hwthreads)
// Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node)
if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) {
if scope != schema.MetricScopeAccelerator {
// Expected Exception -> Return Empty Slice
return results, true
}
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &AcceleratorString,
TypeIds: accelerators,
Scope: schema.MetricScopeAccelerator,
})
return results, true
}
// Accelerator -> Node
if nativeScope == schema.MetricScopeAccelerator && scope == schema.MetricScopeNode {
if len(accelerators) == 0 {
// Expected Exception -> Return Empty Slice
return results, true
}
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &AcceleratorString,
TypeIds: accelerators,
Scope: scope,
})
return results, true
}
// HWThread -> HWThread
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeHWThread {
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &HWThreadString,
TypeIds: hwthreadsStr,
Scope: scope,
})
return results, true
}
// HWThread -> Core
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
for _, core := range cores {
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &HWThreadString,
TypeIds: IntToStringSlice(topology.Core[core]),
Scope: scope,
})
}
return results, true
}
// HWThread -> Socket
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
for _, socket := range sockets {
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &HWThreadString,
TypeIds: IntToStringSlice(topology.Socket[socket]),
Scope: scope,
})
}
return results, true
}
// HWThread -> Node
if nativeScope == schema.MetricScopeHWThread && scope == schema.MetricScopeNode {
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &HWThreadString,
TypeIds: hwthreadsStr,
Scope: scope,
})
return results, true
}
// Core -> Core
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeCore {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &CoreString,
TypeIds: IntToStringSlice(cores),
Scope: scope,
})
return results, true
}
// Core -> Socket
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromCores(hwthreads)
for _, socket := range sockets {
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &CoreString,
TypeIds: IntToStringSlice(topology.Socket[socket]),
Scope: scope,
})
}
return results, true
}
// Core -> Node
if nativeScope == schema.MetricScopeCore && scope == schema.MetricScopeNode {
cores, _ := topology.GetCoresFromHWThreads(hwthreads)
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &CoreString,
TypeIds: IntToStringSlice(cores),
Scope: scope,
})
return results, true
}
// MemoryDomain -> MemoryDomain
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeMemoryDomain {
memDomains, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &MemoryDomainString,
TypeIds: IntToStringSlice(memDomains),
Scope: scope,
})
return results, true
}
// MemoryDomain -> Socket
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeSocket {
memDomains, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
socketToDomains, err := topology.GetMemoryDomainsBySocket(memDomains)
if err != nil {
cclog.Errorf("Error mapping memory domains to sockets, return unchanged: %v", err)
// Rare Error Case -> Still Continue -> Return Empty Slice
return results, true
}
// Create a query for each socket
for _, domains := range socketToDomains {
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &MemoryDomainString,
TypeIds: IntToStringSlice(domains),
Scope: scope,
})
}
return results, true
}
// MemoryDomain -> Node
if nativeScope == schema.MetricScopeMemoryDomain && scope == schema.MetricScopeNode {
memDomains, _ := topology.GetMemoryDomainsFromHWThreads(hwthreads)
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &MemoryDomainString,
TypeIds: IntToStringSlice(memDomains),
Scope: scope,
})
return results, true
}
// Socket -> Socket
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeSocket {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: false,
Type: &SocketString,
TypeIds: IntToStringSlice(sockets),
Scope: scope,
})
return results, true
}
// Socket -> Node
if nativeScope == schema.MetricScopeSocket && scope == schema.MetricScopeNode {
sockets, _ := topology.GetSocketsFromHWThreads(hwthreads)
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Aggregate: true,
Type: &SocketString,
TypeIds: IntToStringSlice(sockets),
Scope: scope,
})
return results, true
}
// Node -> Node
if nativeScope == schema.MetricScopeNode && scope == schema.MetricScopeNode {
results = append(results, ScopeQueryResult{
Metric: metric,
Hostname: hostname,
Scope: scope,
})
return results, true
}
// Unhandled Case
return nil, false
}
// IntToStringSlice converts a slice of integers to a slice of strings.
// Used to convert hardware thread/core/socket IDs from topology (int) to query TypeIds (string).
// Returns nil (not an empty slice) for nil or empty input.
func IntToStringSlice(is []int) []string {
	if len(is) == 0 {
		return nil
	}
	converted := make([]string, 0, len(is))
	for _, v := range is {
		converted = append(converted, strconv.Itoa(v))
	}
	return converted
}
// ExtractTypeID returns the type ID at the given index from a query's TypeIds slice.
// Returns nil if queryType is nil (no type filtering). Logs a warning and returns nil
// if the index is out of range (negative or >= len(typeIds)).
//
// metric and hostname are used only to contextualize the warning message.
func ExtractTypeID(queryType *string, typeIds []string, ndx int, metric, hostname string) *string {
	// No type filtering requested for this query.
	if queryType == nil {
		return nil
	}
	// Guard both bounds: the previous check (ndx < len only) let a negative
	// index fall through to typeIds[ndx] and panic instead of warning.
	if ndx >= 0 && ndx < len(typeIds) {
		// Copy the element so the returned pointer does not alias the slice.
		id := typeIds[ndx]
		return &id
	}
	cclog.Warnf("TypeIds index out of range: %d with length %d for metric %s on host %s",
		ndx, len(typeIds), metric, hostname)
	return nil
}
// IsMetricRemovedForSubCluster checks whether a metric is marked as removed
// for the given subcluster in its per-subcluster configuration.
// Returns false when no matching entry carries the Remove flag.
func IsMetricRemovedForSubCluster(mc *schema.MetricConfig, subCluster string) bool {
	for _, scCfg := range mc.SubClusters {
		// Skip entries for other subclusters and entries not flagged for removal.
		if scCfg.Name != subCluster || !scCfg.Remove {
			continue
		}
		return true
	}
	return false
}
// SanitizeStats replaces NaN values in statistics with 0 to enable JSON marshaling.
// If ANY of avg/min/max is NaN, ALL three are zeroed for consistency.
func SanitizeStats(avg, min, max *schema.Float) {
	// Fast path: nothing to do when all three values are finite numbers.
	if !avg.IsNaN() && !min.IsNaN() && !max.IsNaN() {
		return
	}
	zero := schema.Float(0)
	*avg = zero
	*min = zero
	*max = zero
}

View File

@@ -0,0 +1,273 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package metricstore
import (
"testing"
"github.com/ClusterCockpit/cc-lib/v2/schema"
)
// makeTopology creates a simple 2-socket, 4-core, 8-hwthread topology for testing.
// Socket 0: cores 0,1 with hwthreads 0,1,2,3
// Socket 1: cores 2,3 with hwthreads 4,5,6,7
// MemoryDomain 0: hwthreads 0,1,2,3 (socket 0)
// MemoryDomain 1: hwthreads 4,5,6,7 (socket 1)
// Two accelerators ("gpu0", "gpu1") are attached for accelerator-scope cases.
func makeTopology() schema.Topology {
	return schema.Topology{
		Node:         []int{0, 1, 2, 3, 4, 5, 6, 7},
		Socket:       [][]int{{0, 1, 2, 3}, {4, 5, 6, 7}},
		MemoryDomain: [][]int{{0, 1, 2, 3}, {4, 5, 6, 7}},
		Core:         [][]int{{0, 1}, {2, 3}, {4, 5}, {6, 7}},
		Accelerators: []*schema.Accelerator{
			{ID: "gpu0"},
			{ID: "gpu1"},
		},
	}
}
// TestBuildScopeQueries exercises the scope-translation table of
// BuildScopeQueries on the fixed 2-socket/4-core/8-hwthread topology from
// makeTopology. Each case pins, for one (native, requested) scope pair:
// whether the pair is handled (expectOk), how many queries are produced
// (expectLen), whether those queries aggregate (expectAgg), and the scope
// stamped on each result (expectScope).
func TestBuildScopeQueries(t *testing.T) {
	topo := makeTopology()
	// Presumably prepares lookup structures used by the topology helpers —
	// required before calling BuildScopeQueries (TODO confirm against schema pkg).
	topo.InitTopologyMaps()
	accIds := topo.GetAcceleratorIDs()
	tests := []struct {
		name           string
		nativeScope    schema.MetricScope // granularity the metric is collected at
		requestedScope schema.MetricScope // granularity the caller asks for
		expectOk       bool
		expectLen      int // expected number of results
		expectAgg      bool
		expectScope    schema.MetricScope
	}{
		// Same-scope cases: a single non-aggregating query each.
		{
			name: "HWThread->HWThread", nativeScope: schema.MetricScopeHWThread,
			requestedScope: schema.MetricScopeHWThread, expectOk: true, expectLen: 1,
			expectAgg: false, expectScope: schema.MetricScopeHWThread,
		},
		{
			name: "Core->Core", nativeScope: schema.MetricScopeCore,
			requestedScope: schema.MetricScopeCore, expectOk: true, expectLen: 1,
			expectAgg: false, expectScope: schema.MetricScopeCore,
		},
		{
			name: "Socket->Socket", nativeScope: schema.MetricScopeSocket,
			requestedScope: schema.MetricScopeSocket, expectOk: true, expectLen: 1,
			expectAgg: false, expectScope: schema.MetricScopeSocket,
		},
		{
			name: "MemoryDomain->MemoryDomain", nativeScope: schema.MetricScopeMemoryDomain,
			requestedScope: schema.MetricScopeMemoryDomain, expectOk: true, expectLen: 1,
			expectAgg: false, expectScope: schema.MetricScopeMemoryDomain,
		},
		{
			name: "Node->Node", nativeScope: schema.MetricScopeNode,
			requestedScope: schema.MetricScopeNode, expectOk: true, expectLen: 1,
			expectAgg: false, expectScope: schema.MetricScopeNode,
		},
		{
			name: "Accelerator->Accelerator", nativeScope: schema.MetricScopeAccelerator,
			requestedScope: schema.MetricScopeAccelerator, expectOk: true, expectLen: 1,
			expectAgg: false, expectScope: schema.MetricScopeAccelerator,
		},
		// Aggregation cases: finer native scope rolled up to a coarser one.
		// Expected counts follow directly from the makeTopology layout.
		{
			name: "HWThread->Core", nativeScope: schema.MetricScopeHWThread,
			requestedScope: schema.MetricScopeCore, expectOk: true, expectLen: 4, // 4 cores
			expectAgg: true, expectScope: schema.MetricScopeCore,
		},
		{
			name: "HWThread->Socket", nativeScope: schema.MetricScopeHWThread,
			requestedScope: schema.MetricScopeSocket, expectOk: true, expectLen: 2, // 2 sockets
			expectAgg: true, expectScope: schema.MetricScopeSocket,
		},
		{
			name: "HWThread->Node", nativeScope: schema.MetricScopeHWThread,
			requestedScope: schema.MetricScopeNode, expectOk: true, expectLen: 1,
			expectAgg: true, expectScope: schema.MetricScopeNode,
		},
		{
			name: "Core->Socket", nativeScope: schema.MetricScopeCore,
			requestedScope: schema.MetricScopeSocket, expectOk: true, expectLen: 2, // 2 sockets
			expectAgg: true, expectScope: schema.MetricScopeSocket,
		},
		{
			name: "Core->Node", nativeScope: schema.MetricScopeCore,
			requestedScope: schema.MetricScopeNode, expectOk: true, expectLen: 1,
			expectAgg: true, expectScope: schema.MetricScopeNode,
		},
		{
			name: "Socket->Node", nativeScope: schema.MetricScopeSocket,
			requestedScope: schema.MetricScopeNode, expectOk: true, expectLen: 1,
			expectAgg: true, expectScope: schema.MetricScopeNode,
		},
		{
			name: "MemoryDomain->Node", nativeScope: schema.MetricScopeMemoryDomain,
			requestedScope: schema.MetricScopeNode, expectOk: true, expectLen: 1,
			expectAgg: true, expectScope: schema.MetricScopeNode,
		},
		{
			name: "MemoryDomain->Socket", nativeScope: schema.MetricScopeMemoryDomain,
			requestedScope: schema.MetricScopeSocket, expectOk: true, expectLen: 2, // 2 sockets
			expectAgg: true, expectScope: schema.MetricScopeSocket,
		},
		{
			name: "Accelerator->Node", nativeScope: schema.MetricScopeAccelerator,
			requestedScope: schema.MetricScopeNode, expectOk: true, expectLen: 1,
			expectAgg: true, expectScope: schema.MetricScopeNode,
		},
		// Expected exception: Accelerator scope requested but non-accelerator scope in between.
		// Handled (ok=true) but intentionally yields zero queries.
		{
			name: "Accelerator->Core (exception)", nativeScope: schema.MetricScopeAccelerator,
			requestedScope: schema.MetricScopeCore, expectOk: true, expectLen: 0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			results, ok := BuildScopeQueries(
				tt.nativeScope, tt.requestedScope,
				"test_metric", "node001",
				&topo, topo.Node, accIds,
			)
			if ok != tt.expectOk {
				t.Fatalf("expected ok=%v, got ok=%v", tt.expectOk, ok)
			}
			if len(results) != tt.expectLen {
				t.Fatalf("expected %d results, got %d", tt.expectLen, len(results))
			}
			if tt.expectLen > 0 {
				// Every produced query must carry the expected scope, aggregation
				// flag, and the metric/hostname that were passed in.
				for _, r := range results {
					if r.Scope != tt.expectScope {
						t.Errorf("expected scope %s, got %s", tt.expectScope, r.Scope)
					}
					if r.Aggregate != tt.expectAgg {
						t.Errorf("expected aggregate=%v, got %v", tt.expectAgg, r.Aggregate)
					}
					if r.Metric != "test_metric" {
						t.Errorf("expected metric 'test_metric', got '%s'", r.Metric)
					}
					if r.Hostname != "node001" {
						t.Errorf("expected hostname 'node001', got '%s'", r.Hostname)
					}
				}
			}
		})
	}
}
// TestBuildScopeQueries_UnhandledCase sweeps every pairing of the four basic
// scopes (HWThread, Core, Socket, Node) and verifies that BuildScopeQueries
// reports each as handled (ok=true) with a non-nil result slice. The bool
// return is the signal for a truly unhandled combination.
func TestBuildScopeQueries_UnhandledCase(t *testing.T) {
	topo := makeTopology()
	topo.InitTopologyMaps()
	allScopes := []schema.MetricScope{
		schema.MetricScopeHWThread, schema.MetricScopeCore,
		schema.MetricScopeSocket, schema.MetricScopeNode,
	}
	for _, nativeScope := range allScopes {
		for _, requestedScope := range allScopes {
			queries, handled := BuildScopeQueries(
				nativeScope, requestedScope,
				"m", "h", &topo, topo.Node, nil,
			)
			if !handled {
				t.Errorf("unexpected unhandled case: native=%s, requested=%s", nativeScope, requestedScope)
			}
			if queries == nil {
				t.Errorf("results should not be nil for native=%s, requested=%s", nativeScope, requestedScope)
			}
		}
	}
}
// TestIntToStringSlice checks the int-to-string conversion helper against
// nil, empty, zero, multi-element, and multi-digit inputs.
func TestIntToStringSlice(t *testing.T) {
	cases := []struct {
		input []int
		want  []string
	}{
		{nil, nil},
		{[]int{}, nil},
		{[]int{0}, []string{"0"}},
		{[]int{1, 2, 3}, []string{"1", "2", "3"}},
		{[]int{10, 100, 1000}, []string{"10", "100", "1000"}},
	}
	for _, c := range cases {
		got := IntToStringSlice(c.input)
		// Length mismatch makes element comparison meaningless; skip to next case.
		if len(got) != len(c.want) {
			t.Errorf("IntToStringSlice(%v): expected len %d, got %d", c.input, len(c.want), len(got))
			continue
		}
		for i, s := range got {
			if s != c.want[i] {
				t.Errorf("IntToStringSlice(%v)[%d]: expected %s, got %s", c.input, i, c.want[i], s)
			}
		}
	}
}
// TestSanitizeStats verifies the NaN-scrubbing contract: finite triples pass
// through untouched, while a single NaN anywhere zeroes all three values.
func TestSanitizeStats(t *testing.T) {
	// Helper: run SanitizeStats on copies and return the sanitized triple.
	check := func(a, mn, mx schema.Float) (schema.Float, schema.Float, schema.Float) {
		SanitizeStats(&a, &mn, &mx)
		return a, mn, mx
	}
	// All valid: values must remain unchanged.
	if avg, min, max := check(schema.Float(1.0), schema.Float(0.5), schema.Float(2.0)); avg != 1.0 || min != 0.5 || max != 2.0 {
		t.Errorf("SanitizeStats should not change valid values")
	}
	// One NaN: everything is zeroed for consistency.
	if avg, min, max := check(schema.Float(1.0), schema.Float(0.5), schema.NaN); avg != 0 || min != 0 || max != 0 {
		t.Errorf("SanitizeStats should zero all when any is NaN, got avg=%v min=%v max=%v", avg, min, max)
	}
	// All NaN: likewise fully zeroed.
	if avg, min, max := check(schema.NaN, schema.NaN, schema.NaN); avg != 0 || min != 0 || max != 0 {
		t.Errorf("SanitizeStats should zero all NaN values")
	}
}
// TestNodeToNodeQuery inspects the Node->Node path in detail: it must yield
// exactly one non-aggregating query with no type filter (nil Type/TypeIds).
func TestNodeToNodeQuery(t *testing.T) {
	topo := makeTopology()
	topo.InitTopologyMaps()
	queries, ok := BuildScopeQueries(
		schema.MetricScopeNode, schema.MetricScopeNode,
		"cpu_load", "node001",
		&topo, topo.Node, nil,
	)
	switch {
	case !ok:
		t.Fatal("expected ok=true for Node->Node")
	case len(queries) != 1:
		t.Fatalf("expected 1 result, got %d", len(queries))
	}
	q := queries[0]
	if q.Type != nil {
		t.Error("Node->Node should have nil Type")
	}
	if q.TypeIds != nil {
		t.Error("Node->Node should have nil TypeIds")
	}
	if q.Aggregate {
		t.Error("Node->Node should not aggregate")
	}
}

View File

@@ -206,7 +206,7 @@
items.push({ duration: { to: filters.duration.lessThan, from: 0 } }); items.push({ duration: { to: filters.duration.lessThan, from: 0 } });
if (filters.duration.moreThan) if (filters.duration.moreThan)
items.push({ duration: { to: 0, from: filters.duration.moreThan } }); items.push({ duration: { to: 0, from: filters.duration.moreThan } });
if (filters.energy.from || filters.energy.to) if (filters.energy.from != null || filters.energy.to != null)
items.push({ items.push({
energy: { from: filters.energy.from, to: filters.energy.to }, energy: { from: filters.energy.from, to: filters.energy.to },
}); });
@@ -301,11 +301,20 @@
if (filters.node) opts.push(`node=${filters.node}`); if (filters.node) opts.push(`node=${filters.node}`);
if (filters.node && filters.nodeMatch != "eq") // "eq" is default-case if (filters.node && filters.nodeMatch != "eq") // "eq" is default-case
opts.push(`nodeMatch=${filters.nodeMatch}`); opts.push(`nodeMatch=${filters.nodeMatch}`);
if (filters.energy.from && filters.energy.to) if (filters.energy.from > 1 && filters.energy.to > 0)
opts.push(`energy=${filters.energy.from}-${filters.energy.to}`); opts.push(`energy=${filters.energy.from}-${filters.energy.to}`);
if (filters.stats.length != 0) else if (filters.energy.from > 1 && filters.energy.to == 0)
opts.push(`energy=morethan-${filters.energy.from}`);
else if (filters.energy.from == 1 && filters.energy.to > 0)
opts.push(`energy=lessthan-${filters.energy.to}`);
if (filters.stats.length > 0)
for (let stat of filters.stats) { for (let stat of filters.stats) {
if (stat.from > 1 && stat.to > 0)
opts.push(`stat=${stat.field}-${stat.from}-${stat.to}`); opts.push(`stat=${stat.field}-${stat.from}-${stat.to}`);
else if (stat.from > 1 && stat.to == 0)
opts.push(`stat=${stat.field}-morethan-${stat.from}`);
else if (stat.from == 1 && stat.to > 0)
opts.push(`stat=${stat.field}-lessthan-${stat.to}`);
} }
// Build && Return // Build && Return
if (opts.length == 0 && window.location.search.length <= 1) return; if (opts.length == 0 && window.location.search.length <= 1) return;
@@ -550,18 +559,36 @@
</Info> </Info>
{/if} {/if}
{#if filters.energy.from || filters.energy.to} {#if filters.energy.from > 1 && filters.energy.to > 0}
<Info icon="lightning-charge-fill" onclick={() => (isEnergyOpen = true)}> <Info icon="lightning-charge-fill" onclick={() => (isEnergyOpen = true)}>
Total Energy: {filters.energy.from} - {filters.energy.to} Total Energy: {filters.energy.from} - {filters.energy.to} kWh
</Info>
{:else if filters.energy.from > 1 && filters.energy.to == 0}
<Info icon="lightning-charge-fill" onclick={() => (isEnergyOpen = true)}>
Total Energy &ge;&nbsp;{filters.energy.from} kWh
</Info>
{:else if filters.energy.from == 1 && filters.energy.to > 0}
<Info icon="lightning-charge-fill" onclick={() => (isEnergyOpen = true)}>
Total Energy &le;&nbsp;{filters.energy.to} kWh
</Info> </Info>
{/if} {/if}
{#if filters.stats.length > 0} {#if filters.stats.length > 0}
{#each filters.stats as stat}
{#if stat.from > 1 && stat.to > 0}
<Info icon="bar-chart" onclick={() => (isStatsOpen = true)}> <Info icon="bar-chart" onclick={() => (isStatsOpen = true)}>
{filters.stats {stat.field}: {stat.from} - {stat.to} {stat.unit}
.map((stat) => `${stat.field}: ${stat.from} - ${stat.to}`) </Info>&thinsp;
.join(", ")} {:else if stat.from > 1 && stat.to == 0}
</Info> <Info icon="bar-chart" onclick={() => (isStatsOpen = true)}>
{stat.field} &ge;&nbsp;{stat.from} {stat.unit}
</Info>&thinsp;
{:else if stat.from == 1 && stat.to > 0}
<Info icon="bar-chart" onclick={() => (isStatsOpen = true)}>
{stat.field} &le;&nbsp;{stat.to} {stat.unit}
</Info>&thinsp;
{/if}
{/each}
{/if} {/if}
{/if} {/if}

View File

@@ -15,54 +15,90 @@
ModalBody, ModalBody,
ModalHeader, ModalHeader,
ModalFooter, ModalFooter,
Tooltip,
Icon
} from "@sveltestrap/sveltestrap"; } from "@sveltestrap/sveltestrap";
import DoubleRangeSlider from "../select/DoubleRangeSlider.svelte"; import DoubleRangeSlider from "../select/DoubleRangeSlider.svelte";
/* Svelte 5 Props */ /* Svelte 5 Props */
let { let {
isOpen = $bindable(false), isOpen = $bindable(false),
presetEnergy = { presetEnergy = { from: null, to: null },
from: null,
to: null
},
setFilter, setFilter,
} = $props(); } = $props();
/* Const */
const minEnergyPreset = 1;
const maxEnergyPreset = 1000;
/* Derived */ /* Derived */
let energyState = $derived(presetEnergy); // Pending
let pendingEnergyState = $derived({
from: presetEnergy?.from ? presetEnergy.from : minEnergyPreset,
to: !(presetEnergy.to == null || presetEnergy.to == 0) ? presetEnergy.to : maxEnergyPreset,
});
// Changable
let energyState = $derived({
from: presetEnergy?.from ? presetEnergy.from : minEnergyPreset,
to: !(presetEnergy.to == null || presetEnergy.to == 0) ? presetEnergy.to : maxEnergyPreset,
});
const energyActive = $derived(!(JSON.stringify(energyState) === JSON.stringify({ from: minEnergyPreset, to: maxEnergyPreset })));
// Block Apply if null
const disableApply = $derived(energyState.from === null || energyState.to === null);
/* Function */
function setEnergy() {
if (energyActive) {
pendingEnergyState = {
from: energyState.from,
to: (energyState.to == maxEnergyPreset) ? 0 : energyState.to
};
} else {
pendingEnergyState = { from: null, to: null};
};
}
</script> </script>
<Modal {isOpen} toggle={() => (isOpen = !isOpen)}> <Modal {isOpen} toggle={() => (isOpen = !isOpen)}>
<ModalHeader>Filter based on energy</ModalHeader> <ModalHeader>Filter based on energy</ModalHeader>
<ModalBody> <ModalBody>
<div class="mb-3"> <div class="mb-3">
<div class="mb-0"><b>Total Job Energy (kWh)</b></div> <div class="mb-0">
<b>Total Job Energy (kWh)</b>
<Icon id="energy-info" style="cursor:help; padding-right: 10px;" size="sm" name="info-circle"/>
</div>
<Tooltip target={`energy-info`} placement="right">
Generalized Presets. Use input fields to change to higher values.
</Tooltip>
<DoubleRangeSlider <DoubleRangeSlider
changeRange={(detail) => { changeRange={(detail) => {
energyState.from = detail[0]; energyState.from = detail[0];
energyState.to = detail[1]; energyState.to = detail[1];
}} }}
sliderMin={0.0} sliderMin={minEnergyPreset}
sliderMax={1000.0} sliderMax={maxEnergyPreset}
fromPreset={energyState?.from? energyState.from : 0.0} fromPreset={energyState.from}
toPreset={energyState?.to? energyState.to : 1000.0} toPreset={energyState.to}
/> />
</div> </div>
</ModalBody> </ModalBody>
<ModalFooter> <ModalFooter>
<Button <Button
color="primary" color="primary"
disabled={disableApply}
onclick={() => { onclick={() => {
isOpen = false; isOpen = false;
setFilter({ energy: energyState }); setEnergy();
setFilter({ energy: pendingEnergyState });
}}>Close & Apply</Button }}>Close & Apply</Button
> >
<Button <Button
color="danger" color="danger"
onclick={() => { onclick={() => {
isOpen = false; isOpen = false;
energyState = {from: null, to: null}; pendingEnergyState = {from: null, to: null};
setFilter({ energy: energyState }); setFilter({ energy: pendingEnergyState });
}}>Reset</Button }}>Reset</Button
> >
<Button onclick={() => (isOpen = false)}>Close</Button> <Button onclick={() => (isOpen = false)}>Close</Button>

View File

@@ -20,7 +20,7 @@
} = $props(); } = $props();
</script> </script>
<Button class="mr-2 mb-1" outline color={modified ? "warning" : "primary"} {onclick}> <Button class="mb-1" outline color={modified ? "warning" : "primary"} {onclick}>
<Icon name={icon} /> <Icon name={icon} />
{#if children} {#if children}
<!-- Note: Ignore '@' Error in IDE --> <!-- Note: Ignore '@' Error in IDE -->

View File

@@ -262,7 +262,7 @@
<Icon id="numthreads-info" style="cursor:help; padding-right: 10px;" size="sm" name="info-circle"/> <Icon id="numthreads-info" style="cursor:help; padding-right: 10px;" size="sm" name="info-circle"/>
</div> </div>
<Tooltip target={`numthreads-info`} placement="right"> <Tooltip target={`numthreads-info`} placement="right">
Presets for a single node. Can be changed to higher values. Presets for a single node. Use input fields to change to higher values.
</Tooltip> </Tooltip>
<DoubleRangeSlider <DoubleRangeSlider
changeRange={(detail) => { changeRange={(detail) => {
@@ -282,7 +282,7 @@
<Icon id="numaccs-info" style="cursor:help; padding-right: 10px;" size="sm" name="info-circle"/> <Icon id="numaccs-info" style="cursor:help; padding-right: 10px;" size="sm" name="info-circle"/>
</div> </div>
<Tooltip target={`numaccs-info`} placement="right"> <Tooltip target={`numaccs-info`} placement="right">
Presets for a single node. Can be changed to higher values. Presets for a single node. Use input fields to change to higher values.
</Tooltip> </Tooltip>
<DoubleRangeSlider <DoubleRangeSlider
changeRange={(detail) => { changeRange={(detail) => {

View File

@@ -15,13 +15,15 @@
ModalBody, ModalBody,
ModalHeader, ModalHeader,
ModalFooter, ModalFooter,
Tooltip,
Icon
} from "@sveltestrap/sveltestrap"; } from "@sveltestrap/sveltestrap";
import DoubleRangeSlider from "../select/DoubleRangeSlider.svelte"; import DoubleRangeSlider from "../select/DoubleRangeSlider.svelte";
/* Svelte 5 Props */ /* Svelte 5 Props */
let { let {
isOpen = $bindable(), isOpen = $bindable(),
presetStats, presetStats = [],
setFilter setFilter
} = $props(); } = $props();
@@ -29,10 +31,18 @@
const availableStats = $derived(getStatsItems(presetStats)); const availableStats = $derived(getStatsItems(presetStats));
/* Functions */ /* Functions */
function setRanges() {
for (let as of availableStats) {
if (as.enabled) {
as.to = (as.to == as.peak) ? 0 : as.to
}
};
}
function resetRanges() { function resetRanges() {
for (let as of availableStats) { for (let as of availableStats) {
as.enabled = false as.enabled = false
as.from = 0 as.from = 1
as.to = as.peak as.to = as.peak
}; };
} }
@@ -45,18 +55,24 @@
<ModalBody> <ModalBody>
{#each availableStats as aStat} {#each availableStats as aStat}
<div class="mb-3"> <div class="mb-3">
<div class="mb-0"><b>{aStat.text}</b></div> <div class="mb-0">
<b>{aStat.text} ({aStat.unit})</b>
<Icon id={`${aStat.metric}-info`} style="cursor:help; padding-right: 10px;" size="sm" name="info-circle"/>
</div>
<Tooltip target={`${aStat.metric}-info`} placement="right">
Peak Threshold Preset. Use input fields to change to higher values.
</Tooltip>
<DoubleRangeSlider <DoubleRangeSlider
changeRange={(detail) => { changeRange={(detail) => {
aStat.from = detail[0]; aStat.from = detail[0];
aStat.to = detail[1]; aStat.to = detail[1];
if (aStat.from == 0 && aStat.to == aStat.peak) { if (aStat.from == 1 && aStat.to == aStat.peak) {
aStat.enabled = false; aStat.enabled = false;
} else { } else {
aStat.enabled = true; aStat.enabled = true;
} }
}} }}
sliderMin={0.0} sliderMin={1}
sliderMax={aStat.peak} sliderMax={aStat.peak}
fromPreset={aStat.from} fromPreset={aStat.from}
toPreset={aStat.to} toPreset={aStat.to}
@@ -69,6 +85,7 @@
color="primary" color="primary"
onclick={() => { onclick={() => {
isOpen = false; isOpen = false;
setRanges();
setFilter({ stats: [...availableStats.filter((as) => as.enabled)] }); setFilter({ stats: [...availableStats.filter((as) => as.enabled)] });
}}>Close & Apply</Button }}>Close & Apply</Button
> >

View File

@@ -165,11 +165,11 @@
}} }}
/> />
{#if inputFieldFrom != "1" && inputFieldTo != sliderMax?.toString() } {#if inputFieldFrom != sliderMin?.toString() && inputFieldTo != sliderMax?.toString() }
<span>Selected: Range <b> {inputFieldFrom} </b> - <b> {inputFieldTo} </b></span> <span>Selected: Range <b> {inputFieldFrom} </b> - <b> {inputFieldTo} </b></span>
{:else if inputFieldFrom != "1" && inputFieldTo == sliderMax?.toString() } {:else if inputFieldFrom != sliderMin?.toString() && inputFieldTo == sliderMax?.toString() }
<span>Selected: More than <b> {inputFieldFrom} </b> </span> <span>Selected: More than <b> {inputFieldFrom} </b> </span>
{:else if inputFieldFrom == "1" && inputFieldTo != sliderMax?.toString() } {:else if inputFieldFrom == sliderMin?.toString() && inputFieldTo != sliderMax?.toString() }
<span>Selected: Less than <b> {inputFieldTo} </b></span> <span>Selected: Less than <b> {inputFieldTo} </b></span>
{:else} {:else}
<span><i>No Selection</i></span> <span><i>No Selection</i></span>

View File

@@ -341,26 +341,28 @@ export function getStatsItems(presetStats = []) {
if (gm?.footprint) { if (gm?.footprint) {
const mc = getMetricConfigDeep(gm.name, null, null) const mc = getMetricConfigDeep(gm.name, null, null)
if (mc) { if (mc) {
const presetEntry = presetStats.find((s) => s?.field === (gm.name + '_' + gm.footprint)) const presetEntry = presetStats.find((s) => s.field == `${gm.name}_${gm.footprint}`)
if (presetEntry) { if (presetEntry) {
return { return {
field: gm.name + '_' + gm.footprint, field: presetEntry.field,
text: gm.name + ' (' + gm.footprint + ')', text: `${gm.name} (${gm.footprint})`,
metric: gm.name, metric: gm.name,
from: presetEntry.from, from: presetEntry.from,
to: presetEntry.to, to: (presetEntry.to == 0) ? mc.peak : presetEntry.to,
peak: mc.peak, peak: mc.peak,
enabled: true enabled: true,
unit: `${gm?.unit?.prefix ? gm.unit.prefix : ''}${gm.unit.base}`
} }
} else { } else {
return { return {
field: gm.name + '_' + gm.footprint, field: `${gm.name}_${gm.footprint}`,
text: gm.name + ' (' + gm.footprint + ')', text: `${gm.name} (${gm.footprint})`,
metric: gm.name, metric: gm.name,
from: 0, from: 1,
to: mc.peak, to: mc.peak,
peak: mc.peak, peak: mc.peak,
enabled: false enabled: false,
unit: `${gm?.unit?.prefix ? gm.unit.prefix : ''}${gm.unit.base}`
} }
} }
} }