Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2025-06-08 00:23:48 +02:00
Refactor. Add helper routine to get the metric list for a subcluster.
parent 5a88c77171
commit 0aecea6de2
@@ -124,7 +124,6 @@ func (t *JobClassTagger) prepareRule(b []byte, fns string) {
     }
     log.Infof("prepareRule() > processing %s with %d requirements and %d variables", fns, len(ri.requirements), len(ri.variables))

-    delete(t.rules, rule.Tag)
     t.rules[rule.Tag] = ri
 }

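Note on the removed line: deleting the key before re-assigning it was redundant, because assigning to a Go map key replaces any existing entry. A tiny standalone sketch of that behavior (not code from the repository):

package main

import "fmt"

func main() {
    rules := map[string]string{"highload": "old rule"}
    // No prior delete(rules, "highload") is needed:
    // assignment to an existing key simply overwrites the value.
    rules["highload"] = "new rule"
    fmt.Println(rules["highload"]) // new rule
}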
@@ -139,16 +138,33 @@ func (t *JobClassTagger) EventCallback() {
         log.Fatal(err)
     }

-    for _, fn := range files {
-        fns := fn.Name()
-        log.Debugf("Process: %s", fns)
-        filename := fmt.Sprintf("%s/%s", t.cfgPath, fns)
-        b, err := os.ReadFile(filename)
-        if err != nil {
-            log.Warnf("prepareRule() > open file error: %v", err)
-            return
-        }
-        t.prepareRule(b, fns)
+    if util.CheckFileExists(t.cfgPath + "/parameters.json") {
+        log.Info("Merge parameters")
+        b, err := os.ReadFile(t.cfgPath + "/parameters.json")
+        if err != nil {
+            log.Warnf("prepareRule() > open file error: %v", err)
+        }
+
+        var paramTmp map[string]any
+        if err := json.NewDecoder(bytes.NewReader(b)).Decode(&paramTmp); err != nil {
+            log.Warn("Error while decoding parameters.json")
+        }
+
+        maps.Copy(t.parameters, paramTmp)
+    }
+
+    for _, fn := range files {
+        fns := fn.Name()
+        if fns != "parameters.json" {
+            log.Debugf("Process: %s", fns)
+            filename := fmt.Sprintf("%s/%s", t.cfgPath, fns)
+            b, err := os.ReadFile(filename)
+            if err != nil {
+                log.Warnf("prepareRule() > open file error: %v", err)
+                return
+            }
+            t.prepareRule(b, fns)
+        }
     }
 }

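The reworked EventCallback() first merges an optional parameters.json into t.parameters and then skips that file while reloading the rule files. The merge uses maps.Copy from the standard library, which overwrites keys that already exist in the destination, so values from parameters.json win. A minimal sketch of that semantics, with made-up parameter names:

package main

import (
    "fmt"
    "maps"
)

func main() {
    // Hypothetical defaults already held in t.parameters.
    params := map[string]any{"min_duration": 600, "load_threshold": 0.9}
    // Hypothetical content decoded from parameters.json.
    fromFile := map[string]any{"load_threshold": 0.95}

    maps.Copy(params, fromFile) // keys from fromFile replace matching keys in params
    fmt.Println(params)         // map[load_threshold:0.95 min_duration:600]
}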
@@ -220,6 +236,8 @@ func (t *JobClassTagger) Match(job *schema.Job) {
         env := make(map[string]any)
         maps.Copy(env, ri.env)
         log.Infof("Try to match rule %s for job %d", tag, job.JobID)
+
+        // Initialize environment
         env["job"] = map[string]any{
             "exclusive": job.Exclusive,
             "duration":  job.Duration,
@@ -69,16 +69,18 @@ func initClusterConfig() error {

             for _, sc := range cluster.SubClusters {
                 newMetric := &schema.MetricConfig{
-                    Unit:          mc.Unit,
+                    Metric: schema.Metric{
+                        Name:    mc.Name,
+                        Unit:    mc.Unit,
+                        Peak:    mc.Peak,
+                        Normal:  mc.Normal,
+                        Caution: mc.Caution,
+                        Alert:   mc.Alert,
+                    },
                     Energy:        mc.Energy,
-                    Name:          mc.Name,
                     Scope:         mc.Scope,
                     Aggregation:   mc.Aggregation,
-                    Peak:          mc.Peak,
-                    Caution:       mc.Caution,
-                    Alert:         mc.Alert,
                     Timestep:      mc.Timestep,
-                    Normal:        mc.Normal,
                     LowerIsBetter: mc.LowerIsBetter,
                 }

@@ -167,6 +169,45 @@ func GetSubCluster(cluster, subcluster string) (*schema.SubCluster, error) {
     return nil, fmt.Errorf("subcluster '%v' not found for cluster '%v', or cluster '%v' not configured", subcluster, cluster, cluster)
 }

+func GetMetricConfigSubCluster(cluster, subcluster string) map[string]*schema.Metric {
+    metrics := make(map[string]*schema.Metric)
+
+    for _, c := range Clusters {
+        if c.Name == cluster {
+            for _, m := range c.MetricConfig {
+                for _, s := range m.SubClusters {
+                    if s.Name == subcluster {
+                        metrics[m.Name] = &schema.Metric{
+                            Name:    m.Name,
+                            Unit:    s.Unit,
+                            Peak:    s.Peak,
+                            Normal:  s.Normal,
+                            Caution: s.Caution,
+                            Alert:   s.Alert,
+                        }
+                        break
+                    }
+                }
+
+                _, ok := metrics[m.Name]
+                if !ok {
+                    metrics[m.Name] = &schema.Metric{
+                        Name:    m.Name,
+                        Unit:    m.Unit,
+                        Peak:    m.Peak,
+                        Normal:  m.Normal,
+                        Caution: m.Caution,
+                        Alert:   m.Alert,
+                    }
+                }
+            }
+            break
+        }
+    }
+
+    return metrics
+}
+
 func GetMetricConfig(cluster, metric string) *schema.MetricConfig {
     for _, c := range Clusters {
         if c.Name == cluster {
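The new GetMetricConfigSubCluster helper resolves the effective metric limits for one subcluster: if a metric carries a per-subcluster override in MetricConfig.SubClusters, those values are used, otherwise the cluster-wide values serve as fallback. A hedged usage sketch follows; the cluster and subcluster names are placeholders, the import path is assumed from the repository layout, and the archive is assumed to be initialized beforehand:

package main

import (
    "fmt"

    "github.com/ClusterCockpit/cc-backend/pkg/archive"
)

// Assumes the archive package has already loaded the cluster configuration
// (initialization omitted); "testcluster" and "main" are placeholder names.
func main() {
    metrics := archive.GetMetricConfigSubCluster("testcluster", "main")
    for name, m := range metrics {
        fmt.Printf("%s: unit=%v peak=%g normal=%g caution=%g alert=%g\n",
            name, m.Unit, m.Peak, m.Normal, m.Caution, m.Alert)
    }
}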
@@ -61,7 +61,7 @@ func (nl *NodeList) PrintList() []string {
 }

 func (nl *NodeList) NodeCount() int {
-    var out int = 0
+    out := 0
     for _, term := range *nl {
         if len(term) == 1 { // If only String-Part in Term: Single Node Name -> add one
             out += 1
@@ -160,7 +160,7 @@ func (nle NLExprIntRange) limits() []map[string]int {
     m["start"] = int(nle.start)
     m["end"] = int(nle.end)
     m["digits"] = int(nle.digits)
-    if nle.zeroPadded == true {
+    if nle.zeroPadded {
         m["zeroPadded"] = 1
     } else {
         m["zeroPadded"] = 0
@@ -183,14 +183,15 @@ func ParseNodeList(raw string) (NodeList, error) {
     rawterms := []string{}
     prevterm := 0
     for i := 0; i < len(raw); i++ {
-        if raw[i] == '[' {
+        switch raw[i] {
+        case '[':
             for i < len(raw) && raw[i] != ']' {
                 i++
             }
             if i == len(raw) {
                 return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
             }
-        } else if raw[i] == ',' {
+        case ',':
             rawterms = append(rawterms, raw[prevterm:i])
             prevterm = i + 1
         }
@@ -45,31 +45,31 @@ type SubCluster struct {
     ThreadsPerCore int `json:"threadsPerCore"`
 }

+type Metric struct {
+    Name    string  `json:"name"`
+    Unit    Unit    `json:"unit"`
+    Peak    float64 `json:"peak"`
+    Normal  float64 `json:"normal"`
+    Caution float64 `json:"caution"`
+    Alert   float64 `json:"alert"`
+}
+
 type SubClusterConfig struct {
-    Name          string  `json:"name"`
+    Metric
     Footprint     string  `json:"footprint,omitempty"`
     Energy        string  `json:"energy"`
-    Peak          float64 `json:"peak"`
-    Normal        float64 `json:"normal"`
-    Caution       float64 `json:"caution"`
-    Alert         float64 `json:"alert"`
     Remove        bool    `json:"remove"`
     LowerIsBetter bool    `json:"lowerIsBetter"`
 }

 type MetricConfig struct {
-    Unit          Unit                `json:"unit"`
+    Metric
     Energy        string              `json:"energy"`
-    Name          string              `json:"name"`
     Scope         MetricScope         `json:"scope"`
     Aggregation   string              `json:"aggregation"`
     Footprint     string              `json:"footprint,omitempty"`
     SubClusters   []*SubClusterConfig `json:"subClusters,omitempty"`
-    Peak          float64             `json:"peak"`
-    Caution       float64             `json:"caution"`
-    Alert         float64             `json:"alert"`
     Timestep      int                 `json:"timestep"`
-    Normal        float64             `json:"normal"`
     LowerIsBetter bool                `json:"lowerIsBetter"`
 }

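Because Metric is now embedded in both SubClusterConfig and MetricConfig, its fields are promoted: existing code that reads mc.Peak or mc.Unit keeps working, while composite literals must name the embedded type explicitly, which is what the initClusterConfig hunk above does with Metric: schema.Metric{...}. A minimal, self-contained sketch of this Go embedding behavior, using trimmed-down stand-ins for the schema types:

package main

import "fmt"

// Trimmed-down stand-ins for the schema types, only to show field promotion.
type Metric struct {
    Name string
    Peak float64
}

type MetricConfig struct {
    Metric   // embedded: Name and Peak are promoted to MetricConfig
    Scope    string
    Timestep int
}

func main() {
    mc := MetricConfig{
        // Composite literals address the embedded struct by its type name.
        Metric:   Metric{Name: "flops_any", Peak: 9216},
        Scope:    "node",
        Timestep: 60,
    }
    // Promoted field access is unchanged for existing callers.
    fmt.Println(mc.Name, mc.Peak, mc.Scope)
}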
@@ -127,7 +127,7 @@ func (topo *Topology) GetSocketsFromHWThreads(
 // those in the argument list are assigned to one of the sockets in the first
 // return value, return true as the second value. TODO: Optimize this, there
 // must be a more efficient way/algorithm.
-func (topo *Topology) GetSocketsFromCores (
+func (topo *Topology) GetSocketsFromCores(
     cores []int,
 ) (sockets []int, exclusive bool) {
     socketsMap := map[int]int{}