Fixed Error return value of ... is not checked (errcheck)

Holger Obermaier
2026-02-05 10:06:34 +01:00
parent ff0cd5803d
commit a77cc19ddb
30 changed files with 284 additions and 219 deletions

View File

@@ -67,7 +67,7 @@ A collector reads data from any source, parses it to metrics and submits these m
 * `Read(duration time.Duration, output chan ccMessage.CCMessage)`: Read, parse and submit data to the `output` channel as [`CCMessage`](https://github.com/ClusterCockpit/cc-lib/blob/main/ccMessage/README.md). If the collector has to measure anything for some duration, use the provided function argument `duration`.
 * `Close()`: Closes down the collector.
-It is recommanded to call `setup()` in the `Init()` function.
+It is recommended to call `setup()` in the `Init()` function.
 Finally, the collector needs to be registered in the `collectorManager.go`. There is a list of collectors called `AvailableCollectors` which is a map (`collector_type_string` -> `pointer to MetricCollector interface`). Add a new entry with a descriptive name and the new collector.
@@ -100,11 +100,12 @@ func (m *SampleCollector) Init(config json.RawMessage) error {
     }
     m.name = "SampleCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     if len(config) > 0 {
-        err := json.Unmarshal(config, &m.config)
-        if err != nil {
-            return err
+        if err := json.Unmarshal(config, &m.config); err != nil {
+            return fmt.Errorf("%s Init(): json.Unmarshal() call failed: %w", m.name, err)
         }
     }
     m.meta = map[string]string{"source": m.name, "group": "Sample"}
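For reference, here is a minimal, self-contained sketch of the pattern this commit applies across all collectors: call `setup()` early in `Init()` and, instead of discarding its return value, wrap and return the error. The collector name and config struct below are illustrative only; the embedded `metricCollector` base type with its `name` and `meta` fields and its `setup()` method is assumed from the repository, as used by the sample collector above.

```go
package collectors

import (
    "encoding/json"
    "fmt"
)

// exampleCollectorConfig is a hypothetical config struct, used only to
// illustrate the errcheck-compliant Init() pattern.
type exampleCollectorConfig struct {
    ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}

// ExampleCollector embeds the repository's metricCollector base type,
// which provides name, meta and the setup() method.
type ExampleCollector struct {
    metricCollector
    config exampleCollectorConfig
}

func (m *ExampleCollector) Init(config json.RawMessage) error {
    m.name = "ExampleCollector"
    // Check the error returned by setup() instead of discarding it.
    if err := m.setup(); err != nil {
        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
    }
    if len(config) > 0 {
        // Same treatment for json.Unmarshal: wrap and propagate the error.
        if err := json.Unmarshal(config, &m.config); err != nil {
            return fmt.Errorf("%s Init(): json.Unmarshal() call failed: %w", m.name, err)
        }
    }
    m.meta = map[string]string{"source": m.name, "group": "Example"}
    return nil
}
```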

View File

@@ -61,7 +61,9 @@ func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
"rmXA", "setXA", "mirror"} "rmXA", "setXA", "mirror"}
m.name = "BeegfsMetaCollector" m.name = "BeegfsMetaCollector"
m.setup() if err := m.setup(); err != nil {
return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
}
m.parallel = true m.parallel = true
// Set default beegfs-ctl binary // Set default beegfs-ctl binary

View File

@@ -54,7 +54,9 @@ func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
"storInf", "unlnk"} "storInf", "unlnk"}
m.name = "BeegfsStorageCollector" m.name = "BeegfsStorageCollector"
m.setup() if err := m.setup(); err != nil {
return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
}
m.parallel = true m.parallel = true
// Set default beegfs-ctl binary // Set default beegfs-ctl binary

View File

@@ -41,9 +41,10 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
         return nil
     }
-    m.setup()
     m.name = "CPUFreqCpuInfoCollector"
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{
         "source": m.name,

View File

@@ -48,7 +48,9 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
     }
     m.name = "CPUFreqCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     if len(config) > 0 {
         err := json.Unmarshal(config, &m.config)

View File

@@ -39,7 +39,9 @@ type CpustatCollector struct {
 func (m *CpustatCollector) Init(config json.RawMessage) error {
     m.name = "CpustatCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{"source": m.name, "group": "CPU"}
     m.nodetags = map[string]string{"type": "node"}

View File

@@ -10,6 +10,7 @@ package collectors
 import (
     "encoding/json"
     "errors"
+    "fmt"
     "log"
     "os"
     "os/exec"
@@ -49,7 +50,9 @@ func (m *CustomCmdCollector) Init(config json.RawMessage) error {
             return err
         }
     }
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     for _, c := range m.config.Commands {
         cmdfields := strings.Fields(c)
         command := exec.Command(cmdfields[0], cmdfields[1:]...)

View File

@@ -10,6 +10,7 @@ package collectors
 import (
     "bufio"
     "encoding/json"
+    "fmt"
     "os"
     "strings"
     "syscall"
@@ -36,7 +37,9 @@ func (m *DiskstatCollector) Init(config json.RawMessage) error {
m.name = "DiskstatCollector" m.name = "DiskstatCollector"
m.parallel = true m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "Disk"} m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup() if err := m.setup(); err != nil {
return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
}
if len(config) > 0 { if len(config) > 0 {
if err := json.Unmarshal(config, &m.config); err != nil { if err := json.Unmarshal(config, &m.config); err != nil {
return err return err

View File

@@ -43,11 +43,11 @@ type GpfsCollectorConfig struct {
 }
 type GpfsMetricDefinition struct {
     name   string
     desc   string
     prefix string
     unit   string
     calc   string
 }
 type GpfsCollector struct {
@@ -56,251 +56,251 @@ type GpfsCollector struct {
     config        GpfsCollectorConfig
     sudoCmd       string
     skipFS        map[string]struct{}
     lastTimestamp map[string]time.Time          // Store timestamp of lastState per filesystem to derive bandwidths
     definitions   []GpfsMetricDefinition        // all metrics to report
     lastState     map[string]GpfsCollectorState // one GpfsCollectorState per filesystem
 }
 var GpfsAbsMetrics = []GpfsMetricDefinition{
     {
         name:   "gpfs_num_opens",
         desc:   "number of opens",
         prefix: "_oc_",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_num_closes",
         desc:   "number of closes",
         prefix: "_cc_",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_num_reads",
         desc:   "number of reads",
         prefix: "_rdc_",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_num_writes",
         desc:   "number of writes",
         prefix: "_wc_",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_num_readdirs",
         desc:   "number of readdirs",
         prefix: "_dir_",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_num_inode_updates",
         desc:   "number of Inode Updates",
         prefix: "_iu_",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_bytes_read",
         desc:   "bytes read",
         prefix: "_br_",
         unit:   "bytes",
         calc:   "none",
     },
     {
         name:   "gpfs_bytes_written",
         desc:   "bytes written",
         prefix: "_bw_",
         unit:   "bytes",
         calc:   "none",
     },
 }
 var GpfsDiffMetrics = []GpfsMetricDefinition{
     {
         name:   "gpfs_num_opens_diff",
         desc:   "number of opens (diff)",
         prefix: "_oc_",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_num_closes_diff",
         desc:   "number of closes (diff)",
         prefix: "_cc_",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_num_reads_diff",
         desc:   "number of reads (diff)",
         prefix: "_rdc_",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_num_writes_diff",
         desc:   "number of writes (diff)",
         prefix: "_wc_",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_num_readdirs_diff",
         desc:   "number of readdirs (diff)",
         prefix: "_dir_",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_num_inode_updates_diff",
         desc:   "number of Inode Updates (diff)",
         prefix: "_iu_",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_bytes_read_diff",
         desc:   "bytes read (diff)",
         prefix: "_br_",
         unit:   "bytes",
         calc:   "difference",
     },
     {
         name:   "gpfs_bytes_written_diff",
         desc:   "bytes written (diff)",
         prefix: "_bw_",
         unit:   "bytes",
         calc:   "difference",
     },
 }
 var GpfsDeriveMetrics = []GpfsMetricDefinition{
     {
         name:   "gpfs_opens_rate",
         desc:   "number of opens (rate)",
         prefix: "_oc_",
         unit:   "requests/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_closes_rate",
         desc:   "number of closes (rate)",
         prefix: "_oc_",
         unit:   "requests/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_reads_rate",
         desc:   "number of reads (rate)",
         prefix: "_rdc_",
         unit:   "requests/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_writes_rate",
         desc:   "number of writes (rate)",
         prefix: "_wc_",
         unit:   "requests/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_readdirs_rate",
         desc:   "number of readdirs (rate)",
         prefix: "_dir_",
         unit:   "requests/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_inode_updates_rate",
         desc:   "number of Inode Updates (rate)",
         prefix: "_iu_",
         unit:   "requests/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_bw_read",
         desc:   "bytes read (rate)",
         prefix: "_br_",
         unit:   "bytes/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_bw_write",
         desc:   "bytes written (rate)",
         prefix: "_bw_",
         unit:   "bytes/sec",
         calc:   "derivative",
     },
 }
 var GpfsTotalMetrics = []GpfsMetricDefinition{
     {
         name:   "gpfs_bytes_total",
         desc:   "bytes total",
         prefix: "bytesTotal",
         unit:   "bytes",
         calc:   "none",
     },
     {
         name:   "gpfs_bytes_total_diff",
         desc:   "bytes total (diff)",
         prefix: "bytesTotal",
         unit:   "bytes",
         calc:   "difference",
     },
     {
         name:   "gpfs_bw_total",
         desc:   "bytes total (rate)",
         prefix: "bytesTotal",
         unit:   "bytes/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_iops",
         desc:   "iops",
         prefix: "iops",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_iops_diff",
         desc:   "iops (diff)",
         prefix: "iops",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_iops_rate",
         desc:   "iops (rate)",
         prefix: "iops",
         unit:   "requests/sec",
         calc:   "derivative",
     },
     {
         name:   "gpfs_metaops",
         desc:   "metaops",
         prefix: "metaops",
         unit:   "requests",
         calc:   "none",
     },
     {
         name:   "gpfs_metaops_diff",
         desc:   "metaops (diff)",
         prefix: "metaops",
         unit:   "requests",
         calc:   "difference",
     },
     {
         name:   "gpfs_metaops_rate",
         desc:   "metaops (rate)",
         prefix: "metaops",
         unit:   "requests/sec",
         calc:   "derivative",
     },
 }
@@ -310,9 +310,10 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
         return nil
     }
-    var err error
     m.name = "GpfsCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     // Set default mmpmon binary
@@ -320,7 +321,7 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
     // Read JSON configuration
     if len(config) > 0 {
-        err = json.Unmarshal(config, &m.config)
+        err := json.Unmarshal(config, &m.config)
         if err != nil {
             log.Print(err.Error())
             return err
@@ -366,7 +367,7 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
     if m.config.Sudo && !strings.HasPrefix(m.config.Mmpmon, "/") {
         return fmt.Errorf("when using sudo, mmpmon_path must be provided and an absolute path: %s", m.config.Mmpmon)
     }
     // Check if mmpmon is in executable search path
     p, err := exec.LookPath(m.config.Mmpmon)
     if err != nil {
@@ -416,11 +417,11 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
         for _, def := range GpfsTotalMetrics {
             if _, skip := stringArrayContains(m.config.ExcludeMetrics, def.name); !skip {
                 // only send total metrics of the types requested
-                if ( def.calc == "none" && m.config.SendAbsoluteValues ) ||
-                    ( def.calc == "difference" && m.config.SendDiffValues ) ||
-                    ( def.calc == "derivative" && m.config.SendDerivedValues ) {
+                if (def.calc == "none" && m.config.SendAbsoluteValues) ||
+                    (def.calc == "difference" && m.config.SendDiffValues) ||
+                    (def.calc == "derivative" && m.config.SendDerivedValues) {
                     m.definitions = append(m.definitions, def)
                 }
             }
         }
     } else if m.config.SendBandwidths {
@@ -456,7 +457,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
     } else {
         cmd = exec.Command(m.config.Mmpmon, "-p", "-s")
     }
     cmd.Stdin = strings.NewReader("once fs_io_s\n")
     cmdStdout := new(bytes.Buffer)
     cmdStderr := new(bytes.Buffer)
@@ -617,7 +618,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
         }
     case "derivative":
         if vnew_ok && vold_ok && timeDiff > 0 {
-            value = float64(vnew - vold) / timeDiff
+            value = float64(vnew-vold) / timeDiff
             if value.(float64) < 0 {
                 value = 0
             }
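The `calc` field in the metric definitions above selects how a reported value is computed from the previous and current counter readings, and the derivative branch in this hunk clamps negative rates to zero. The standalone helper below is a simplified, illustrative sketch of those three modes; it is not part of the repository.

```go
package main

import "fmt"

// calcValue is an illustrative helper (not taken from the repository) showing
// how the three calc modes in the GPFS metric definitions turn the previous
// and current counter readings into the reported value.
func calcValue(calc string, vnew, vold int64, timeDiff float64) (float64, bool) {
    switch calc {
    case "none":
        // absolute value as parsed from the mmpmon output
        return float64(vnew), true
    case "difference":
        // change since the previous read
        return float64(vnew - vold), true
    case "derivative":
        // rate per second; negative rates (e.g. after a counter reset) are
        // clamped to 0, matching the value.(float64) < 0 check in the hunk above
        if timeDiff <= 0 {
            return 0, false
        }
        v := float64(vnew-vold) / timeDiff
        if v < 0 {
            v = 0
        }
        return v, true
    }
    return 0, false
}

func main() {
    // 1500 bytes read now vs. 500 bytes 10 seconds ago -> 100 bytes/sec
    if v, ok := calcValue("derivative", 1500, 500, 10.0); ok {
        fmt.Println(v) // 100
    }
}
```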

View File

@@ -65,7 +65,9 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
     var err error
     m.name = "InfinibandCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{
         "source": m.name,

View File

@@ -11,6 +11,7 @@ import (
"bufio" "bufio"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"os" "os"
"strconv" "strconv"
"strings" "strings"
@@ -45,7 +46,9 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
m.name = "IOstatCollector" m.name = "IOstatCollector"
m.parallel = true m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "Disk"} m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup() if err := m.setup(); err != nil {
return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
}
if len(config) > 0 { if len(config) > 0 {
err = json.Unmarshal(config, &m.config) err = json.Unmarshal(config, &m.config)
if err != nil { if err != nil {

View File

@@ -43,7 +43,9 @@ func (m *IpmiCollector) Init(config json.RawMessage) error {
     }
     m.name = "IpmiCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{
         "source": m.name,

View File

@@ -234,7 +234,9 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
return fmt.Errorf("error setting environment variable LIKWID_FORCE=1: %v", err) return fmt.Errorf("error setting environment variable LIKWID_FORCE=1: %v", err)
} }
} }
m.setup() if err := m.setup(); err != nil {
return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
}
m.meta = map[string]string{"group": "PerfCounter"} m.meta = map[string]string{"group": "PerfCounter"}
cclog.ComponentDebug(m.name, "Get cpulist and init maps and lists") cclog.ComponentDebug(m.name, "Get cpulist and init maps and lists")

View File

@@ -42,7 +42,9 @@ type LoadavgCollector struct {
 func (m *LoadavgCollector) Init(config json.RawMessage) error {
     m.name = "LoadavgCollector"
     m.parallel = true
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     if len(config) > 0 {
         err := json.Unmarshal(config, &m.config)
         if err != nil {

View File

@@ -302,7 +302,9 @@ func (m *LustreCollector) Init(config json.RawMessage) error {
             return err
         }
     }
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.tags = map[string]string{"type": "node"}
     m.meta = map[string]string{"source": m.name, "group": "Lustre"}

View File

@@ -127,7 +127,9 @@ func (m *MemstatCollector) Init(config json.RawMessage) error {
     if len(m.matches) == 0 {
         return errors.New("no metrics to collect")
     }
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     if m.config.NodeStats {
         if stats := getStats(MEMSTATFILE); len(stats) == 0 {

View File

@@ -11,6 +11,7 @@ import (
"bufio" "bufio"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"os" "os"
"strconv" "strconv"
"strings" "strings"
@@ -65,7 +66,9 @@ func getCanonicalName(raw string, aliasToCanonical map[string]string) string {
 func (m *NetstatCollector) Init(config json.RawMessage) error {
     m.name = "NetstatCollector"
     m.parallel = true
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.lastTimestamp = time.Now()
     const (

View File

@@ -189,13 +189,17 @@ type Nfs4Collector struct {
 func (m *Nfs3Collector) Init(config json.RawMessage) error {
     m.name = "Nfs3Collector"
     m.version = `v3`
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     return m.MainInit(config)
 }
 func (m *Nfs4Collector) Init(config json.RawMessage) error {
     m.name = "Nfs4Collector"
     m.version = `v4`
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     return m.MainInit(config)
 }

View File

@@ -102,7 +102,9 @@ func (m *NfsIOStatCollector) readNfsiostats() map[string]map[string]int64 {
 func (m *NfsIOStatCollector) Init(config json.RawMessage) error {
     var err error = nil
     m.name = "NfsIOStatCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{"source": m.name, "group": "NFS", "unit": "bytes"}
     m.tags = map[string]string{"type": "node"}

View File

@@ -72,7 +72,9 @@ func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
m.name = "NUMAStatsCollector" m.name = "NUMAStatsCollector"
m.parallel = true m.parallel = true
m.setup() if err := m.setup(); err != nil {
return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
}
m.meta = map[string]string{ m.meta = map[string]string{
"source": m.name, "source": m.name,
"group": "NUMA", "group": "NUMA",

View File

@@ -64,7 +64,9 @@ func (m *NvidiaCollector) Init(config json.RawMessage) error {
     m.config.ProcessMigDevices = false
     m.config.UseUuidForMigDevices = false
     m.config.UseSliceForMigDevices = false
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     if len(config) > 0 {
         err = json.Unmarshal(config, &m.config)
         if err != nil {

View File

@@ -54,9 +54,10 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {
         return nil
     }
-    var err error = nil
     m.name = "RAPLCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{
         "source": m.name,
@@ -66,7 +67,7 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {
     // Read in the JSON configuration
     if len(config) > 0 {
-        err = json.Unmarshal(config, &m.config)
+        err := json.Unmarshal(config, &m.config)
         if err != nil {
             cclog.ComponentError(m.name, "Error reading config:", err.Error())
             return err

View File

@@ -52,7 +52,9 @@ func (m *RocmSmiCollector) Init(config json.RawMessage) error {
     // Always set the name early in Init() to use it in cclog.Component* functions
     m.name = "RocmSmiCollector"
     // This is for later use, also call it early
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     // Define meta information sent with each metric
     // (Can also be dynamic or this is the basic set with extension through AddMeta())
     //m.meta = map[string]string{"source": m.name, "group": "AMD"}

View File

@@ -9,6 +9,7 @@ package collectors
 import (
     "encoding/json"
+    "fmt"
     "time"
     cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
@@ -41,7 +42,9 @@ func (m *SampleCollector) Init(config json.RawMessage) error {
     // Always set the name early in Init() to use it in cclog.Component* functions
     m.name = "SampleCollector"
     // This is for later use, also call it early
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     // Tell whether the collector should be run in parallel with others (reading files, ...)
     // or it should be run serially, mostly for collectors actually doing measurements
     // because they should not measure the execution of the other collectors

View File

@@ -9,6 +9,7 @@ package collectors
 import (
     "encoding/json"
+    "fmt"
     "sync"
     "time"
@@ -40,7 +41,9 @@ func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
     // Always set the name early in Init() to use it in cclog.Component* functions
     m.name = "SampleTimerCollector"
     // This is for later use, also call it early
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     // Define meta information sent with each metric
     // (Can also be dynamic or this is the basic set with extension through AddMeta())
     m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}

View File

@@ -50,7 +50,9 @@ func (m *SchedstatCollector) Init(config json.RawMessage) error {
     // Always set the name early in Init() to use it in cclog.Component* functions
     m.name = "SchedstatCollector"
     // This is for later use, also call it early
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     // Tell whether the collector should be run in parallel with others (reading files, ...)
     // or it should be run serially, mostly for collectors acutally doing measurements
     // because they should not measure the execution of the other collectors

View File

@@ -9,6 +9,7 @@ package collectors
 import (
     "encoding/json"
+    "fmt"
     "runtime"
     "syscall"
     "time"
@@ -34,7 +35,9 @@ type SelfCollector struct {
 func (m *SelfCollector) Init(config json.RawMessage) error {
     var err error = nil
     m.name = "SelfCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{"source": m.name, "group": "Self"}
     m.tags = map[string]string{"type": "node"}

View File

@@ -103,7 +103,9 @@ func (m *SlurmCgroupCollector) readFile(path string) ([]byte, error) {
 func (m *SlurmCgroupCollector) Init(config json.RawMessage) error {
     var err error
     m.name = "SlurmCgroupCollector"
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     m.parallel = true
     m.meta = map[string]string{"source": m.name, "group": "SLURM"}
     m.tags = map[string]string{"type": "hwthread"}

View File

@@ -58,7 +58,9 @@ func (m *TempCollector) Init(config json.RawMessage) error {
m.name = "TempCollector" m.name = "TempCollector"
m.parallel = true m.parallel = true
m.setup() if err := m.setup(); err != nil {
return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
}
if len(config) > 0 { if len(config) > 0 {
err := json.Unmarshal(config, &m.config) err := json.Unmarshal(config, &m.config)
if err != nil { if err != nil {

View File

@@ -49,7 +49,9 @@ func (m *TopProcsCollector) Init(config json.RawMessage) error {
     if m.config.Num_procs <= 0 || m.config.Num_procs > MAX_NUM_PROCS {
         return fmt.Errorf("num_procs option must be set in 'topprocs' config (range: 1-%d)", MAX_NUM_PROCS)
     }
-    m.setup()
+    if err := m.setup(); err != nil {
+        return fmt.Errorf("%s Init(): setup() call failed: %w", m.name, err)
+    }
     command := exec.Command("ps", "-Ao", "comm", "--sort=-pcpu")
     command.Wait()
     _, err = command.Output()