package collectors

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
	"golang.org/x/sys/unix"
)

//
// readOneLine reads one line from a file.
// It returns ok == true when the file was successfully read.
// In this case text contains the first line of the file's contents.
//
func readOneLine(filename string) (text string, ok bool) {
	file, err := os.Open(filename)
	if err != nil {
		return
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	ok = scanner.Scan()
	text = scanner.Text()
	return
}
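
// A typical call reads a single sysfs value, for example (the path is shown
// only as an illustration of the files accessed further below):
//
//	coreID, ok := readOneLine("/sys/devices/system/cpu/cpu0/topology/core_id")
//	if !ok {
//		// handle unreadable file
//	}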

type CPUFreqCollectorTopology struct {
	processor               string // logical processor number (continuous, starting at 0)
	coreID                  string // socket local core ID
	coreID_int              int
	physicalPackageID       string // socket / package ID
	physicalPackageID_int   int
	numPhysicalPackages     string // number of sockets / packages
	numPhysicalPackages_int int
	isHT                    bool
	numNonHT                string // number of non hyperthreading processors
	numNonHT_int            int
	scalingCurFreqFile      string
	tagSet                  map[string]string
}

//
// CPUFreqCollector
// a metric collector to measure the current frequency of the CPUs
// as obtained from the hardware (in kHz).
// Only the first hyper thread of each physical core is measured.
//
// See: https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html
//
type CPUFreqCollector struct {
	metricCollector
	topology []CPUFreqCollectorTopology
	config   struct {
		ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
	}
}
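
// Init parses the collector configuration, discovers the CPU topology below
// /sys/devices/system/cpu and verifies that scaling_cur_freq is readable for
// every logical CPU. A minimal configuration sketch (the enclosing collector
// key name is assumed here and not defined in this file):
//
//	"cpufreq": {
//	    "exclude_metrics": []
//	}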
func (m *CPUFreqCollector) Init(config json.RawMessage) error {
	m.name = "CPUFreqCollector"
	m.setup()
	if len(config) > 0 {
		err := json.Unmarshal(config, &m.config)
		if err != nil {
			return err
		}
	}
	m.meta = map[string]string{
		"source": m.name,
		"group":  "CPU Frequency",
	}

	// Loop for all CPU directories
	baseDir := "/sys/devices/system/cpu"
	globPattern := filepath.Join(baseDir, "cpu[0-9]*")
	cpuDirs, err := filepath.Glob(globPattern)
	if err != nil {
		return fmt.Errorf("CPUFreqCollector.Init() unable to glob files with pattern %s: %v", globPattern, err)
	}
	if cpuDirs == nil {
		return fmt.Errorf("CPUFreqCollector.Init() unable to find any files with pattern %s", globPattern)
	}

	// Initialize CPU topology
	m.topology = make([]CPUFreqCollectorTopology, len(cpuDirs))
	for _, cpuDir := range cpuDirs {
		processor := strings.TrimPrefix(cpuDir, "/sys/devices/system/cpu/cpu")
		processor_int, err := strconv.Atoi(processor)
		if err != nil {
			return fmt.Errorf("CPUFreqCollector.Init() unable to convert cpuID to int: %v", err)
		}

		// Read package ID
		physicalPackageIDFile := filepath.Join(cpuDir, "topology", "physical_package_id")
		physicalPackageID, ok := readOneLine(physicalPackageIDFile)
		if !ok {
			return fmt.Errorf("CPUFreqCollector.Init() unable to read physical package ID from %s", physicalPackageIDFile)
		}
		physicalPackageID_int, err := strconv.Atoi(physicalPackageID)
		if err != nil {
			return fmt.Errorf("CPUFreqCollector.Init() unable to convert packageID to int: %v", err)
		}

		// Read core ID
		coreIDFile := filepath.Join(cpuDir, "topology", "core_id")
		coreID, ok := readOneLine(coreIDFile)
		if !ok {
			return fmt.Errorf("CPUFreqCollector.Init() unable to read core ID from %s", coreIDFile)
		}
		coreID_int, err := strconv.Atoi(coreID)
		if err != nil {
			return fmt.Errorf("CPUFreqCollector.Init() unable to convert coreID to int: %v", err)
		}

		// Check access to current frequency file
		scalingCurFreqFile := filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq")
		err = unix.Access(scalingCurFreqFile, unix.R_OK)
		if err != nil {
			return fmt.Errorf("CPUFreqCollector.Init() unable to access %s: %v", scalingCurFreqFile, err)
		}

		t := &m.topology[processor_int]
		t.processor = processor
		t.physicalPackageID = physicalPackageID
		t.physicalPackageID_int = physicalPackageID_int
		t.coreID = coreID
		t.coreID_int = coreID_int
		t.scalingCurFreqFile = scalingCurFreqFile
	}

	// Is a processor a hyperthread?
	// A logical processor is flagged as hyperthread if another processor
	// with the same physical package ID and core ID has been seen before;
	// the first sibling of each physical core therefore stays unflagged.
	coreSeenBefore := make(map[string]bool)
	for i := range m.topology {
		t := &m.topology[i]

		globalID := t.physicalPackageID + ":" + t.coreID
		t.isHT = coreSeenBefore[globalID]
		coreSeenBefore[globalID] = true
	}

	// number of non hyper thread cores and packages / sockets
	numNonHT_int := 0
	maxPhysicalPackageID := 0
	for i := range m.topology {
		t := &m.topology[i]

		// Update maxPackageID
		if t.physicalPackageID_int > maxPhysicalPackageID {
			maxPhysicalPackageID = t.physicalPackageID_int
		}

		if !t.isHT {
			numNonHT_int++
		}
	}

	numPhysicalPackageID_int := maxPhysicalPackageID + 1
	numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
	numNonHT := fmt.Sprint(numNonHT_int)
	for i := range m.topology {
		t := &m.topology[i]
		t.numPhysicalPackages = numPhysicalPackageID
		t.numPhysicalPackages_int = numPhysicalPackageID_int
		t.numNonHT = numNonHT
		t.numNonHT_int = numNonHT_int
		t.tagSet = map[string]string{
			"type":        "cpu",
			"type-id":     t.processor,
			"num_core":    t.numNonHT,
			"package_id":  t.physicalPackageID,
			"num_package": t.numPhysicalPackages,
		}
	}

	m.init = true
	return nil
}
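
// Read reads scaling_cur_freq (a value in kHz) for the first hyper thread of
// each physical core and sends it as a "cpufreq" metric to the output channel.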
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	if !m.init {
		return
	}

	now := time.Now()
	for i := range m.topology {
		t := &m.topology[i]

		// skip hyperthreads
		if t.isHT {
			continue
		}

		// Read current frequency
		line, ok := readOneLine(t.scalingCurFreqFile)
		if !ok {
			log.Printf("CPUFreqCollector.Read(): Failed to read one line from file '%s'", t.scalingCurFreqFile)
			continue
		}
		cpuFreq, err := strconv.Atoi(line)
		if err != nil {
			log.Printf("CPUFreqCollector.Read(): Failed to convert CPU frequency '%s': %v", line, err)
			continue
		}

		y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now)
		if err == nil {
			output <- y
		}
	}
}
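
// Close marks the collector as uninitialized so that subsequent Read calls
// return immediately.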
func (m *CPUFreqCollector) Close() {
	m.init = false
}