Fix for LIKWID collector with separate measurement thread and inotify watcher on the LIKWID lock (#97)

Thomas Gruber authored on 2022-12-20 12:59:33 +01:00, committed via GitHub
parent 200e6d6f42
commit 6c10c9741a
2 changed files with 225 additions and 247 deletions
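The mechanism behind this fix, as it appears in the new `takeMeasurement` further down: before touching the counters, the collector stats the LIKWID lock file and refuses to measure if another user owns it, and it keeps an fsnotify (inotify) watch on that file so that an ownership change while measuring aborts the current round. The standalone sketch below is editor-added illustration, not code from this commit; it assumes the legacy `gopkg.in/fsnotify.v0` API that the diff imports (`NewWatcher`, `Watch`, the `Event` channel, `IsAttrib`), and the `checkLock` helper is hypothetical.

```go
package main

import (
	"fmt"
	"os"
	"os/user"
	"strconv"
	"syscall"

	fsnotify "gopkg.in/fsnotify.v0"
)

// checkLock returns an error if the LIKWID lock file cannot be read or is
// owned by another user; LIKWID arbitrates counter access via file ownership.
func checkLock(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	stat := info.Sys().(*syscall.Stat_t)
	if stat.Uid != uint32(os.Getuid()) {
		if usr, err := user.LookupId(strconv.FormatUint(uint64(stat.Uid), 10)); err == nil {
			return fmt.Errorf("access to performance counters locked by %s", usr.Username)
		}
		return fmt.Errorf("access to performance counters locked by uid %d", stat.Uid)
	}
	return nil
}

func main() {
	const lockfile = "/var/run/likwid.lock" // default path, same as LIKWID_DEF_LOCKFILE in the diff

	if err := checkLock(lockfile); err != nil {
		fmt.Println("skipping measurement:", err)
		return
	}

	// Watch the lock file; an attribute (chown) event means another tool has
	// taken the lock, so the pending counter call should be skipped.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer watcher.Close()
	if err := watcher.Watch(lockfile); err != nil {
		fmt.Println(err)
		return
	}

	select {
	case e := <-watcher.Event:
		if e.IsAttrib() {
			fmt.Println("lock ownership changed, skipping measurement")
			return
		}
		fmt.Println("lock file event, continuing:", e)
	default:
		fmt.Println("lock is free, safe to call into LIKWID")
	}
}
```

In the collector itself this guard is repeated around every `perfmon_*` call, combined with a signal channel, as the diff below shows.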


@@ -15,6 +15,7 @@ import (
	"math"
	"os"
	"os/signal"
+	"os/user"
	"sort"
	"strconv"
	"strings"
@@ -29,12 +30,14 @@ import (
	topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
	"github.com/NVIDIA/go-nvml/pkg/dl"
	"golang.design/x/thread"
+	fsnotify "gopkg.in/fsnotify.v0"
)

const (
	LIKWID_LIB_NAME       = "liblikwid.so"
	LIKWID_LIB_DL_FLAGS   = dl.RTLD_LAZY | dl.RTLD_GLOBAL
	LIKWID_DEF_ACCESSMODE = "direct"
+	LIKWID_DEF_LOCKFILE   = "/var/run/likwid.lock"
)

type LikwidCollectorMetricConfig struct {
@@ -68,6 +71,7 @@ type LikwidCollectorConfig struct {
	AccessMode   string `json:"access_mode,omitempty"`
	DaemonPath   string `json:"accessdaemon_path,omitempty"`
	LibraryPath  string `json:"liblikwid_path,omitempty"`
+	LockfilePath string `json:"lockfile_path,omitempty"`
}

type LikwidCollector struct {
@@ -82,7 +86,7 @@ type LikwidCollector struct {
	basefreq      float64
	running       bool
	initialized   bool
	needs_reinit  bool
	likwidGroups  map[C.int]LikwidEventsetConfig
	lock          sync.Mutex
	measureThread thread.Thread
@@ -198,6 +202,7 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
	m.running = false
	m.config.AccessMode = LIKWID_DEF_ACCESSMODE
	m.config.LibraryPath = LIKWID_LIB_NAME
+	m.config.LockfilePath = LIKWID_DEF_LOCKFILE
	if len(config) > 0 {
		err := json.Unmarshal(config, &m.config)
		if err != nil {
@@ -255,12 +260,16 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
			}
			for _, metric := range evset.Metrics {
				// Try to evaluate the metric
-				if testLikwidMetricFormula(metric.Calc, params) && checkMetricType(metric.Type) {
-					// Add the computable metric to the parameter list for the global metrics
+				cclog.ComponentDebug(m.name, "Checking", metric.Name)
+				if !checkMetricType(metric.Type) {
+					cclog.ComponentError(m.name, "Metric", metric.Name, "uses invalid type", metric.Type)
+					metric.Calc = ""
+				} else if !testLikwidMetricFormula(metric.Calc, params) {
+					cclog.ComponentError(m.name, "Metric", metric.Name, "cannot be calculated with given counters")
+					metric.Calc = ""
+				} else {
					globalParams = append(globalParams, metric.Name)
					totalMetrics++
-				} else {
-					metric.Calc = ""
				}
			}
		} else {
@@ -270,11 +279,11 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
	}
	for _, metric := range m.config.Metrics {
		// Try to evaluate the global metric
-		if !testLikwidMetricFormula(metric.Calc, globalParams) {
-			cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed")
+		if !checkMetricType(metric.Type) {
+			cclog.ComponentError(m.name, "Metric", metric.Name, "uses invalid type", metric.Type)
			metric.Calc = ""
-		} else if !checkMetricType(metric.Type) {
-			cclog.ComponentError(m.name, "Metric", metric.Name, "has invalid type")
+		} else if !testLikwidMetricFormula(metric.Calc, globalParams) {
+			cclog.ComponentError(m.name, "Metric", metric.Name, "cannot be calculated with given counters")
			metric.Calc = ""
		} else {
			totalMetrics++
@@ -287,77 +296,195 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
		cclog.ComponentError(m.name, err.Error())
		return err
	}
+	ret := C.topology_init()
+	if ret != 0 {
+		err := errors.New("failed to initialize topology module")
+		cclog.ComponentError(m.name, err.Error())
+		return err
+	}
+	switch m.config.AccessMode {
+	case "direct":
+		C.HPMmode(0)
+	case "accessdaemon":
+		if len(m.config.DaemonPath) > 0 {
+			p := os.Getenv("PATH")
+			os.Setenv("PATH", m.config.DaemonPath+":"+p)
+		}
+		C.HPMmode(1)
+		for _, c := range m.cpulist {
+			C.HPMaddThread(c)
+		}
+	}
+	m.sock2tid = make(map[int]int)
+	tmp := make([]C.int, 1)
+	for _, sid := range topo.SocketList() {
+		cstr := C.CString(fmt.Sprintf("S%d:0", sid))
+		ret = C.cpustr_to_cpulist(cstr, &tmp[0], 1)
+		if ret > 0 {
+			m.sock2tid[sid] = m.cpu2tid[int(tmp[0])]
+		}
+		C.free(unsafe.Pointer(cstr))
+	}
+	m.basefreq = getBaseFreq()
	m.measureThread = thread.New()
	m.init = true
	return nil
}

// take a measurement for 'interval' seconds of event set index 'group'
-func (m *LikwidCollector) takeMeasurement(evset LikwidEventsetConfig, interval time.Duration) (bool, error) {
+func (m *LikwidCollector) takeMeasurement(evidx int, evset LikwidEventsetConfig, interval time.Duration) (bool, error) {
	var ret C.int
-	m.lock.Lock()
-	if m.initialized {
-		ret = C.perfmon_setupCounters(evset.gid)
-		if ret != 0 {
-			var err error = nil
-			var skip bool = false
-			cclog.ComponentDebug(m.name, "Setup returns", ret)
-			if ret == -37 {
-				skip = true
-			} else {
-				err = fmt.Errorf("failed to setup performance group %d", evset.gid)
-			}
-			m.lock.Unlock()
-			return skip, err
-		}
-		m.running = true
-		ret = C.perfmon_startCounters()
-		if ret != 0 {
-			var err error = nil
-			var skip bool = false
-			if ret == -37 {
-				skip = true
-			} else {
-				err = fmt.Errorf("failed to setup performance group %d", evset.gid)
-			}
-			m.lock.Unlock()
-			return skip, err
-		}
-		ret = C.perfmon_readCounters()
-		time.Sleep(interval)
-		m.running = false
-		ret = C.perfmon_stopCounters()
-		if ret != 0 {
-			var err error = nil
-			var skip bool = false
-			if ret == -37 {
-				skip = true
-			} else {
-				err = fmt.Errorf("failed to setup performance group %d", evset.gid)
-			}
-			m.lock.Unlock()
-			return skip, err
-		}
-		m.running = false
-		runtime := float64(C.perfmon_getLastTimeOfGroup(evset.gid))
-		// Go over events and get the results
-		for eidx, counter := range evset.eorder {
-			gctr := C.GoString(counter)
-			for _, tid := range m.cpu2tid {
-				res := C.perfmon_getLastResult(evset.gid, C.int(eidx), C.int(tid))
-				fres := float64(res)
-				if m.config.InvalidToZero && (math.IsNaN(fres) || math.IsInf(fres, 0)) {
-					cclog.ComponentDebug(m.name, "Sanitize", gctr, "to zero")
-					fres = 0.0
-				}
-				evset.results[tid][gctr] = fres
-			}
-		}
-		for _, tid := range m.cpu2tid {
-			evset.results[tid]["time"] = runtime
-		}
-	}
-	m.lock.Unlock()
+	var gid C.int = -1
+	sigchan := make(chan os.Signal, 1)
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		cclog.ComponentError(m.name, err.Error())
+	}
+	defer watcher.Close()
+	if len(m.config.LockfilePath) > 0 {
+		info, err := os.Stat(m.config.LockfilePath)
+		if err != nil {
+			return true, err
+		}
+		stat := info.Sys().(*syscall.Stat_t)
+		if stat.Uid != uint32(os.Getuid()) {
+			usr, err := user.LookupId(strconv.FormatUint(uint64(stat.Uid), 10))
+			if err == nil {
+				return true, fmt.Errorf("Access to performance counters locked by %s", usr.Username)
+			} else {
+				return true, fmt.Errorf("Access to performance counters locked by %d", stat.Uid)
+			}
+		}
+		err = watcher.Watch(m.config.LockfilePath)
+		if err != nil {
+			cclog.ComponentError(m.name, err.Error())
+		}
+	}
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	select {
+	case e := <-watcher.Event:
+		ret = -1
+		if !e.IsAttrib() {
+			ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
+		}
+	default:
+		ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
+	}
+	if ret != 0 {
+		return true, fmt.Errorf("failed to initialize library, error %d", ret)
+	}
+	signal.Notify(sigchan, os.Interrupt)
+	signal.Notify(sigchan, syscall.SIGCHLD)
+	select {
+	case <-sigchan:
+		gid = -1
+	case e := <-watcher.Event:
+		gid = -1
+		if !e.IsAttrib() {
+			gid = C.perfmon_addEventSet(evset.estr)
+		}
+	default:
+		gid = C.perfmon_addEventSet(evset.estr)
+	}
+	if gid < 0 {
+		return true, fmt.Errorf("failed to add events %s, error %d", evset.go_estr, gid)
+	} else {
+		evset.gid = gid
+		//m.likwidGroups[gid] = evset
+	}
+	select {
+	case <-sigchan:
+		ret = -1
+	case e := <-watcher.Event:
+		if !e.IsAttrib() {
+			ret = C.perfmon_setupCounters(gid)
+		}
+	default:
+		ret = C.perfmon_setupCounters(gid)
+	}
+	if ret != 0 {
+		return true, fmt.Errorf("failed to setup events '%s', error %d", evset.go_estr, ret)
+	}
+	select {
+	case <-sigchan:
+		ret = -1
+	case e := <-watcher.Event:
+		if !e.IsAttrib() {
+			ret = C.perfmon_startCounters()
+		}
+	default:
+		ret = C.perfmon_startCounters()
+	}
+	if ret != 0 {
+		return true, fmt.Errorf("failed to start events '%s', error %d", evset.go_estr, ret)
+	}
+	select {
+	case <-sigchan:
+		ret = -1
+	case e := <-watcher.Event:
+		if !e.IsAttrib() {
+			ret = C.perfmon_readCounters()
+		}
+	default:
+		ret = C.perfmon_readCounters()
+	}
+	if ret != 0 {
+		return true, fmt.Errorf("failed to read events '%s', error %d", evset.go_estr, ret)
+	}
+	time.Sleep(interval)
+	select {
+	case <-sigchan:
+		ret = -1
+	case e := <-watcher.Event:
+		if !e.IsAttrib() {
+			ret = C.perfmon_readCounters()
+		}
+	default:
+		ret = C.perfmon_readCounters()
+	}
+	if ret != 0 {
+		return true, fmt.Errorf("failed to read events '%s', error %d", evset.go_estr, ret)
+	}
+	for eidx, counter := range evset.eorder {
+		gctr := C.GoString(counter)
+		for _, tid := range m.cpu2tid {
+			res := C.perfmon_getLastResult(gid, C.int(eidx), C.int(tid))
+			fres := float64(res)
+			if m.config.InvalidToZero && (math.IsNaN(fres) || math.IsInf(fres, 0)) {
+				fres = 0.0
+			}
+			evset.results[tid][gctr] = fres
+		}
+	}
+	for _, tid := range m.cpu2tid {
+		evset.results[tid]["time"] = float64(C.perfmon_getLastTimeOfGroup(gid))
+	}
+	select {
+	case <-sigchan:
+		ret = -1
+	case e := <-watcher.Event:
+		if !e.IsAttrib() {
+			ret = C.perfmon_stopCounters()
+		}
+	default:
+		ret = C.perfmon_stopCounters()
+	}
+	if ret != 0 {
+		return true, fmt.Errorf("failed to stop events '%s', error %d", evset.go_estr, ret)
+	}
+	signal.Stop(sigchan)
+	select {
+	case e := <-watcher.Event:
+		if !e.IsAttrib() {
+			C.perfmon_finalize()
+		}
+	default:
+		C.perfmon_finalize()
+	}
	return false, nil
}
@@ -412,7 +539,7 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMetric) error {
}

// Go over the global metrics, derive the value out of the event sets' metric values and send it
-func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
+func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, interval time.Duration, output chan lp.CCMetric) error {
	for _, metric := range m.config.Metrics {
		scopemap := m.cpu2tid
		if metric.Type == "socket" {
@@ -422,7 +549,7 @@ func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
		if tid >= 0 {
			// Here we generate parameter list
			params := make(map[string]interface{})
-			for _, evset := range m.likwidGroups {
+			for _, evset := range groups {
				for mname, mres := range evset.metrics[tid] {
					params[mname] = mres
				}
@@ -436,7 +563,7 @@ func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
			if m.config.InvalidToZero && (math.IsNaN(value) || math.IsInf(value, 0)) {
				value = 0.0
			}
-			m.gmresults[tid][metric.Name] = value
+			//m.gmresults[tid][metric.Name] = value
			// Now we have the result, send it with the proper tags
			if !math.IsNaN(value) {
				if metric.Publish {
@@ -460,203 +587,52 @@ func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
	return nil
}

-func (m *LikwidCollector) ReInit() error {
-	C.perfmon_finalize()
-	ret := C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
-	if ret != 0 {
-		return nil
-	}
-	for i, evset := range m.config.Eventsets {
-		var gid C.int
-		if len(evset.Events) > 0 {
-			//skip := false
-			likwidGroup := genLikwidEventSet(evset)
-			gid = C.perfmon_addEventSet(likwidGroup.estr)
-			if gid >= 0 {
-				likwidGroup.gid = gid
-				likwidGroup.internal = i
-				m.likwidGroups[gid] = likwidGroup
-			}
-		}
-	}
-	return nil
-}
-
-func (m *LikwidCollector) LateInit() error {
-	var ret C.int
-	if m.initialized {
-		return nil
-	}
-	switch m.config.AccessMode {
-	case "direct":
-		C.HPMmode(0)
-	case "accessdaemon":
-		if len(m.config.DaemonPath) > 0 {
-			p := os.Getenv("PATH")
-			os.Setenv("PATH", m.config.DaemonPath+":"+p)
-		}
-		C.HPMmode(1)
-		for _, c := range m.cpulist {
-			C.HPMaddThread(c)
-		}
-	}
-	cclog.ComponentDebug(m.name, "initialize LIKWID topology")
-	ret = C.topology_init()
-	if ret != 0 {
-		err := errors.New("failed to initialize LIKWID topology")
-		cclog.ComponentError(m.name, err.Error())
-		return err
-	}
-	m.sock2tid = make(map[int]int)
-	tmp := make([]C.int, 1)
-	for _, sid := range topo.SocketList() {
-		cstr := C.CString(fmt.Sprintf("S%d:0", sid))
-		ret = C.cpustr_to_cpulist(cstr, &tmp[0], 1)
-		if ret > 0 {
-			m.sock2tid[sid] = m.cpu2tid[int(tmp[0])]
-		}
-		C.free(unsafe.Pointer(cstr))
-	}
-	m.basefreq = getBaseFreq()
-	cclog.ComponentDebug(m.name, "BaseFreq", m.basefreq)
-	if m.needs_reinit {
-		m.ReInit()
-		m.needs_reinit = false
-	}
-	// cclog.ComponentDebug(m.name, "initialize LIKWID perfmon module")
-	// ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
-	// if ret != 0 {
-	//	var err error = nil
-	//	C.topology_finalize()
-	//	if ret != -22 {
-	//		err = errors.New("failed to initialize LIKWID perfmon")
-	//		cclog.ComponentError(m.name, err.Error())
-	//	} else {
-	//		err = errors.New("access to LIKWID perfmon locked")
-	//	}
-	//	return err
-	// }
-	// // While adding the events, we test the metrics whether they can be computed at all
-	// for i, evset := range m.config.Eventsets {
-	//	var gid C.int
-	//	if len(evset.Events) > 0 {
-	//		//skip := false
-	//		likwidGroup := genLikwidEventSet(evset)
-	//		// for _, g := range m.likwidGroups {
-	//		//	if likwidGroup.go_estr == g.go_estr {
-	//		//		skip = true
-	//		//		break
-	//		//	}
-	//		// }
-	//		// if skip {
-	//		//	continue
-	//		// }
-	//		// Now we add the list of events to likwid
-	//		gid = C.perfmon_addEventSet(likwidGroup.estr)
-	//		if gid >= 0 {
-	//			likwidGroup.gid = gid
-	//			likwidGroup.internal = i
-	//			m.likwidGroups[gid] = likwidGroup
-	//		}
-	//	} else {
-	//		cclog.ComponentError(m.name, "Invalid Likwid eventset config, no events given")
-	//		continue
-	//	}
-	// }
-	// If no event set could be added, shut down LikwidCollector
-	if len(m.likwidGroups) == 0 {
-		C.perfmon_finalize()
-		C.topology_finalize()
-		err := errors.New("no LIKWID performance group initialized")
-		cclog.ComponentError(m.name, err.Error())
-		return err
-	}
-	sigchan := make(chan os.Signal, 1)
-	signal.Notify(sigchan, syscall.SIGCHLD)
-	signal.Notify(sigchan, os.Interrupt)
-	go func() {
-		<-sigchan
-		signal.Stop(sigchan)
-		m.initialized = false
-	}()
-	m.initialized = true
-	return nil
-}
+func (m *LikwidCollector) ReadThread(interval time.Duration, output chan lp.CCMetric) {
+	var err error = nil
+	groups := make([]LikwidEventsetConfig, 0)
+
+	for evidx, evset := range m.config.Eventsets {
+		e := genLikwidEventSet(evset)
+		e.internal = evidx
+		skip := false
+		if !skip {
+			// measure event set 'i' for 'interval' seconds
+			skip, err = m.takeMeasurement(evidx, e, interval)
+			if err != nil {
+				cclog.ComponentError(m.name, err.Error())
+				return
+			}
+		}
+
+		if !skip {
+			// read measurements and derive event set metrics
+			m.calcEventsetMetrics(e, interval, output)
+		}
+		groups = append(groups, e)
+	}
+	// calculate global metrics
+	m.calcGlobalMetrics(groups, interval, output)
+}

// main read function taking multiple measurement rounds, each 'interval' seconds long
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMetric) {
-	var skip bool = false
-	var err error
+	//var skip bool = false
+	//var err error
	if !m.init {
		return
	}

	m.measureThread.Call(func() {
-		if !m.initialized {
-			m.lock.Lock()
-			err = m.LateInit()
-			if err != nil {
-				m.lock.Unlock()
-				cclog.ComponentError(m.name, "lateinit failed")
-				return
-			}
-			m.initialized = true
-			m.lock.Unlock()
-			skip = true
-		}
-		if m.initialized && !skip {
-			time := interval
-			for _, evset := range m.likwidGroups {
-				if !skip {
-					// measure event set 'i' for 'interval' seconds
-					skip, err = m.takeMeasurement(evset, interval)
-					if err != nil {
-						cclog.ComponentError(m.name, err.Error())
-						return
-					}
-				}
-				if !skip {
-					// read measurements and derive event set metrics
-					m.calcEventsetMetrics(evset, time, output)
-				}
-			}
-			if !skip {
-				// use the event set metrics to derive the global metrics
-				m.calcGlobalMetrics(time, output)
-			}
-			if skip {
-				m.needs_reinit = true
-				m.initialized = false
-			}
-		}
+		m.ReadThread(interval, output)
	})
}

func (m *LikwidCollector) Close() {
	if m.init {
		m.init = false
-		cclog.ComponentDebug(m.name, "Closing ...")
		m.lock.Lock()
-		if m.initialized {
-			cclog.ComponentDebug(m.name, "Finalize LIKWID perfmon module")
-			C.perfmon_finalize()
-			m.initialized = false
-		}
+		m.measureThread.Terminate()
+		m.initialized = false
		m.lock.Unlock()
-		cclog.ComponentDebug(m.name, "Finalize LIKWID topology module")
		C.topology_finalize()
-		cclog.ComponentDebug(m.name, "Closing done")
	}
}
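The other half of the fix is the dedicated measurement thread: `Read` now only dispatches `ReadThread` through `m.measureThread` (`golang.design/x/thread`), so the whole `perfmon_init` ... `perfmon_finalize` cycle runs on one pinned OS thread. A minimal sketch of that dispatch pattern, editor-added and not taken from the commit; `syscall.Gettid` is Linux-only and is used here only to show that the thread stays the same across calls:

```go
package main

import (
	"fmt"
	"syscall"

	"golang.design/x/thread"
)

func main() {
	// Every function passed to t.Call runs on the same dedicated OS thread,
	// which is what thread-affine APIs like LIKWID's perfmon expect.
	t := thread.New()
	defer t.Terminate()

	for round := 0; round < 3; round++ {
		t.Call(func() {
			// Prints the same thread ID on every round (Linux-only call).
			fmt.Println("measurement round", round, "on OS thread", syscall.Gettid())
		})
	}
}
```

`Close()` above tears the collector's measurement thread down again with `m.measureThread.Terminate()`.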


@@ -10,6 +10,7 @@ The `likwid` collector is probably the most complicated collector. The LIKWID library
    "liblikwid_path" : "/path/to/liblikwid.so",
    "accessdaemon_path" : "/folder/that/contains/likwid-accessD",
    "access_mode" : "direct or accessdaemon or perf_event",
+   "lockfile_path" : "/var/run/likwid.lock",
    "eventsets": [
      {
        "events" : {
@@ -49,6 +50,7 @@ Additional options:
- `access_mode`: Specify LIKWID access mode: `direct` for direct register access as root user or `accessdaemon`. The access mode `perf_event` is current untested.
- `accessdaemon_path`: Folder of the accessDaemon `likwid-accessD` (like `/usr/local/sbin`)
- `liblikwid_path`: Location of `liblikwid.so` including file name like `/usr/local/lib/liblikwid.so`
+- `lockfile_path`: Location of LIKWID's lock file if multiple tools should access the hardware counters. Default `/var/run/likwid.lock`
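Regarding the new `lockfile_path` option: the lock is ownership-based, and as of this commit the collector skips a measurement (and logs an error) both when the configured lock file cannot be stat'ed and when it is owned by a different user. A small hypothetical helper, not shipped with cc-metric-collector, that prints who currently holds the lock:

```go
package main

import (
	"fmt"
	"os"
	"os/user"
	"strconv"
	"syscall"
)

func main() {
	// Path defaults to the collector's default lock file; an optional
	// command-line argument overrides it.
	path := "/var/run/likwid.lock"
	if len(os.Args) > 1 {
		path = os.Args[1]
	}
	info, err := os.Stat(path)
	if err != nil {
		fmt.Println("cannot stat lock file:", err)
		return
	}
	stat := info.Sys().(*syscall.Stat_t)
	owner := strconv.FormatUint(uint64(stat.Uid), 10)
	if usr, err := user.LookupId(owner); err == nil {
		owner = usr.Username
	}
	fmt.Printf("%s is owned by %s; only that user may program the counters\n", path, owner)
}
```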
### Available metric types