2022-01-25 15:37:43 +01:00
|
|
|
package collectors
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/json"
|
|
|
|
"os"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2022-10-10 11:53:11 +02:00
|
|
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
2024-07-13 02:23:58 +02:00
|
|
|
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
|
2022-10-10 11:53:11 +02:00
|
|
|
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
2022-01-25 15:37:43 +01:00
|
|
|
)
|
|
|
|
|
2022-01-26 12:31:04 +01:00
|
|
|
// Map of all available metric collectors
|
2022-01-25 15:37:43 +01:00
|
|
|
var AvailableCollectors = map[string]MetricCollector{
|
|
|
|
|
2022-05-13 14:10:39 +02:00
|
|
|
"likwid": new(LikwidCollector),
|
|
|
|
"loadavg": new(LoadavgCollector),
|
|
|
|
"memstat": new(MemstatCollector),
|
|
|
|
"netstat": new(NetstatCollector),
|
|
|
|
"ibstat": new(InfinibandCollector),
|
|
|
|
"lustrestat": new(LustreCollector),
|
|
|
|
"cpustat": new(CpustatCollector),
|
|
|
|
"topprocs": new(TopProcsCollector),
|
|
|
|
"nvidia": new(NvidiaCollector),
|
|
|
|
"customcmd": new(CustomCmdCollector),
|
|
|
|
"iostat": new(IOstatCollector),
|
|
|
|
"diskstat": new(DiskstatCollector),
|
|
|
|
"tempstat": new(TempCollector),
|
|
|
|
"ipmistat": new(IpmiCollector),
|
|
|
|
"gpfs": new(GpfsCollector),
|
|
|
|
"cpufreq": new(CPUFreqCollector),
|
|
|
|
"cpufreq_cpuinfo": new(CPUFreqCpuInfoCollector),
|
|
|
|
"nfs3stat": new(Nfs3Collector),
|
|
|
|
"nfs4stat": new(Nfs4Collector),
|
|
|
|
"numastats": new(NUMAStatsCollector),
|
|
|
|
"beegfs_meta": new(BeegfsMetaCollector),
|
|
|
|
"beegfs_storage": new(BeegfsStorageCollector),
|
2022-11-15 17:15:27 +01:00
|
|
|
"rapl": new(RAPLCollector),
|
2022-05-25 15:55:43 +02:00
|
|
|
"rocm_smi": new(RocmSmiCollector),
|
2022-10-10 12:18:52 +02:00
|
|
|
"self": new(SelfCollector),
|
2022-10-10 11:53:11 +02:00
|
|
|
"schedstat": new(SchedstatCollector),
|
Merge develop into main (#109)
* Add cpu_used (all-cpu_idle) to CpustatCollector
* Update to line-protocol/v2
* Update runonce.yml with Golang 1.20
* Update fsnotify in LIKWID Collector
* Use not a pointer to line-protocol.Encoder
* Simplify Makefile
* Use only as many arguments as required
* Allow sum function to handle non float types
* Allow values to be a slice of type float64, float32, int, int64, int32, bool
* Use generic function to simplify code
* Add missing case for type []int32
* Use generic function to compute minimum
* Use generic function to compute maximum
* Use generic function to compute average
* Add error value to sumAnyType
* Use generic function to compute median
* For older versions of go slices is not part of the installation
* Remove old entries from go.sum
* Use simpler sort function
* Compute metrics ib_total and ib_total_pkts
* Add aggregated metrics.
Add missing units
* Update likwidMetric.go
Fixes a potential bug when `fsnotify.NewWatcher()` fails with an error
* Completly avoid memory allocations in infinibandMetric read()
* Fixed initialization: Initalization and measurements should run in the same thread
* Add safe.directory to Release action
* Fix path after installation to /usr/bin after installation
* ioutil.ReadFile is deprecated: As of Go 1.16, this function simply calls os.ReadFile
* Switch to package slices from the golang 1.21 default library
* Read file line by line
* Read file line by line
* Read file line by line
* Use CamelCase
* Use CamelCase
* Fix function getNumaDomain, it always returned 0
* Avoid type conversion by using Atoi
Avoid copying structs by using pointer access
Increase readability with CamelCase variable names
* Add caching
* Cache CpuData
* Cleanup
* Use init function to initalize cache structure to avoid multi threading problems
* Reuse information from /proc/cpuinfo
* Avoid slice cloning. Directly use the cache
* Add DieList
* Add NumaDomainList and SMTList
* Cleanup
* Add comment
* Lookup core ID from /sys/devices/system/cpu, /proc/cpuinfo is not portable
* Lookup all information from /sys/devices/system/cpu, /proc/cpuinfo is not portable
* Correctly handle lists from /sys
* Add Simultaneous Multithreading siblings
* Replace deprecated thread_siblings_list by core_cpus_list
* Reduce number of required slices
* Allow to send total values per core, socket and node
* Send all metrics with same time stamp
calcEventsetMetrics does only computiation, counter measurement is done before
* Input parameters should be float64 when evaluating to float64
* Send all metrics with same time stamp
calcGlobalMetrics does only computiation, counter measurement is done before
* Remove unused variable gmresults
* Add comments
* Updated go packages
* Add build with golang 1.21
* Switch to checkout action version 4
* Switch to setup-go action version 4
* Add workflow_dispatch to allow manual run of workflow
* Add workflow_dispatch to allow manual run of workflow
* Add release build jobs to runonce.yml
* Switch to golang 1.20 for RHEL based distributions
* Use dnf to download golang
* Remove golang versions before 1.20
* Upgrade Ubuntu focal -> jammy
* Pipe golang tar package directly to tar
* Update golang version
* Fix Ubuntu version number
* Add links to ipmi and redfish receivers
* Fix http server addr format
* github.com/influxdata/line-protocol -> github.com/influxdata/line-protocol/v2/lineprotocol
* Corrected spelling
* Add some comments
* github.com/influxdata/line-protocol -> github.com/influxdata/line-protocol/v2/lineprotocol
* Allow other fields not only field "value"
* Add some basic debugging documentation
* Add some basic debugging documentation
* Use a lock for the flush timer
* Add tags in lexical order as required by AddTag()
* Only access meta data, when it gets used as tag
* Use slice to store lexialicly orderd key value pairs
* Increase golang version requirement to 1.20.
* Avoid package cmp to allow builds with golang v1.20
* Fix: Error NVML library not found did crash
cc-metric-collector with "SIGSEGV: segmentation violation"
* Add config option idle_timeout
* Add basic authentication support
* Add basic authentication support
* Avoid unneccessary memory allocations
* Add documentation for send_*_total values
* Use generic package maps to clone maps
* Reuse flush timer
* Add Influx client options
* Reuse ccTopology functionality
* Do not store unused topology information
* Add batch_size config
* Cleanup
* Use stype and stype-id for the NIC in NetstatCollector
* Wait for concurrent flush operations to finish
* Be more verbose in error messages
* Reverted previous changes.
Made the code to complex without much advantages
* Use line protocol encoder
* Go pkg update
* Stop flush timer, when immediatelly flushing
* Fix: Corrected unlock access to batch slice
* Add config option to specify whether to use GZip compression in influx write requests
* Add asynchron send of encoder metrics
* Use DefaultServeMux instead of github.com/gorilla/mux
* Add config option for HTTP keep-alives
* Be more strict, when parsing json
* Add config option for HTTP request timeout and Retry interval
* Allow more then one background send operation
* Fix %sysusers_create_package args (#108)
%sysusers_create_package requires two arguments. See: https://github.com/systemd/systemd/blob/main/src/rpm/macros.systemd.in#L165
* Add nfsiostat to list of collectors
---------
Co-authored-by: Holger Obermaier <40787752+ho-ob@users.noreply.github.com>
Co-authored-by: Holger Obermaier <holgerob@gmx.de>
Co-authored-by: Obihörnchen <obihoernchende@gmail.com>
2023-12-04 12:21:26 +01:00
|
|
|
"nfsiostat": new(NfsIOStatCollector),
|
2022-01-25 15:37:43 +01:00
|
|
|
}
|
|
|
|
|
2022-01-28 15:16:58 +01:00
|
|
|
// collectorManager holds the state of the metric collector manager.
type collectorManager struct {
	collectors   []MetricCollector          // List of metric collectors to read in parallel
	serial       []MetricCollector          // List of metric collectors to read serially
	output       chan lp.CCMessage          // Output channel the collectors write their metrics to
	done         chan bool                  // channel to finish / stop metric collector manager
	ticker       mct.MultiChanTicker        // periodically ticking once each interval
	duration     time.Duration              // duration (for metrics that measure over a given duration)
	wg           *sync.WaitGroup            // wait group for all goroutines in cc-metric-collector
	config       map[string]json.RawMessage // json encoded config for collector manager, keyed by collector name
	collector_wg sync.WaitGroup             // internally used wait group for the parallel reading of collectors
	parallel_run bool                       // Flag whether the collectors are currently read in parallel
}
|
|
|
|
|
2022-01-28 15:16:58 +01:00
|
|
|
// CollectorManager is the external interface of the metric collector manager.
type CollectorManager interface {
	// Init sets up the manager and initializes all collectors configured
	// in the JSON file collectConfigFile.
	Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error
	// AddOutput sets the channel the collectors send their metrics to.
	AddOutput(output chan lp.CCMessage)
	// Start begins periodic reading of all configured collectors.
	Start()
	// Close stops the manager and blocks until it has shut down.
	Close()
}
|
|
|
|
|
2022-01-26 12:31:04 +01:00
|
|
|
// Init initializes a new metric collector manager by setting up:
|
2022-01-28 15:16:58 +01:00
|
|
|
// * output channel
|
2022-01-26 12:31:04 +01:00
|
|
|
// * done channel
|
2022-01-28 15:16:58 +01:00
|
|
|
// * wait group synchronization for goroutines (from variable wg)
|
2022-01-26 12:31:04 +01:00
|
|
|
// * ticker (from variable ticker)
|
|
|
|
// * configuration (read from config file in variable collectConfigFile)
|
|
|
|
// Initialization is done for all configured collectors
|
2022-01-25 15:37:43 +01:00
|
|
|
func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error {
|
|
|
|
cm.collectors = make([]MetricCollector, 0)
|
2022-05-13 14:10:39 +02:00
|
|
|
cm.serial = make([]MetricCollector, 0)
|
2022-01-25 15:37:43 +01:00
|
|
|
cm.output = nil
|
|
|
|
cm.done = make(chan bool)
|
|
|
|
cm.wg = wg
|
|
|
|
cm.ticker = ticker
|
|
|
|
cm.duration = duration
|
2022-01-26 12:31:04 +01:00
|
|
|
|
|
|
|
// Read collector config file
|
2022-01-25 15:37:43 +01:00
|
|
|
configFile, err := os.Open(collectConfigFile)
|
|
|
|
if err != nil {
|
2022-01-25 16:40:02 +01:00
|
|
|
cclog.Error(err.Error())
|
2022-01-25 15:37:43 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer configFile.Close()
|
|
|
|
jsonParser := json.NewDecoder(configFile)
|
|
|
|
err = jsonParser.Decode(&cm.config)
|
|
|
|
if err != nil {
|
2022-01-25 16:40:02 +01:00
|
|
|
cclog.Error(err.Error())
|
2022-01-25 15:37:43 +01:00
|
|
|
return err
|
|
|
|
}
|
2022-01-26 12:31:04 +01:00
|
|
|
|
|
|
|
// Initialize configured collectors
|
2022-01-28 15:16:58 +01:00
|
|
|
for collectorName, collectorCfg := range cm.config {
|
|
|
|
if _, found := AvailableCollectors[collectorName]; !found {
|
|
|
|
cclog.ComponentError("CollectorManager", "SKIP unknown collector", collectorName)
|
2022-01-25 15:37:43 +01:00
|
|
|
continue
|
|
|
|
}
|
2022-01-28 15:16:58 +01:00
|
|
|
collector := AvailableCollectors[collectorName]
|
2022-01-25 15:37:43 +01:00
|
|
|
|
2022-01-28 15:16:58 +01:00
|
|
|
err = collector.Init(collectorCfg)
|
2022-01-25 15:37:43 +01:00
|
|
|
if err != nil {
|
2022-01-28 15:16:58 +01:00
|
|
|
cclog.ComponentError("CollectorManager", "Collector", collectorName, "initialization failed:", err.Error())
|
2022-01-25 15:37:43 +01:00
|
|
|
continue
|
|
|
|
}
|
2022-01-28 15:16:58 +01:00
|
|
|
cclog.ComponentDebug("CollectorManager", "ADD COLLECTOR", collector.Name())
|
2022-05-13 14:10:39 +02:00
|
|
|
if collector.Parallel() {
|
|
|
|
cm.collectors = append(cm.collectors, collector)
|
|
|
|
} else {
|
|
|
|
cm.serial = append(cm.serial, collector)
|
|
|
|
}
|
2022-01-25 15:37:43 +01:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-01-26 12:31:04 +01:00
|
|
|
// Start starts the metric collector manager
|
2022-01-25 15:37:43 +01:00
|
|
|
func (cm *collectorManager) Start() {
|
|
|
|
tick := make(chan time.Time)
|
|
|
|
cm.ticker.AddChannel(tick)
|
2022-01-26 15:54:49 +01:00
|
|
|
|
2022-01-27 20:45:22 +01:00
|
|
|
cm.wg.Add(1)
|
2022-01-25 15:37:43 +01:00
|
|
|
go func() {
|
2022-01-27 20:45:22 +01:00
|
|
|
defer cm.wg.Done()
|
2022-01-26 15:54:49 +01:00
|
|
|
// Collector manager is done
|
|
|
|
done := func() {
|
|
|
|
// close all metric collectors
|
2022-05-13 14:10:39 +02:00
|
|
|
if cm.parallel_run {
|
|
|
|
cm.collector_wg.Wait()
|
|
|
|
cm.parallel_run = false
|
|
|
|
}
|
2022-01-26 15:54:49 +01:00
|
|
|
for _, c := range cm.collectors {
|
|
|
|
c.Close()
|
|
|
|
}
|
2022-01-30 12:08:33 +01:00
|
|
|
close(cm.done)
|
2022-01-26 15:54:49 +01:00
|
|
|
cclog.ComponentDebug("CollectorManager", "DONE")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for done signal or timer event
|
2022-01-25 15:37:43 +01:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-cm.done:
|
2022-01-26 15:54:49 +01:00
|
|
|
done()
|
|
|
|
return
|
2022-01-25 15:37:43 +01:00
|
|
|
case t := <-tick:
|
2022-05-13 14:10:39 +02:00
|
|
|
cm.parallel_run = true
|
2022-01-25 15:37:43 +01:00
|
|
|
for _, c := range cm.collectors {
|
2022-01-26 15:54:49 +01:00
|
|
|
// Wait for done signal or execute the collector
|
2022-01-25 15:37:43 +01:00
|
|
|
select {
|
2022-05-13 14:10:39 +02:00
|
|
|
case <-cm.done:
|
|
|
|
done()
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
// Read metrics from collector c via goroutine
|
|
|
|
cclog.ComponentDebug("CollectorManager", c.Name(), t)
|
|
|
|
cm.collector_wg.Add(1)
|
|
|
|
go func(myc MetricCollector) {
|
|
|
|
myc.Read(cm.duration, cm.output)
|
|
|
|
cm.collector_wg.Done()
|
|
|
|
}(c)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cm.collector_wg.Wait()
|
|
|
|
cm.parallel_run = false
|
|
|
|
for _, c := range cm.serial {
|
|
|
|
// Wait for done signal or execute the collector
|
|
|
|
select {
|
2022-01-25 15:37:43 +01:00
|
|
|
case <-cm.done:
|
2022-01-26 15:54:49 +01:00
|
|
|
done()
|
|
|
|
return
|
2022-01-25 15:37:43 +01:00
|
|
|
default:
|
2022-01-26 15:54:49 +01:00
|
|
|
// Read metrics from collector c
|
2022-01-26 12:31:04 +01:00
|
|
|
cclog.ComponentDebug("CollectorManager", c.Name(), t)
|
2022-01-25 15:37:43 +01:00
|
|
|
c.Read(cm.duration, cm.output)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2022-01-26 15:54:49 +01:00
|
|
|
|
|
|
|
// Collector manager is started
|
2022-01-25 17:43:10 +01:00
|
|
|
cclog.ComponentDebug("CollectorManager", "STARTED")
|
2022-01-25 15:37:43 +01:00
|
|
|
}
|
|
|
|
|
2022-01-26 12:31:04 +01:00
|
|
|
// AddOutput adds the output channel to the metric collector manager
|
2024-07-13 02:23:58 +02:00
|
|
|
func (cm *collectorManager) AddOutput(output chan lp.CCMessage) {
|
2022-01-25 15:37:43 +01:00
|
|
|
cm.output = output
|
|
|
|
}
|
|
|
|
|
2022-01-26 12:31:04 +01:00
|
|
|
// Close finishes / stops the metric collector manager.
// It blocks until the manager goroutine has completed its shutdown.
func (cm *collectorManager) Close() {
	cclog.ComponentDebug("CollectorManager", "CLOSE")
	// Signal the manager goroutine to stop ...
	cm.done <- true
	// ... then wait for close of channel cm.done, which the goroutine
	// performs after closing all collectors
	<-cm.done
}
|
|
|
|
|
2022-01-26 12:31:04 +01:00
|
|
|
// New creates a new initialized metric collector manager
|
2022-01-25 15:37:43 +01:00
|
|
|
func New(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) (CollectorManager, error) {
|
2022-01-28 15:16:58 +01:00
|
|
|
cm := new(collectorManager)
|
2022-01-25 15:37:43 +01:00
|
|
|
err := cm.Init(ticker, duration, wg, collectConfigFile)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return cm, err
|
|
|
|
}
|