mirror of
				https://github.com/ClusterCockpit/cc-metric-collector.git
				synced 2025-11-04 02:35:07 +01:00 
			
		
		
		
	Merge develop into main (#109)
* Add cpu_used (all-cpu_idle) to CpustatCollector * Update to line-protocol/v2 * Update runonce.yml with Golang 1.20 * Update fsnotify in LIKWID Collector * Use not a pointer to line-protocol.Encoder * Simplify Makefile * Use only as many arguments as required * Allow sum function to handle non float types * Allow values to be a slice of type float64, float32, int, int64, int32, bool * Use generic function to simplify code * Add missing case for type []int32 * Use generic function to compute minimum * Use generic function to compute maximum * Use generic function to compute average * Add error value to sumAnyType * Use generic function to compute median * For older versions of go slices is not part of the installation * Remove old entries from go.sum * Use simpler sort function * Compute metrics ib_total and ib_total_pkts * Add aggregated metrics. Add missing units * Update likwidMetric.go Fixes a potential bug when `fsnotify.NewWatcher()` fails with an error * Completely avoid memory allocations in infinibandMetric read() * Fixed initialization: Initialization and measurements should run in the same thread * Add safe.directory to Release action * Fix path after installation to /usr/bin after installation * ioutil.ReadFile is deprecated: As of Go 1.16, this function simply calls os.ReadFile * Switch to package slices from the golang 1.21 default library * Read file line by line * Read file line by line * Read file line by line * Use CamelCase * Use CamelCase * Fix function getNumaDomain, it always returned 0 * Avoid type conversion by using Atoi Avoid copying structs by using pointer access Increase readability with CamelCase variable names * Add caching * Cache CpuData * Cleanup * Use init function to initialize cache structure to avoid multi threading problems * Reuse information from /proc/cpuinfo * Avoid slice cloning. 
Directly use the cache * Add DieList * Add NumaDomainList and SMTList * Cleanup * Add comment * Lookup core ID from /sys/devices/system/cpu, /proc/cpuinfo is not portable * Lookup all information from /sys/devices/system/cpu, /proc/cpuinfo is not portable * Correctly handle lists from /sys * Add Simultaneous Multithreading siblings * Replace deprecated thread_siblings_list by core_cpus_list * Reduce number of required slices * Allow to send total values per core, socket and node * Send all metrics with same time stamp calcEventsetMetrics does only computation, counter measurement is done before * Input parameters should be float64 when evaluating to float64 * Send all metrics with same time stamp calcGlobalMetrics does only computation, counter measurement is done before * Remove unused variable gmresults * Add comments * Updated go packages * Add build with golang 1.21 * Switch to checkout action version 4 * Switch to setup-go action version 4 * Add workflow_dispatch to allow manual run of workflow * Add workflow_dispatch to allow manual run of workflow * Add release build jobs to runonce.yml * Switch to golang 1.20 for RHEL based distributions * Use dnf to download golang * Remove golang versions before 1.20 * Upgrade Ubuntu focal -> jammy * Pipe golang tar package directly to tar * Update golang version * Fix Ubuntu version number * Add links to ipmi and redfish receivers * Fix http server addr format * github.com/influxdata/line-protocol -> github.com/influxdata/line-protocol/v2/lineprotocol * Corrected spelling * Add some comments * github.com/influxdata/line-protocol -> github.com/influxdata/line-protocol/v2/lineprotocol * Allow other fields not only field "value" * Add some basic debugging documentation * Add some basic debugging documentation * Use a lock for the flush timer * Add tags in lexical order as required by AddTag() * Only access meta data, when it gets used as tag * Use slice to store lexically ordered key value pairs * Increase golang version 
requirement to 1.20. * Avoid package cmp to allow builds with golang v1.20 * Fix: Error NVML library not found did crash cc-metric-collector with "SIGSEGV: segmentation violation" * Add config option idle_timeout * Add basic authentication support * Add basic authentication support * Avoid unnecessary memory allocations * Add documentation for send_*_total values * Use generic package maps to clone maps * Reuse flush timer * Add Influx client options * Reuse ccTopology functionality * Do not store unused topology information * Add batch_size config * Cleanup * Use stype and stype-id for the NIC in NetstatCollector * Wait for concurrent flush operations to finish * Be more verbose in error messages * Reverted previous changes. Made the code too complex without much advantage * Use line protocol encoder * Go pkg update * Stop flush timer, when immediately flushing * Fix: Corrected unlock access to batch slice * Add config option to specify whether to use GZip compression in influx write requests * Add asynchronous send of encoder metrics * Use DefaultServeMux instead of github.com/gorilla/mux * Add config option for HTTP keep-alives * Be more strict, when parsing json * Add config option for HTTP request timeout and Retry interval * Allow more than one background send operation * Fix %sysusers_create_package args (#108) %sysusers_create_package requires two arguments. See: https://github.com/systemd/systemd/blob/main/src/rpm/macros.systemd.in#L165 * Add nfsiostat to list of collectors --------- Co-authored-by: Holger Obermaier <40787752+ho-ob@users.noreply.github.com> Co-authored-by: Holger Obermaier <holgerob@gmx.de> Co-authored-by: Obihörnchen <obihoernchende@gmail.com>
This commit is contained in:
		@@ -14,29 +14,18 @@ import (
 | 
			
		||||
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
//
 | 
			
		||||
// CPUFreqCollector
 | 
			
		||||
// a metric collector to measure the current frequency of the CPUs
 | 
			
		||||
// as obtained from /proc/cpuinfo
 | 
			
		||||
// Only measure on the first hyperthread
 | 
			
		||||
//
 | 
			
		||||
type CPUFreqCpuInfoCollectorTopology struct {
 | 
			
		||||
	processor               string // logical processor number (continuous, starting at 0)
 | 
			
		||||
	coreID                  string // socket local core ID
 | 
			
		||||
	coreID_int              int64
 | 
			
		||||
	physicalPackageID       string // socket / package ID
 | 
			
		||||
	physicalPackageID_int   int64
 | 
			
		||||
	numPhysicalPackages     string // number of  sockets / packages
 | 
			
		||||
	numPhysicalPackages_int int64
 | 
			
		||||
	isHT                    bool
 | 
			
		||||
	numNonHT                string // number of non hyperthreading processors
 | 
			
		||||
	numNonHT_int            int64
 | 
			
		||||
	tagSet                  map[string]string
 | 
			
		||||
	isHT   bool
 | 
			
		||||
	tagSet map[string]string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type CPUFreqCpuInfoCollector struct {
 | 
			
		||||
	metricCollector
 | 
			
		||||
	topology []*CPUFreqCpuInfoCollectorTopology
 | 
			
		||||
	topology []CPUFreqCpuInfoCollectorTopology
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
 | 
			
		||||
@@ -65,11 +54,9 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
 | 
			
		||||
	// Collect topology information from file cpuinfo
 | 
			
		||||
	foundFreq := false
 | 
			
		||||
	processor := ""
 | 
			
		||||
	var numNonHT_int int64 = 0
 | 
			
		||||
	coreID := ""
 | 
			
		||||
	physicalPackageID := ""
 | 
			
		||||
	var maxPhysicalPackageID int64 = 0
 | 
			
		||||
	m.topology = make([]*CPUFreqCpuInfoCollectorTopology, 0)
 | 
			
		||||
	m.topology = make([]CPUFreqCpuInfoCollectorTopology, 0)
 | 
			
		||||
	coreSeenBefore := make(map[string]bool)
 | 
			
		||||
 | 
			
		||||
	// Read cpuinfo file, line by line
 | 
			
		||||
@@ -98,41 +85,22 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
 | 
			
		||||
			len(coreID) > 0 &&
 | 
			
		||||
			len(physicalPackageID) > 0 {
 | 
			
		||||
 | 
			
		||||
			topology := new(CPUFreqCpuInfoCollectorTopology)
 | 
			
		||||
 | 
			
		||||
			// Processor
 | 
			
		||||
			topology.processor = processor
 | 
			
		||||
 | 
			
		||||
			// Core ID
 | 
			
		||||
			topology.coreID = coreID
 | 
			
		||||
			topology.coreID_int, err = strconv.ParseInt(coreID, 10, 64)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// Physical package ID
 | 
			
		||||
			topology.physicalPackageID = physicalPackageID
 | 
			
		||||
			topology.physicalPackageID_int, err = strconv.ParseInt(physicalPackageID, 10, 64)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return fmt.Errorf("unable to convert physicalPackageID '%s' to int64: %v", physicalPackageID, err)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// increase maximun socket / package ID, when required
 | 
			
		||||
			if topology.physicalPackageID_int > maxPhysicalPackageID {
 | 
			
		||||
				maxPhysicalPackageID = topology.physicalPackageID_int
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// is hyperthread?
 | 
			
		||||
			globalID := physicalPackageID + ":" + coreID
 | 
			
		||||
			topology.isHT = coreSeenBefore[globalID]
 | 
			
		||||
			coreSeenBefore[globalID] = true
 | 
			
		||||
			if !topology.isHT {
 | 
			
		||||
				// increase number on non hyper thread cores
 | 
			
		||||
				numNonHT_int++
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// store collected topology information
 | 
			
		||||
			m.topology = append(m.topology, topology)
 | 
			
		||||
			m.topology = append(m.topology,
 | 
			
		||||
				CPUFreqCpuInfoCollectorTopology{
 | 
			
		||||
					isHT: coreSeenBefore[globalID],
 | 
			
		||||
					tagSet: map[string]string{
 | 
			
		||||
						"type":       "hwthread",
 | 
			
		||||
						"type-id":    processor,
 | 
			
		||||
						"package_id": physicalPackageID,
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			)
 | 
			
		||||
 | 
			
		||||
			// mark core as seen before
 | 
			
		||||
			coreSeenBefore[globalID] = true
 | 
			
		||||
 | 
			
		||||
			// reset topology information
 | 
			
		||||
			foundFreq = false
 | 
			
		||||
@@ -142,24 +110,9 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
        // Check if at least one CPU with frequency information was detected
 | 
			
		||||
        if len(m.topology) == 0 {
 | 
			
		||||
                return fmt.Errorf("No CPU frequency info found in %s", cpuInfoFile)
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
	numPhysicalPackageID_int := maxPhysicalPackageID + 1
 | 
			
		||||
	numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
 | 
			
		||||
	numNonHT := fmt.Sprint(numNonHT_int)
 | 
			
		||||
	for _, t := range m.topology {
 | 
			
		||||
		t.numPhysicalPackages = numPhysicalPackageID
 | 
			
		||||
		t.numPhysicalPackages_int = numPhysicalPackageID_int
 | 
			
		||||
		t.numNonHT = numNonHT
 | 
			
		||||
		t.numNonHT_int = numNonHT_int
 | 
			
		||||
		t.tagSet = map[string]string{
 | 
			
		||||
			"type":       "hwthread",
 | 
			
		||||
			"type-id":    t.processor,
 | 
			
		||||
			"package_id": t.physicalPackageID,
 | 
			
		||||
		}
 | 
			
		||||
	// Check if at least one CPU with frequency information was detected
 | 
			
		||||
	if len(m.topology) == 0 {
 | 
			
		||||
		return fmt.Errorf("No CPU frequency info found in %s", cpuInfoFile)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	m.init = true
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user