Compare commits


7 Commits

Author          SHA1          Message                                     Date
Thomas Roehl    0ca87ea0be    Fix SqliteSink                              2022-01-31 13:24:39 +01:00
Thomas Roehl    4195786242    Add all CCMetric functions to interface     2022-01-31 06:04:30 +01:00
Thomas Roehl    328d26bf3c    Merge branch 'develop' into sqlite3_sink    2022-01-31 05:59:25 +01:00
Thomas Roehl    2b07798af2    Fix Write() arguments                       2021-11-26 19:21:18 +01:00
Thomas Roehl    aa842a8a9c    Add Flush method                            2021-11-26 19:13:48 +01:00
Thomas Roehl    06ab58dc92    Merge branch 'main' into sqlite3_sink       2021-11-25 18:23:04 +01:00
Thomas Roehl    40855b1164    Sqlite3 sink                                2021-05-18 15:53:20 +02:00
82 changed files with 2140 additions and 4720 deletions

View File

@@ -1 +1 @@
{}
[]

View File

@@ -1,6 +1,6 @@
-{
-    "testoutput" : {
+[
+    {
         "type" : "stdout",
         "meta_as_tags" : true
     }
-}
+]

View File

@@ -1,10 +1,45 @@
name: Run RPM Build
on:
push:
tags:
- '**'
on: push
jobs:
build-centos8:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: TomTheBear/rpmbuild@master
id: rpm
name: Build RPM package on CentOS8
with:
spec_file: "./scripts/cc-metric-collector.spec"
- name: Save RPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector RPM CentOS8
path: ${{ steps.rpm.outputs.rpm_dir_path }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector SRPM CentOS8
path: ${{ steps.rpm.outputs.source_rpm_path }}
build-centos-latest:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: TomTheBear/rpmbuild@centos_latest
id: rpm
name: Build RPM package on CentOS 'Latest'
with:
spec_file: "./scripts/cc-metric-collector.spec"
- name: Save RPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector RPM CentOS 'Latest'
path: ${{ steps.rpm.outputs.rpm_dir_path }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector SRPM CentOS 'Latest'
path: ${{ steps.rpm.outputs.source_rpm_path }}
build-alma-8_5:
runs-on: ubuntu-latest
steps:
@@ -18,41 +53,9 @@ jobs:
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector RPM AlmaLinux 8.5
path: ${{ steps.rpm.outputs.rpm_path }}
path: ${{ steps.rpm.outputs.rpm_dir_path }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector SRPM AlmaLinux 8.5
path: ${{ steps.rpm.outputs.source_rpm_path }}
- name: Release
uses: softprops/action-gh-release@v1
with:
name: cc-metric-collector-${{github.ref_name}}
files: |
${{ steps.rpm.outputs.source_rpm_path }}
${{ steps.rpm.outputs.rpm_path }}
build-rhel-ubi8:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: TomTheBear/rpmbuild@rh-ubi8
id: rpm
name: Build RPM package on Red Hat Universal Base Image 8
with:
spec_file: "./scripts/cc-metric-collector.spec"
- name: Save RPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector RPM Red Hat Universal Base Image 8
path: ${{ steps.rpm.outputs.rpm_path }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector SRPM Red Hat Universal Base Image 8
path: ${{ steps.rpm.outputs.source_rpm_path }}
- name: Release
uses: softprops/action-gh-release@v1
with:
files: |
${{ steps.rpm.outputs.source_rpm_path }}
${{ steps.rpm.outputs.rpm_path }}

View File

@@ -2,12 +2,10 @@ name: Run Test
on: push
jobs:
build-1-17:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
@@ -15,32 +13,8 @@ jobs:
with:
go-version: '^1.17.6'
- name: Setup Ganglia
run: sudo apt install ganglia-monitor libganglia1
- name: Build MetricCollector
run: make
- name: Run MetricCollector
run: ./cc-metric-collector --once --config .github/ci-config.json
build-1-16:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v2.1.5
with:
go-version: '^1.16.7' # The version AlmaLinux 8.5 uses
- name: Setup Ganglia
run: sudo apt install ganglia-monitor libganglia1
- name: Build MetricCollector
run: make
- name: Run MetricCollectorlibganglia1
run: ./cc-metric-collector --once --config .github/ci-config.json

.gitmodules
View File

@@ -1,4 +0,0 @@
[submodule ".github/actions/rpmbuild-centos8-golang"]
path = .github/actions/rpmbuild-centos8-golang
url = https://github.com/naveenrajm7/rpmbuild.git
branch = centos8

View File

@@ -5,15 +5,6 @@ GOSRC_SINKS := $(wildcard sinks/*.go)
GOSRC_RECEIVERS := $(wildcard receivers/*.go)
GOSRC_INTERNAL := $(wildcard internal/*/*.go)
GOSRC := $(GOSRC_APP) $(GOSRC_COLLECTORS) $(GOSRC_SINKS) $(GOSRC_RECEIVERS) $(GOSRC_INTERNAL)
COMPONENT_DIRS := collectors \
sinks \
receivers \
internal/metricRouter \
internal/ccMetric \
internal/metricAggregator \
internal/ccLogger \
internal/ccTopology \
internal/multiChanTicker
.PHONY: all
@@ -21,13 +12,12 @@ all: $(APP)
$(APP): $(GOSRC)
make -C collectors
make -C sinks
go get
go build -o $(APP) $(GOSRC_APP)
.PHONY: clean
clean:
@for COMP in $(COMPONENT_DIRS); do if [ -e $$COMP/Makefile ]; then make -C $$COMP clean; fi; done
make -C collectors clean
rm -f $(APP)
.PHONY: fmt

View File

@@ -39,15 +39,14 @@ See the component READMEs for their configuration:
```
$ git clone git@github.com:ClusterCockpit/cc-metric-collector.git
$ make (downloads LIKWID, builds it as static library with 'direct' accessmode and copies all required files for the collector)
$ go get (requires at least golang 1.16)
$ make
$ go get (requires at least golang 1.13)
$ go build metric-collector
```
# Running
```
$ ./cc-metric-collector --help
$ ./metric-collector --help
Usage of metric-collector:
-config string
Path to configuration file (default "./config.json")
@@ -55,6 +54,8 @@ Usage of metric-collector:
Path for logfile (default "stderr")
-once
Run all collectors only once
-pidfile string
Path for PID file (default "/var/run/cc-metric-collector.pid")
```
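The options listed above are plain command-line flags. As a rough illustration only (not the actual `metric-collector` main), they could be declared with Go's standard `flag` package like this:
```go
// Illustrative sketch: re-declares the documented command-line options with the
// standard flag package. Names and defaults are taken from the help text above.
package main

import (
	"flag"
	"fmt"
)

func main() {
	configFile := flag.String("config", "./config.json", "Path to configuration file")
	logFile := flag.String("log", "stderr", "Path for logfile")
	once := flag.Bool("once", false, "Run all collectors only once")
	pidFile := flag.String("pidfile", "/var/run/cc-metric-collector.pid", "Path for PID file")
	flag.Parse()

	fmt.Println("config:", *configFile, "log:", *logFile, "once:", *once, "pidfile:", *pidFile)
}
```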

View File

@@ -1,31 +1,15 @@
{
"cpufreq": {},
"cpufreq_cpuinfo": {},
"gpfs": {
"exclude_filesystem": [
"test_fs"
]
},
"ibstat": {},
"loadavg": {
"exclude_metrics": [
"proc_total"
]
},
"numastats": {},
"nvidia": {},
"tempstat": {
"report_max_temperature": true,
"report_critical_temperature": true,
"tag_override": {
"hwmon0": {
"type": "socket",
"type-id": "0"
},
"hwmon1": {
"type": "socket",
"type-id": "1"
}
"tag_override": {
"hwmon0" : {
"type" : "socket",
"type-id" : "0"
},
"hwmon1" : {
"type" : "socket",
"type-id" : "1"
}
}
}
}

View File

@@ -18,28 +18,21 @@ In contrast to the configuration files for sinks and receivers, the collectors c
* [`cpustat`](./cpustatMetric.md)
* [`memstat`](./memstatMetric.md)
* [`iostat`](./iostatMetric.md)
* [`diskstat`](./diskstatMetric.md)
* [`loadavg`](./loadavgMetric.md)
* [`netstat`](./netstatMetric.md)
* [`ibstat`](./infinibandMetric.md)
* [`ibstat_perfquery`](./infinibandPerfQueryMetric.md)
* [`tempstat`](./tempMetric.md)
* [`lustrestat`](./lustreMetric.md)
* [`lustre`](./lustreMetric.md)
* [`likwid`](./likwidMetric.md)
* [`nvidia`](./nvidiaMetric.md)
* [`customcmd`](./customCmdMetric.md)
* [`ipmistat`](./ipmiMetric.md)
* [`topprocs`](./topprocsMetric.md)
* [`nfs3stat`](./nfs3Metric.md)
* [`nfs4stat`](./nfs4Metric.md)
* [`cpufreq`](./cpufreqMetric.md)
* [`cpufreq_cpuinfo`](./cpufreqCpuinfoMetric.md)
* [`numastat`](./numastatMetric.md)
* [`gpfs`](./gpfsMetric.md)
## Todos
* [ ] Exclude devices for `diskstat` collector
* [ ] Aggregate metrics to a higher topology entity (sum hwthread metrics to a socket metric, ...). Needs to be configurable
# Contributing own collectors
@@ -78,11 +71,6 @@ type SampleCollector struct {
}
func (m *SampleCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "SampleCollector"
m.setup()
if len(config) > 0 {
@@ -103,15 +91,10 @@ func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric)
}
// tags for the metric, if type != node use proper type and type-id
tags := map[string]string{"type" : "node"}
x, err := GetMetric()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Read(): %v", err))
}
// Each metric has exactly one field: value !
value := map[string]interface{}{"value": int64(x)}
if y, err := lp.New("sample_metric", tags, m.meta, value, time.Now()); err == nil {
value := map[string]interface{}{"value": int(x)}
y, err := lp.New("sample_metric", tags, m.meta, value, time.Now())
if err == nil {
output <- y
}
}
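Put together, a hypothetical minimal collector built from the snippets above could look like the sketch below. It is meant to live in the `collectors` package (so the embedded `metricCollector` base type and `m.setup()` are available), uses a constant in place of a real measurement, and would still have to be registered in the `AvailableCollectors` map to be usable:
```go
package collectors

import (
	"encoding/json"
	"time"

	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)

// SampleCollector is a minimal, hypothetical collector sketch.
type SampleCollector struct {
	metricCollector
}

func (m *SampleCollector) Init(config json.RawMessage) error {
	m.name = "SampleCollector"
	m.setup()
	m.meta = map[string]string{"source": m.name, "group": "Sample"}
	m.init = true
	return nil
}

func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	if !m.init {
		return
	}
	// tags for the metric; for type != node use the proper type and type-id
	tags := map[string]string{"type": "node"}
	x := 42 // placeholder for a real measurement
	// each metric has exactly one field named "value"
	value := map[string]interface{}{"value": int64(x)}
	if y, err := lp.New("sample_metric", tags, m.meta, value, time.Now()); err == nil {
		output <- y
	}
}

func (m *SampleCollector) Close() {
	m.init = false
}
```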

View File

@@ -25,16 +25,13 @@ var AvailableCollectors = map[string]MetricCollector{
"topprocs": new(TopProcsCollector),
"nvidia": new(NvidiaCollector),
"customcmd": new(CustomCmdCollector),
"iostat": new(IOstatCollector),
"diskstat": new(DiskstatCollector),
"tempstat": new(TempCollector),
"ipmistat": new(IpmiCollector),
"gpfs": new(GpfsCollector),
"cpufreq": new(CPUFreqCollector),
"cpufreq_cpuinfo": new(CPUFreqCpuInfoCollector),
"nfs3stat": new(Nfs3Collector),
"nfs4stat": new(Nfs4Collector),
"numastats": new(NUMAStatsCollector),
"nfsstat": new(NfsCollector),
}
// Metric collector manager data structure

View File

@@ -5,13 +5,13 @@ import (
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
//
@@ -23,55 +23,45 @@ import (
type CPUFreqCpuInfoCollectorTopology struct {
processor string // logical processor number (continuous, starting at 0)
coreID string // socket local core ID
coreID_int int64
coreID_int int
physicalPackageID string // socket / package ID
physicalPackageID_int int64
physicalPackageID_int int
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
numPhysicalPackages_int int
isHT bool
numNonHT string // number of non hyperthreading processors
numNonHT_int int64
numNonHT_int int
tagSet map[string]string
}
type CPUFreqCpuInfoCollector struct {
metricCollector
topology []*CPUFreqCpuInfoCollectorTopology
topology []CPUFreqCpuInfoCollectorTopology
}
func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.setup()
m.name = "CPUFreqCpuInfoCollector"
m.meta = map[string]string{
"source": m.name,
"group": "CPU",
"unit": "MHz",
"group": "cpufreq",
}
const cpuInfoFile = "/proc/cpuinfo"
file, err := os.Open(cpuInfoFile)
if err != nil {
return fmt.Errorf("Failed to open file '%s': %v", cpuInfoFile, err)
return fmt.Errorf("Failed to open '%s': %v", cpuInfoFile, err)
}
defer file.Close()
// Collect topology information from file cpuinfo
foundFreq := false
processor := ""
var numNonHT_int int64 = 0
numNonHT_int := 0
coreID := ""
physicalPackageID := ""
var maxPhysicalPackageID int64 = 0
m.topology = make([]*CPUFreqCpuInfoCollectorTopology, 0)
maxPhysicalPackageID := 0
m.topology = make([]CPUFreqCpuInfoCollectorTopology, 0)
coreSeenBefore := make(map[string]bool)
// Read cpuinfo file, line by line
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lineSplit := strings.Split(scanner.Text(), ":")
@@ -97,41 +87,39 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
len(coreID) > 0 &&
len(physicalPackageID) > 0 {
topology := new(CPUFreqCpuInfoCollectorTopology)
// Processor
topology.processor = processor
// Core ID
topology.coreID = coreID
topology.coreID_int, err = strconv.ParseInt(coreID, 10, 64)
coreID_int, err := strconv.Atoi(coreID)
if err != nil {
return fmt.Errorf("Unable to convert coreID '%s' to int64: %v", coreID, err)
return fmt.Errorf("Unable to convert coreID to int: %v", err)
}
// Physical package ID
topology.physicalPackageID = physicalPackageID
topology.physicalPackageID_int, err = strconv.ParseInt(physicalPackageID, 10, 64)
physicalPackageID_int, err := strconv.Atoi(physicalPackageID)
if err != nil {
return fmt.Errorf("Unable to convert physicalPackageID '%s' to int64: %v", physicalPackageID, err)
return fmt.Errorf("Unable to convert physicalPackageID to int: %v", err)
}
// increase maximum socket / package ID, when required
if topology.physicalPackageID_int > maxPhysicalPackageID {
maxPhysicalPackageID = topology.physicalPackageID_int
if physicalPackageID_int > maxPhysicalPackageID {
maxPhysicalPackageID = physicalPackageID_int
}
// is hyperthread?
globalID := physicalPackageID + ":" + coreID
topology.isHT = coreSeenBefore[globalID]
isHT := coreSeenBefore[globalID]
coreSeenBefore[globalID] = true
if !topology.isHT {
if !isHT {
// increase number of non hyper thread cores
numNonHT_int++
}
// store collected topology information
m.topology = append(m.topology, topology)
m.topology = append(
m.topology,
CPUFreqCpuInfoCollectorTopology{
processor: processor,
coreID: coreID,
coreID_int: coreID_int,
physicalPackageID: physicalPackageID,
physicalPackageID_int: physicalPackageID_int,
isHT: isHT,
})
// reset topology information
foundFreq = false
@@ -144,15 +132,18 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
numPhysicalPackageID_int := maxPhysicalPackageID + 1
numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
numNonHT := fmt.Sprint(numNonHT_int)
for _, t := range m.topology {
for i := range m.topology {
t := &m.topology[i]
t.numPhysicalPackages = numPhysicalPackageID
t.numPhysicalPackages_int = numPhysicalPackageID_int
t.numNonHT = numNonHT
t.numNonHT_int = numNonHT_int
t.tagSet = map[string]string{
"type": "cpu",
"type-id": t.processor,
"package_id": t.physicalPackageID,
"type": "cpu",
"type-id": t.processor,
"num_core": t.numNonHT,
"package_id": t.physicalPackageID,
"num_package": t.numPhysicalPackages,
}
}
@@ -160,18 +151,15 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
return nil
}
func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
}
const cpuInfoFile = "/proc/cpuinfo"
file, err := os.Open(cpuInfoFile)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to open file '%s': %v", cpuInfoFile, err))
log.Printf("Failed to open '%s': %v", cpuInfoFile, err)
return
}
defer file.Close()
@@ -186,16 +174,15 @@ func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CC
// frequency
if key == "cpu MHz" {
t := m.topology[processorCounter]
t := &m.topology[processorCounter]
if !t.isHT {
value, err := strconv.ParseFloat(strings.TrimSpace(lineSplit[1]), 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err))
log.Printf("Failed to convert cpu MHz to float: %v", err)
return
}
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now)
if err == nil {
output <- y
}
}

View File

@@ -1,10 +0,0 @@
## `cpufreq_cpuinfo` collector
```json
"cpufreq_cpuinfo": {}
```
The `cpufreq_cpuinfo` collector reads the clock frequency from `/proc/cpuinfo` and outputs a handful of **cpu** metrics.
Metrics:
* `cpufreq`
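A minimal stand-alone sketch of this approach, reading only the `cpu MHz` lines (the real collector additionally tracks core and package topology and skips hyperthreads):
```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	file, err := os.Open("/proc/cpuinfo")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer file.Close()

	processor := "" // logical processor number taken from the "processor" lines
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		fields := strings.SplitN(scanner.Text(), ":", 2)
		if len(fields) != 2 {
			continue
		}
		key := strings.TrimSpace(fields[0])
		value := strings.TrimSpace(fields[1])
		switch key {
		case "processor":
			processor = value
		case "cpu MHz":
			if freq, err := strconv.ParseFloat(value, 64); err == nil {
				fmt.Printf("cpufreq{type=cpu, type-id=%s} = %.2f MHz\n", processor, freq)
			}
		}
	}
}
```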

View File

@@ -1,30 +1,48 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"golang.org/x/sys/unix"
)
//
// readOneLine reads one line from a file.
// It returns ok when file was successfully read.
// In this case text contains the first line of the file's contents.
//
func readOneLine(filename string) (text string, ok bool) {
file, err := os.Open(filename)
if err != nil {
return
}
defer file.Close()
scanner := bufio.NewScanner(file)
ok = scanner.Scan()
text = scanner.Text()
return
}
type CPUFreqCollectorTopology struct {
processor string // logical processor number (continuous, starting at 0)
coreID string // socket local core ID
coreID_int int64
coreID_int int
physicalPackageID string // socket / package ID
physicalPackageID_int int64
physicalPackageID_int int
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
numPhysicalPackages_int int
isHT bool
numNonHT string // number of non hyperthreading processors
numNonHT_int int64
numNonHT_int int
scalingCurFreqFile string
tagSet map[string]string
}
@@ -46,11 +64,6 @@ type CPUFreqCollector struct {
}
func (m *CPUFreqCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "CPUFreqCollector"
m.setup()
if len(config) > 0 {
@@ -61,8 +74,7 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
}
m.meta = map[string]string{
"source": m.name,
"group": "CPU",
"unit": "MHz",
"group": "CPU Frequency",
}
// Loop for all CPU directories
@@ -70,50 +82,48 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
globPattern := filepath.Join(baseDir, "cpu[0-9]*")
cpuDirs, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("Unable to glob files with pattern '%s': %v", globPattern, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to glob files with pattern %s: %v", globPattern, err)
}
if cpuDirs == nil {
return fmt.Errorf("Unable to find any files with pattern '%s'", globPattern)
return fmt.Errorf("CPUFreqCollector.Init() unable to find any files with pattern %s", globPattern)
}
// Initialize CPU topology
m.topology = make([]CPUFreqCollectorTopology, len(cpuDirs))
for _, cpuDir := range cpuDirs {
processor := strings.TrimPrefix(cpuDir, "/sys/devices/system/cpu/cpu")
processor_int, err := strconv.ParseInt(processor, 10, 64)
processor_int, err := strconv.Atoi(processor)
if err != nil {
return fmt.Errorf("Unable to convert cpuID '%s' to int64: %v", processor, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to convert cpuID to int: %v", err)
}
// Read package ID
physicalPackageIDFile := filepath.Join(cpuDir, "topology", "physical_package_id")
line, err := ioutil.ReadFile(physicalPackageIDFile)
if err != nil {
return fmt.Errorf("Unable to read physical package ID from file '%s': %v", physicalPackageIDFile, err)
physicalPackageID, ok := readOneLine(physicalPackageIDFile)
if !ok {
return fmt.Errorf("CPUFreqCollector.Init() unable to read physical package ID from %s", physicalPackageIDFile)
}
physicalPackageID := strings.TrimSpace(string(line))
physicalPackageID_int, err := strconv.ParseInt(physicalPackageID, 10, 64)
physicalPackageID_int, err := strconv.Atoi(physicalPackageID)
if err != nil {
return fmt.Errorf("Unable to convert packageID '%s' to int64: %v", physicalPackageID, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to convert packageID to int: %v", err)
}
// Read core ID
coreIDFile := filepath.Join(cpuDir, "topology", "core_id")
line, err = ioutil.ReadFile(coreIDFile)
if err != nil {
return fmt.Errorf("Unable to read core ID from file '%s': %v", coreIDFile, err)
coreID, ok := readOneLine(coreIDFile)
if !ok {
return fmt.Errorf("CPUFreqCollector.Init() unable to read core ID from %s", coreIDFile)
}
coreID := strings.TrimSpace(string(line))
coreID_int, err := strconv.ParseInt(coreID, 10, 64)
coreID_int, err := strconv.Atoi(coreID)
if err != nil {
return fmt.Errorf("Unable to convert coreID '%s' to int64: %v", coreID, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to convert coreID to int: %v", err)
}
// Check access to current frequency file
scalingCurFreqFile := filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq")
err = unix.Access(scalingCurFreqFile, unix.R_OK)
if err != nil {
return fmt.Errorf("Unable to access file '%s': %v", scalingCurFreqFile, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to access %s: %v", scalingCurFreqFile, err)
}
t := &m.topology[processor_int]
@@ -136,8 +146,8 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
}
// number of non hyper thread cores and packages / sockets
var numNonHT_int int64 = 0
var maxPhysicalPackageID int64 = 0
numNonHT_int := 0
maxPhysicalPackageID := 0
for i := range m.topology {
t := &m.topology[i]
@@ -161,9 +171,11 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
t.numNonHT = numNonHT
t.numNonHT_int = numNonHT_int
t.tagSet = map[string]string{
"type": "cpu",
"type-id": t.processor,
"package_id": t.physicalPackageID,
"type": "cpu",
"type-id": t.processor,
"num_core": t.numNonHT,
"package_id": t.physicalPackageID,
"num_package": t.numPhysicalPackages,
}
}
@@ -172,7 +184,6 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
}
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
}
@@ -187,22 +198,19 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric)
}
// Read current frequency
line, err := ioutil.ReadFile(t.scalingCurFreqFile)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", t.scalingCurFreqFile, err))
line, ok := readOneLine(t.scalingCurFreqFile)
if !ok {
log.Printf("CPUFreqCollector.Read(): Failed to read one line from file '%s'", t.scalingCurFreqFile)
continue
}
cpuFreq, err := strconv.ParseInt(strings.TrimSpace(string(line)), 10, 64)
cpuFreq, err := strconv.Atoi(line)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert CPU frequency '%s' to int64: %v", line, err))
log.Printf("CPUFreqCollector.Read(): Failed to convert CPU frequency '%s': %v", line, err)
continue
}
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now)
if err == nil {
output <- y
}
}

View File

@@ -1,11 +0,0 @@
## `cpufreq` collector
```json
"cpufreq": {
"exclude_metrics": []
}
```
The `cpufreq` collector reads the clock frequency from `/sys/devices/system/cpu/cpu*/cpufreq` and outputs a handful of **cpu** metrics.
Metrics:
* `cpufreq`
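A minimal stand-alone sketch of the sysfs variant: glob the per-CPU directories and read `scaling_cur_freq` (the kernel reports this value in kHz; the real collector also evaluates the topology files and checks read permissions first):
```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)

func main() {
	cpuDirs, err := filepath.Glob("/sys/devices/system/cpu/cpu[0-9]*")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, cpuDir := range cpuDirs {
		freqFile := filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq")
		raw, err := ioutil.ReadFile(freqFile)
		if err != nil {
			continue // CPU without cpufreq support
		}
		kHz, err := strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 64)
		if err == nil {
			fmt.Printf("%s: cpufreq = %d kHz\n", filepath.Base(cpuDir), kHz)
		}
	}
}
```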

View File

@@ -1,15 +1,12 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"os"
"io/ioutil"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
@@ -21,93 +18,45 @@ type CpustatCollectorConfig struct {
type CpustatCollector struct {
metricCollector
config CpustatCollectorConfig
matches map[string]int
cputags map[string]map[string]string
nodetags map[string]string
num_cpus_metric lp.CCMetric
config CpustatCollectorConfig
}
func (m *CpustatCollector) Init(config json.RawMessage) error {
m.name = "CpustatCollector"
m.setup()
m.meta = map[string]string{"source": m.name, "group": "CPU", "unit": "Percent"}
m.nodetags = map[string]string{"type": "node"}
m.meta = map[string]string{"source": m.name, "group": "CPU"}
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
matches := map[string]int{
"cpu_user": 1,
"cpu_nice": 2,
"cpu_system": 3,
"cpu_idle": 4,
"cpu_iowait": 5,
"cpu_irq": 6,
"cpu_softirq": 7,
"cpu_steal": 8,
"cpu_guest": 9,
"cpu_guest_nice": 10,
}
m.matches = make(map[string]int)
for match, index := range matches {
doExclude := false
for _, exclude := range m.config.ExcludeMetrics {
if match == exclude {
doExclude = true
break
}
}
if !doExclude {
m.matches[match] = index
}
}
// Check input file
file, err := os.Open(string(CPUSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
}
defer file.Close()
// Pre-generate tags for all CPUs
num_cpus := 0
m.cputags = make(map[string]map[string]string)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if strings.HasPrefix(linefields[0], "cpu") && strings.Compare(linefields[0], "cpu") != 0 {
cpustr := strings.TrimLeft(linefields[0], "cpu")
cpu, _ := strconv.Atoi(cpustr)
m.cputags[linefields[0]] = map[string]string{"type": "cpu", "type-id": fmt.Sprintf("%d", cpu)}
num_cpus++
}
}
m.init = true
return nil
}
func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMetric) {
values := make(map[string]float64)
total := 0.0
for match, index := range m.matches {
if len(match) > 0 {
x, err := strconv.ParseInt(linefields[index], 0, 64)
if err == nil {
values[match] = float64(x)
total += values[match]
}
}
func (c *CpustatCollector) parseStatLine(line string, cpu int, exclude []string, output chan lp.CCMetric) {
ls := strings.Fields(line)
matches := []string{"", "cpu_user", "cpu_nice", "cpu_system", "cpu_idle", "cpu_iowait", "cpu_irq", "cpu_softirq", "cpu_steal", "cpu_guest", "cpu_guest_nice"}
for _, ex := range exclude {
matches, _ = RemoveFromStringList(matches, ex)
}
t := time.Now()
for name, value := range values {
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": (value * 100.0) / total}, t)
if err == nil {
output <- y
var tags map[string]string
if cpu < 0 {
tags = map[string]string{"type": "node"}
} else {
tags = map[string]string{"type": "cpu", "type-id": fmt.Sprintf("%d", cpu)}
}
for i, m := range matches {
if len(m) > 0 {
x, err := strconv.ParseInt(ls[i], 0, 64)
if err == nil {
y, err := lp.New(m, tags, c.meta, map[string]interface{}{"value": int(x)}, time.Now())
if err == nil {
output <- y
}
}
}
}
}
@@ -116,33 +65,25 @@ func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMetric)
if !m.init {
return
}
num_cpus := 0
file, err := os.Open(string(CPUSTATFILE))
buffer, err := ioutil.ReadFile(string(CPUSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if strings.Compare(linefields[0], "cpu") == 0 {
m.parseStatLine(linefields, m.nodetags, output)
} else if strings.HasPrefix(linefields[0], "cpu") {
m.parseStatLine(linefields, m.cputags[linefields[0]], output)
num_cpus++
ll := strings.Split(string(buffer), "\n")
for _, line := range ll {
if len(line) == 0 {
continue
}
ls := strings.Fields(line)
if strings.Compare(ls[0], "cpu") == 0 {
m.parseStatLine(line, -1, m.config.ExcludeMetrics, output)
} else if strings.HasPrefix(ls[0], "cpu") {
cpustr := strings.TrimLeft(ls[0], "cpu")
cpu, _ := strconv.Atoi(cpustr)
m.parseStatLine(line, cpu, m.config.ExcludeMetrics, output)
}
}
num_cpus_metric, err := lp.New("num_cpus",
m.nodetags,
m.meta,
map[string]interface{}{"value": int(num_cpus)},
time.Now(),
)
if err == nil {
output <- num_cpus_metric
}
}
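One side of the diff above converts each selected `/proc/stat` column into a percentage of the line total instead of publishing the raw counters. A small stand-alone sketch of that computation (field order as in `/proc/stat`, sample values made up):
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// One "cpu..." line as found in /proc/stat (values are made up).
	line := "cpu0 4705 150 1120 1383030 5490 0 40 0 0 0"
	names := []string{"cpu_user", "cpu_nice", "cpu_system", "cpu_idle",
		"cpu_iowait", "cpu_irq", "cpu_softirq", "cpu_steal",
		"cpu_guest", "cpu_guest_nice"}

	fields := strings.Fields(line)
	values := make(map[string]float64)
	total := 0.0
	for i, name := range names {
		if x, err := strconv.ParseFloat(fields[i+1], 64); err == nil {
			values[name] = x
			total += x
		}
	}
	// publish each column as its share of the line total
	for name, v := range values {
		fmt.Printf("%s = %.2f %%\n", name, v*100.0/total)
	}
}
```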

View File

@@ -97,8 +97,7 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetri
if skip {
continue
}
y := lp.FromInfluxMetric(c)
y, err := lp.New(c.Name(), Tags2Map(c), m.meta, Fields2Map(c), c.Time())
if err == nil {
output <- y
}
@@ -120,7 +119,7 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetri
if skip {
continue
}
y := lp.FromInfluxMetric(f)
y, err := lp.New(f.Name(), Tags2Map(f), m.meta, Fields2Map(f), f.Time())
if err == nil {
output <- y
}

View File

@@ -1,21 +1,18 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"os"
"strings"
"syscall"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
"io/ioutil"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
// "log"
"encoding/json"
"errors"
"strconv"
"strings"
"time"
)
// "log"
const MOUNTFILE = `/proc/self/mounts`
const DISKSTATFILE = `/proc/diskstats`
const DISKSTAT_SYSFSPATH = `/sys/block`
type DiskstatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
@@ -23,89 +20,93 @@ type DiskstatCollectorConfig struct {
type DiskstatCollector struct {
metricCollector
//matches map[string]int
config IOstatCollectorConfig
//devices map[string]IOstatCollectorEntry
matches map[int]string
config DiskstatCollectorConfig
}
func (m *DiskstatCollector) Init(config json.RawMessage) error {
var err error
m.name = "DiskstatCollector"
m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup()
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
file, err := os.Open(string(MOUNTFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return err
// https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
matches := map[int]string{
3: "reads",
4: "reads_merged",
5: "read_sectors",
6: "read_ms",
7: "writes",
8: "writes_merged",
9: "writes_sectors",
10: "writes_ms",
11: "ioops",
12: "ioops_ms",
13: "ioops_weighted_ms",
14: "discards",
15: "discards_merged",
16: "discards_sectors",
17: "discards_ms",
18: "flushes",
19: "flushes_ms",
}
defer file.Close()
m.init = true
return nil
m.matches = make(map[int]string)
for k, v := range matches {
_, skip := stringArrayContains(m.config.ExcludeMetrics, v)
if !skip {
m.matches[k] = v
}
}
if len(m.matches) == 0 {
return errors.New("No metrics to collect")
}
_, err = ioutil.ReadFile(string(DISKSTATFILE))
if err == nil {
m.init = true
}
return err
}
func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var lines []string
if !m.init {
return
}
file, err := os.Open(string(MOUNTFILE))
buffer, err := ioutil.ReadFile(string(DISKSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
}
defer file.Close()
lines = strings.Split(string(buffer), "\n")
part_max_used := uint64(0)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
for _, line := range lines {
if len(line) == 0 {
continue
}
if !strings.HasPrefix(line, "/dev") {
f := strings.Fields(line)
if strings.Contains(f[2], "loop") {
continue
}
linefields := strings.Fields(line)
if strings.Contains(linefields[0], "loop") {
continue
tags := map[string]string{
"device": f[2],
"type": "node",
}
if strings.Contains(linefields[1], "boot") {
continue
for idx, name := range m.matches {
if idx < len(f) {
x, err := strconv.ParseInt(f[idx], 0, 64)
if err == nil {
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": int(x)}, time.Now())
if err == nil {
output <- y
}
}
}
}
path := strings.Replace(linefields[1], `\040`, " ", -1)
stat := syscall.Statfs_t{}
err := syscall.Statfs(path, &stat)
if err != nil {
fmt.Println(err.Error())
return
}
tags := map[string]string{"type": "node", "device": linefields[0]}
total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000)
y, err := lp.New("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
if err == nil {
y.AddMeta("unit", "GBytes")
output <- y
}
free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000)
y, err = lp.New("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
if err == nil {
y.AddMeta("unit", "GBytes")
output <- y
}
perc := (100 * (total - free)) / total
if perc > part_max_used {
part_max_used = perc
}
}
y, err := lp.New("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": part_max_used}, time.Now())
if err == nil {
y.AddMeta("unit", "percent")
output <- y
}
}

View File

@@ -4,18 +4,31 @@
```json
"diskstat": {
"exclude_metrics": [
"disk_total"
"read_ms"
],
}
```
The `diskstat` collector reads data from `/proc/self/mounts` and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink.
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink.
Metrics per device (with `device` tag):
* `disk_total` (unit `GBytes`)
* `disk_free` (unit `GBytes`)
Global metrics:
* `part_max_used` (unit `percent`)
Metrics:
* `reads`
* `reads_merged`
* `read_sectors`
* `read_ms`
* `writes`
* `writes_merged`
* `writes_sectors`
* `writes_ms`
* `ioops`
* `ioops_ms`
* `ioops_weighted_ms`
* `discards`
* `discards_merged`
* `discards_sectors`
* `discards_ms`
* `flushes`
* `flushes_ms`
The device name is added as tag `device`.
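The metric names above follow the column order documented for `/proc/diskstats`. A stand-alone sketch of parsing one line with that field-to-metric mapping (the sample line and values are made up):
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// diskstatFields maps /proc/diskstats column indices to metric names,
// following the kernel iostats documentation referenced above.
var diskstatFields = map[int]string{
	3: "reads", 4: "reads_merged", 5: "read_sectors", 6: "read_ms",
	7: "writes", 8: "writes_merged", 9: "writes_sectors", 10: "writes_ms",
	11: "ioops", 12: "ioops_ms", 13: "ioops_weighted_ms",
	14: "discards", 15: "discards_merged", 16: "discards_sectors", 17: "discards_ms",
	18: "flushes", 19: "flushes_ms",
}

func main() {
	// Example line; real input comes from /proc/diskstats.
	line := " 259       0 nvme0n1 12345 67 890123 456 789 12 3456 78 0 900 534 0 0 0 0 0 0"
	f := strings.Fields(line)
	device := f[2]
	for idx, name := range diskstatFields {
		if idx < len(f) {
			if v, err := strconv.ParseInt(f[idx], 10, 64); err == nil {
				fmt.Printf("%s{device=%s} = %d\n", name, device, v)
			}
		}
	}
}
```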

View File

@@ -7,13 +7,13 @@ import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
@@ -21,18 +21,11 @@ type GpfsCollector struct {
metricCollector
tags map[string]string
config struct {
Mmpmon string `json:"mmpmon_path,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
Mmpmon string `json:"mmpmon"`
}
skipFS map[string]struct{}
}
func (m *GpfsCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
var err error
m.name = "GpfsCollector"
m.setup()
@@ -56,24 +49,20 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
"type": "node",
"filesystem": "",
}
m.skipFS = make(map[string]struct{})
for _, fs := range m.config.ExcludeFilesystem {
m.skipFS[fs] = struct{}{}
}
// GPFS / IBM Spectrum Scale file system statistics can only be queried by user root
user, err := user.Current()
if err != nil {
return fmt.Errorf("Failed to get current user: %v", err)
return fmt.Errorf("GpfsCollector.Init(): Failed to get current user: %v", err)
}
if user.Uid != "0" {
return fmt.Errorf("GPFS file system statistics can only be queried by user root")
return fmt.Errorf("GpfsCollector.Init(): GPFS file system statistics can only be queried by user root")
}
// Check if mmpmon is in executable search path
_, err = exec.LookPath(m.config.Mmpmon)
if err != nil {
return fmt.Errorf("Failed to find mmpmon binary '%s': %v", m.config.Mmpmon, err)
return fmt.Errorf("GpfsCollector.Init(): Failed to find mmpmon binary '%s': %v", m.config.Mmpmon, err)
}
m.init = true
@@ -81,7 +70,6 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
}
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
}
@@ -98,15 +86,12 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
cmd.Stderr = cmdStderr
err := cmd.Run()
if err != nil {
dataStdErr, _ := ioutil.ReadAll(cmdStderr)
dataStdOut, _ := ioutil.ReadAll(cmdStdout)
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to execute command \"%s\": %v\n", cmd.String(), err),
fmt.Sprintf("Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode()),
fmt.Sprintf("Read(): command stderr: \"%s\"\n", string(dataStdErr)),
fmt.Sprintf("Read(): command stdout: \"%s\"\n", string(dataStdOut)),
)
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error())
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode())
data, _ := ioutil.ReadAll(cmdStderr)
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): command stderr: \"%s\"\n", string(data))
data, _ = ioutil.ReadAll(cmdStdout)
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): command stdout: \"%s\"\n", string(data))
return
}
@@ -114,163 +99,148 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
scanner := bufio.NewScanner(cmdStdout)
for scanner.Scan() {
lineSplit := strings.Fields(scanner.Text())
if lineSplit[0] == "_fs_io_s_" {
key_value := make(map[string]string)
for i := 1; i < len(lineSplit); i += 2 {
key_value[lineSplit[i]] = lineSplit[i+1]
}
// Only process lines starting with _fs_io_s_
if lineSplit[0] != "_fs_io_s_" {
continue
}
// Ignore keys:
// _n_: node IP address,
// _nn_: node name,
// _cl_: cluster name,
// _d_: number of disks
key_value := make(map[string]string)
for i := 1; i < len(lineSplit); i += 2 {
key_value[lineSplit[i]] = lineSplit[i+1]
}
filesystem, ok := key_value["_fs_"]
if !ok {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to get filesystem name.\n")
continue
}
// Ignore keys:
// _n_: node IP address,
// _nn_: node name,
// _cl_: cluster name,
// _d_: number of disks
m.tags["filesystem"] = filesystem
filesystem, ok := key_value["_fs_"]
if !ok {
cclog.ComponentError(
m.name,
"Read(): Failed to get filesystem name.")
continue
}
// return code
rc, err := strconv.Atoi(key_value["_rc_"])
if err != nil {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert return code: %s\n", err.Error())
continue
}
if rc != 0 {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Filesystem %s not ok.", filesystem)
continue
}
// Skip excluded filesystems
if _, skip := m.skipFS[filesystem]; skip {
continue
}
sec, err := strconv.ParseInt(key_value["_t_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert seconds to int '%s': %v\n",
key_value["_t_"], err)
continue
}
msec, err := strconv.ParseInt(key_value["_tu_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert micro seconds to int '%s': %v\n",
key_value["_tu_"], err)
continue
}
timestamp := time.Unix(sec, msec*1000)
m.tags["filesystem"] = filesystem
// bytes read
bytesRead, err := strconv.ParseInt(key_value["_br_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert bytes read '%s': %s\n",
key_value["_br_"], err.Error())
continue
}
// return code
rc, err := strconv.Atoi(key_value["_rc_"])
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert return code '%s' to int: %v", key_value["_rc_"], err))
continue
}
if rc != 0 {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Filesystem '%s' is not ok.", filesystem))
continue
}
y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp)
if err == nil {
output <- y
}
sec, err := strconv.ParseInt(key_value["_t_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert seconds '%s' to int64: %v", key_value["_t_"], err))
continue
}
msec, err := strconv.ParseInt(key_value["_tu_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert micro seconds '%s' to int64: %v", key_value["_tu_"], err))
continue
}
timestamp := time.Unix(sec, msec*1000)
// bytes written
bytesWritten, err := strconv.ParseInt(key_value["_bw_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert bytes written '%s': %s\n",
key_value["_bw_"], err.Error())
continue
}
// bytes read
bytesRead, err := strconv.ParseInt(key_value["_br_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert bytes read '%s' to int64: %v", key_value["_br_"], err))
continue
}
if y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp); err == nil {
output <- y
}
y, err = lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp)
if err == nil {
output <- y
}
// bytes written
bytesWritten, err := strconv.ParseInt(key_value["_bw_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert bytes written '%s' to int64: %v", key_value["_bw_"], err))
continue
}
if y, err := lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp); err == nil {
output <- y
}
// number of opens
numOpens, err := strconv.ParseInt(key_value["_oc_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert number of opens '%s': %s\n",
key_value["_oc_"], err.Error())
continue
}
y, err = lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp)
if err == nil {
output <- y
}
// number of opens
numOpens, err := strconv.ParseInt(key_value["_oc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err))
continue
}
if y, err := lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
output <- y
}
// number of closes
numCloses, err := strconv.ParseInt(key_value["_cc_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of closes: %s\n", err.Error())
continue
}
y, err = lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp)
if err == nil {
output <- y
}
// number of closes
numCloses, err := strconv.ParseInt(key_value["_cc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err))
continue
}
if y, err := lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
output <- y
}
// number of reads
numReads, err := strconv.ParseInt(key_value["_rdc_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of reads: %s\n", err.Error())
continue
}
y, err = lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp)
if err == nil {
output <- y
}
// number of reads
numReads, err := strconv.ParseInt(key_value["_rdc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err))
continue
}
if y, err := lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
output <- y
}
// number of writes
numWrites, err := strconv.ParseInt(key_value["_wc_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of writes: %s\n", err.Error())
continue
}
y, err = lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp)
if err == nil {
output <- y
}
// number of writes
numWrites, err := strconv.ParseInt(key_value["_wc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err))
continue
}
if y, err := lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
output <- y
}
// number of read directories
numReaddirs, err := strconv.ParseInt(key_value["_dir_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of read directories: %s\n", err.Error())
continue
}
y, err = lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp)
if err == nil {
output <- y
}
// number of read directories
numReaddirs, err := strconv.ParseInt(key_value["_dir_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err))
continue
}
if y, err := lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
output <- y
}
// Number of inode updates
numInodeUpdates, err := strconv.ParseInt(key_value["_iu_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err))
continue
}
if y, err := lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
output <- y
// Number of inode updates
numInodeUpdates, err := strconv.ParseInt(key_value["_iu_"], 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert Number of inode updates: %s\n", err.Error())
continue
}
y, err = lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp)
if err == nil {
output <- y
}
}
}
}

View File

@@ -1,30 +0,0 @@
## `gpfs` collector
```json
"ibstat": {
"mmpmon_path": "/path/to/mmpmon",
"exclude_filesystem": [
"fs1"
]
}
```
The `gpfs` collector uses the `mmpmon` command to read performance metrics for
GPFS / IBM Spectrum Scale filesystems.
The reported filesystems can be filtered with the `exclude_filesystem` option
in the configuration.
The path to the `mmpmon` command can be configured with the `mmpmon_path` option
in the configuration.
Metrics:
* `gpfs_bytes_read`
* `gpfs_bytes_written`
* `gpfs_num_opens`
* `gpfs_num_closes`
* `gpfs_num_reads`
* `gpfs_num_readdirs`
* `gpfs_num_inode_updates`
The collector adds a `filesystem` tag to all metrics
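The `mmpmon` output is processed line by line: every `_fs_io_s_` line consists of key/value pairs that are collected into a map before the `gpfs_*` metrics are derived from it. A stand-alone sketch of that splitting step (the sample line and values are made up):
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Example _fs_io_s_ line; real input comes from the mmpmon command.
	line := "_fs_io_s_ _n_ 10.1.1.1 _nn_ node01 _rc_ 0 _t_ 1643638562 _tu_ 12345 " +
		"_cl_ cluster1 _fs_ fs1 _d_ 4 _br_ 1024 _bw_ 2048 _oc_ 3 _cc_ 2 _rdc_ 5 _wc_ 7 _dir_ 1 _iu_ 9"

	fields := strings.Fields(line)
	if fields[0] != "_fs_io_s_" {
		return // only _fs_io_s_ lines carry the per-filesystem statistics
	}
	// collect the alternating key/value tokens into a map
	keyValue := make(map[string]string)
	for i := 1; i < len(fields)-1; i += 2 {
		keyValue[fields[i]] = fields[i+1]
	}
	fmt.Println("filesystem:", keyValue["_fs_"])
	fmt.Println("bytes read:", keyValue["_br_"])
	fmt.Println("bytes written:", keyValue["_bw_"])
}
```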

View File

@@ -2,10 +2,8 @@ package collectors
import (
"fmt"
"io/ioutil"
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"golang.org/x/sys/unix"
@@ -22,7 +20,7 @@ type InfinibandCollectorInfo struct {
LID string // IB local Identifier (LID)
device string // IB device
port string // IB device port
portCounterFiles map[string]string // mapping counter name -> sysfs file
portCounterFiles map[string]string // mapping counter name -> file
tagSet map[string]string // corresponding tag list
}
@@ -31,17 +29,29 @@ type InfinibandCollector struct {
config struct {
ExcludeDevices []string `json:"exclude_devices,omitempty"` // IB device to exclude e.g. mlx5_0
}
info []*InfinibandCollectorInfo
info []InfinibandCollectorInfo
}
func (m *InfinibandCollector) Help() {
fmt.Println("This collector includes all devices that can be found below ", IB_BASEPATH)
fmt.Println("and where any of the ports provides a 'lid' file (glob ", IB_BASEPATH, "/<dev>/ports/<port>/lid).")
fmt.Println("The devices can be filtered with the 'exclude_devices' option in the configuration.")
fmt.Println("For each found LIDs the collector calls the 'perfquery' command")
fmt.Println("")
fmt.Println("Full configuration object:")
fmt.Println("\"ibstat\" : {")
fmt.Println(" \"exclude_devices\" : [\"dev1\"]")
fmt.Println("}")
fmt.Println("")
fmt.Println("Metrics:")
fmt.Println("- ib_recv")
fmt.Println("- ib_xmit")
fmt.Println("- ib_recv_pkts")
fmt.Println("- ib_xmit_pkts")
}
// Init initializes the Infiniband collector by walking through files below IB_BASEPATH
func (m *InfinibandCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
var err error
m.name = "InfinibandCollector"
m.setup()
@@ -69,12 +79,8 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
for _, path := range ibDirs {
// Skip, when no LID is assigned
line, err := ioutil.ReadFile(filepath.Join(path, "lid"))
if err != nil {
continue
}
LID := strings.TrimSpace(string(line))
if LID == "0x0" {
LID, ok := readOneLine(path + "/lid")
if !ok || LID == "0x0" {
continue
}
@@ -111,7 +117,7 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
}
m.info = append(m.info,
&InfinibandCollectorInfo{
InfinibandCollectorInfo{
LID: LID,
device: device,
port: port,
@@ -142,28 +148,19 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMetr
}
now := time.Now()
for _, info := range m.info {
for i := range m.info {
// device info
info := &m.info[i]
for counterName, counterFile := range info.portCounterFiles {
line, err := ioutil.ReadFile(counterFile)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read from file '%s': %v", counterFile, err))
continue
}
data := strings.TrimSpace(string(line))
v, err := strconv.ParseInt(data, 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterName, data, err))
continue
}
if y, err := lp.New(counterName, info.tagSet, m.meta, map[string]interface{}{"value": v}, now); err == nil {
output <- y
if data, ok := readOneLine(counterFile); ok {
if v, err := strconv.ParseInt(data, 10, 64); err == nil {
if y, err := lp.New(counterName, info.tagSet, m.meta, map[string]interface{}{"value": v}, now); err == nil {
output <- y
}
}
}
}
}
}

View File

@@ -3,24 +3,17 @@
```json
"ibstat": {
"perfquery_path" : "<path to perfquery command>",
"exclude_devices": [
"mlx4"
]
}
```
The `ibstat` collector includes all Infiniband devices that can be
found below `/sys/class/infiniband/` and where any of the ports provides a
LID file (`/sys/class/infiniband/<dev>/ports/<port>/lid`)
The devices can be filtered with the `exclude_devices` option in the configuration.
For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`.
The `ibstat` collector reads data either through the `perfquery` command or from the sysfs files below `/sys/class/infiniband/<device>`.
Metrics:
* `ib_recv`
* `ib_xmit`
* `ib_recv_pkts`
* `ib_xmit_pkts`
The collector adds a `device` tag to all metrics
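Device discovery works by globbing the `lid` files below `/sys/class/infiniband` and skipping ports whose LID is `0x0`. A minimal stand-alone sketch of that step:
```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strings"
)

func main() {
	lidFiles, err := filepath.Glob("/sys/class/infiniband/*/ports/*/lid")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, lidFile := range lidFiles {
		raw, err := ioutil.ReadFile(lidFile)
		if err != nil {
			continue
		}
		lid := strings.TrimSpace(string(raw))
		if lid == "0x0" {
			continue // port without an assigned LID
		}
		portDir := filepath.Dir(lidFile) // .../<device>/ports/<port>
		port := filepath.Base(portDir)
		device := filepath.Base(filepath.Dir(filepath.Dir(portDir)))
		fmt.Printf("device=%s port=%s LID=%s\n", device, port, lid)
	}
}
```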

View File

@@ -29,6 +29,27 @@ type InfinibandPerfQueryCollector struct {
}
}
func (m *InfinibandPerfQueryCollector) Help() {
fmt.Println("This collector includes all devices that can be found below ", IB_BASEPATH)
fmt.Println("and where any of the ports provides a 'lid' file (glob ", IB_BASEPATH, "/<dev>/ports/<port>/lid).")
fmt.Println("The devices can be filtered with the 'exclude_devices' option in the configuration.")
fmt.Println("For each found LIDs the collector calls the 'perfquery' command")
fmt.Println("The path to the 'perfquery' command can be configured with the 'perfquery_path' option")
fmt.Println("in the configuration")
fmt.Println("")
fmt.Println("Full configuration object:")
fmt.Println("\"ibstat\" : {")
fmt.Println(" \"perfquery_path\" : \"path/to/perfquery\" # if omitted, it searches in $PATH")
fmt.Println(" \"exclude_devices\" : [\"dev1\"]")
fmt.Println("}")
fmt.Println("")
fmt.Println("Metrics:")
fmt.Println("- ib_recv")
fmt.Println("- ib_xmit")
fmt.Println("- ib_recv_pkts")
fmt.Println("- ib_xmit_pkts")
}
func (m *InfinibandPerfQueryCollector) Init(config json.RawMessage) error {
var err error
m.name = "InfinibandCollectorPerfQuery"
@@ -50,9 +71,6 @@ func (m *InfinibandPerfQueryCollector) Init(config json.RawMessage) error {
m.lids = make(map[string]map[string]string)
p := fmt.Sprintf("%s/*/ports/*/lid", string(IB_BASEPATH))
files, err := filepath.Glob(p)
if err != nil {
return err
}
for _, f := range files {
lid, err := ioutil.ReadFile(f)
if err == nil {

View File

@@ -1,28 +0,0 @@
## `ibstat_perfquery` collector
```json
"ibstat_perfquery": {
"perfquery_path": "/path/to/perfquery",
"exclude_devices": [
"mlx4"
]
}
```
The `ibstat_perfquery` collector includes all Infiniband devices that can be
found below `/sys/class/infiniband/` and where any of the ports provides a
LID file (`/sys/class/infiniband/<dev>/ports/<port>/lid`)
The devices can be filtered with the `exclude_devices` option in the configuration.
For each found LID the collector calls the `perfquery` command. The path to the
`perfquery` command can be configured with the `perfquery_path` option in the configuration
Metrics:
* `ib_recv`
* `ib_xmit`
* `ib_recv_pkts`
* `ib_xmit_pkts`
The collector adds a `device` tag to all metrics

View File

@@ -1,155 +0,0 @@
package collectors
import (
"bufio"
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
// "log"
"encoding/json"
"errors"
"strconv"
"strings"
"time"
)
const IOSTATFILE = `/proc/diskstats`
const IOSTAT_SYSFSPATH = `/sys/block`
type IOstatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
type IOstatCollectorEntry struct {
lastValues map[string]int64
tags map[string]string
}
type IOstatCollector struct {
metricCollector
matches map[string]int
config IOstatCollectorConfig
devices map[string]IOstatCollectorEntry
}
func (m *IOstatCollector) Init(config json.RawMessage) error {
var err error
m.name = "IOstatCollector"
m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup()
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
// https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
matches := map[string]int{
"io_reads": 3,
"io_reads_merged": 4,
"io_read_sectors": 5,
"io_read_ms": 6,
"io_writes": 7,
"io_writes_merged": 8,
"io_writes_sectors": 9,
"io_writes_ms": 10,
"io_ioops": 11,
"io_ioops_ms": 12,
"io_ioops_weighted_ms": 13,
"io_discards": 14,
"io_discards_merged": 15,
"io_discards_sectors": 16,
"io_discards_ms": 17,
"io_flushes": 18,
"io_flushes_ms": 19,
}
m.devices = make(map[string]IOstatCollectorEntry)
m.matches = make(map[string]int)
for k, v := range matches {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, k); !skip {
m.matches[k] = v
}
}
if len(m.matches) == 0 {
return errors.New("no metrics to collect")
}
file, err := os.Open(string(IOSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
device := linefields[2]
if strings.Contains(device, "loop") {
continue
}
values := make(map[string]int64)
for m := range m.matches {
values[m] = 0
}
m.devices[device] = IOstatCollectorEntry{
tags: map[string]string{
"device": linefields[2],
"type": "node",
},
lastValues: values,
}
}
m.init = true
return err
}
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
file, err := os.Open(string(IOSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 {
continue
}
linefields := strings.Fields(line)
device := linefields[2]
if strings.Contains(device, "loop") {
continue
}
if _, ok := m.devices[device]; !ok {
continue
}
entry := m.devices[device]
for name, idx := range m.matches {
if idx < len(linefields) {
x, err := strconv.ParseInt(linefields[idx], 0, 64)
if err == nil {
diff := x - entry.lastValues[name]
y, err := lp.New(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
if err == nil {
output <- y
}
}
entry.lastValues[name] = x
}
}
m.devices[device] = entry
}
}
func (m *IOstatCollector) Close() {
m.init = false
}

View File

@@ -1,34 +0,0 @@
## `iostat` collector
```json
"iostat": {
"exclude_metrics": [
"read_ms"
],
}
```
The `iostat` collector reads data from `/proc/diskstats` and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink.
Metrics:
* `io_reads`
* `io_reads_merged`
* `io_read_sectors`
* `io_read_ms`
* `io_writes`
* `io_writes_merged`
* `io_writes_sectors`
* `io_writes_ms`
* `io_ioops`
* `io_ioops_ms`
* `io_ioops_weighted_ms`
* `io_discards`
* `io_discards_merged`
* `io_discards_sectors`
* `io_discards_ms`
* `io_flushes`
* `io_flushes_ms`
The device name is added as tag `device`. For more details, see https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
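In contrast to the `diskstat` variant, the `iostat` collector shown above remembers the previously read counter value per device and emits only the difference, so the cumulative `/proc/diskstats` counters become per-interval values. A small sketch of that delta handling (values are made up):
```go
package main

import "fmt"

func main() {
	// lastValues mimics the per-device bookkeeping of the collector above.
	lastValues := map[string]int64{"io_reads": 0}

	// Two consecutive raw counter readings from /proc/diskstats (made up).
	for _, current := range []int64{1500, 1620} {
		diff := current - lastValues["io_reads"]
		lastValues["io_reads"] = current
		fmt.Println("io_reads delta:", diff)
	}
}
```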

View File

@@ -9,12 +9,11 @@ import (
"strconv"
"strings"
"time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const IPMITOOL_PATH = `ipmitool`
const IPMISENSORS_PATH = `ipmi-sensors`
const IPMITOOL_PATH = `/usr/bin/ipmitool`
const IPMISENSORS_PATH = `/usr/sbin/ipmi-sensors`
type IpmiCollectorConfig struct {
ExcludeDevices []string `json:"exclude_devices"`
@@ -24,36 +23,30 @@ type IpmiCollectorConfig struct {
type IpmiCollector struct {
metricCollector
//tags map[string]string
//matches map[string]string
config IpmiCollectorConfig
ipmitool string
ipmisensors string
tags map[string]string
matches map[string]string
config IpmiCollectorConfig
}
func (m *IpmiCollector) Init(config json.RawMessage) error {
m.name = "IpmiCollector"
m.setup()
m.meta = map[string]string{"source": m.name, "group": "IPMI"}
m.config.IpmitoolPath = string(IPMITOOL_PATH)
m.config.IpmisensorsPath = string(IPMISENSORS_PATH)
m.ipmitool = ""
m.ipmisensors = ""
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
p, err := exec.LookPath(m.config.IpmitoolPath)
if err == nil {
m.ipmitool = p
_, err1 := os.Stat(m.config.IpmitoolPath)
_, err2 := os.Stat(m.config.IpmisensorsPath)
if err1 != nil {
m.config.IpmitoolPath = ""
}
p, err = exec.LookPath(m.config.IpmisensorsPath)
if err == nil {
m.ipmisensors = p
if err2 != nil {
m.config.IpmisensorsPath = ""
}
if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 {
if err1 != nil && err2 != nil {
return errors.New("No IPMI reader found")
}
m.init = true

View File

@@ -2,7 +2,7 @@ package collectors
/*
#cgo CFLAGS: -I./likwid
#cgo LDFLAGS: -L./likwid -llikwid -llikwid-hwloc -lm -Wl,--unresolved-symbols=ignore-in-object-files
#cgo LDFLAGS: -L./likwid -llikwid -llikwid-hwloc -lm
#include <stdlib.h>
#include <likwid.h>
*/
@@ -13,111 +13,67 @@ import (
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"regexp"
"strconv"
"strings"
"time"
"unsafe"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
topo "github.com/ClusterCockpit/cc-metric-collector/internal/ccTopology"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
"github.com/NVIDIA/go-nvml/pkg/dl"
"gopkg.in/Knetic/govaluate.v2"
)
type MetricScope string
type MetricScope int
const (
METRIC_SCOPE_HWTHREAD = iota
METRIC_SCOPE_CORE
METRIC_SCOPE_LLC
METRIC_SCOPE_NUMA
METRIC_SCOPE_DIE
METRIC_SCOPE_SOCKET
METRIC_SCOPE_NUMA
METRIC_SCOPE_NODE
)
func (ms MetricScope) String() string {
return string(ms)
return []string{"Head", "Shoulder", "Knee", "Toe"}[ms]
}
func (ms MetricScope) Likwid() string {
LikwidDomains := map[string]string{
"cpu": "",
"core": "",
"llc": "C",
"numadomain": "M",
"die": "D",
"socket": "S",
"node": "N",
}
return LikwidDomains[string(ms)]
}
func (ms MetricScope) Granularity() int {
for i, g := range GetAllMetricScopes() {
if ms == g {
return i
}
}
return -1
}
func GetAllMetricScopes() []MetricScope {
return []MetricScope{"cpu" /*, "core", "llc", "numadomain", "die",*/, "socket", "node"}
}
const (
LIKWID_LIB_NAME = "liblikwid.so"
LIKWID_LIB_DL_FLAGS = dl.RTLD_LAZY | dl.RTLD_GLOBAL
)
type LikwidCollectorMetricConfig struct {
Name string `json:"name"` // Name of the metric
Calc string `json:"calc"` // Calculation for the metric using
//Aggr string `json:"aggregation"` // if scope unequal to LIKWID metric scope, the values are combined (sum, min, max, mean or avg, median)
Scope MetricScope `json:"scope"` // scope for calculation. subscopes are aggregated using the 'aggregation' function
Publish bool `json:"publish"`
granulatity MetricScope
Name string `json:"name"`
Calc string `json:"calc"`
Scope MetricScope `json:"socket_scope"`
Publish bool `json:"publish"`
}
type LikwidCollectorEventsetConfig struct {
Events map[string]string `json:"events"`
granulatity map[string]MetricScope
Metrics []LikwidCollectorMetricConfig `json:"metrics"`
Events map[string]string `json:"events"`
Metrics []LikwidCollectorMetricConfig `json:"metrics"`
}
type LikwidCollectorConfig struct {
Eventsets []LikwidCollectorEventsetConfig `json:"eventsets"`
Metrics []LikwidCollectorMetricConfig `json:"globalmetrics,omitempty"`
ForceOverwrite bool `json:"force_overwrite,omitempty"`
InvalidToZero bool `json:"invalid_to_zero,omitempty"`
Metrics []LikwidCollectorMetricConfig `json:"globalmetrics"`
ExcludeMetrics []string `json:"exclude_metrics"`
ForceOverwrite bool `json:"force_overwrite"`
}
type LikwidCollector struct {
metricCollector
cpulist []C.int
cpu2tid map[int]int
sock2tid map[int]int
scopeRespTids map[MetricScope]map[int]int
metrics map[C.int]map[string]int
groups []C.int
config LikwidCollectorConfig
results map[int]map[int]map[string]interface{}
mresults map[int]map[int]map[string]float64
gmresults map[int]map[string]float64
basefreq float64
running bool
cpulist []C.int
sock2tid map[int]int
metrics map[C.int]map[string]int
groups []C.int
config LikwidCollectorConfig
results map[int]map[int]map[string]interface{}
mresults map[int]map[int]map[string]float64
gmresults map[int]map[string]float64
basefreq float64
}
type LikwidMetric struct {
name string
search string
scope MetricScope
group_idx int
name string
search string
socket_scope bool
group_idx int
}
func eventsToEventStr(events map[string]string) string {
@@ -128,27 +84,12 @@ func eventsToEventStr(events map[string]string) string {
return strings.Join(elist, ",")
}
func getGranularity(counter, event string) MetricScope {
if strings.HasPrefix(counter, "PMC") || strings.HasPrefix(counter, "FIXC") {
return "cpu"
} else if strings.Contains(counter, "BOX") || strings.Contains(counter, "DEV") {
return "socket"
} else if strings.HasPrefix(counter, "PWR") {
if event == "RAPL_CORE_ENERGY" {
return "cpu"
} else {
return "socket"
}
}
return "unknown"
}
func getBaseFreq() float64 {
var freq float64 = math.NaN()
C.power_init(0)
info := C.get_powerInfo()
if float64(info.baseFrequency) != 0 {
freq = float64(info.baseFrequency) * 1e3
freq = float64(info.baseFrequency)
} else {
buffer, err := ioutil.ReadFile("/sys/devices/system/cpu/cpu0/cpufreq/bios_limit")
if err == nil {
@@ -162,99 +103,18 @@ func getBaseFreq() float64 {
return freq
}
func (m *LikwidCollector) initGranularity() {
splitRegex := regexp.MustCompile("[+-/*()]")
for _, evset := range m.config.Eventsets {
evset.granulatity = make(map[string]MetricScope)
for counter, event := range evset.Events {
gran := getGranularity(counter, event)
if gran.Granularity() >= 0 {
evset.granulatity[counter] = gran
}
}
for i, metric := range evset.Metrics {
s := splitRegex.Split(metric.Calc, -1)
gran := MetricScope("cpu")
evset.Metrics[i].granulatity = gran
for _, x := range s {
if _, ok := evset.Events[x]; ok {
if evset.granulatity[x].Granularity() > gran.Granularity() {
gran = evset.granulatity[x]
}
}
}
evset.Metrics[i].granulatity = gran
func getSocketCpus() map[C.int]int {
slist := SocketList()
var cpu C.int
outmap := make(map[C.int]int)
for _, s := range slist {
t := C.CString(fmt.Sprintf("S%d", s))
clen := C.cpustr_to_cpulist(t, &cpu, 1)
if int(clen) == 1 {
outmap[cpu] = s
}
}
for i, metric := range m.config.Metrics {
s := splitRegex.Split(metric.Calc, -1)
gran := MetricScope("cpu")
m.config.Metrics[i].granulatity = gran
for _, x := range s {
for _, evset := range m.config.Eventsets {
for _, m := range evset.Metrics {
if m.Name == x && m.granulatity.Granularity() > gran.Granularity() {
gran = m.granulatity
}
}
}
}
m.config.Metrics[i].granulatity = gran
}
}
type TopoResolveFunc func(cpuid int) int
func (m *LikwidCollector) getResponsiblities() map[MetricScope]map[int]int {
get_cpus := func(scope MetricScope) map[int]int {
var slist []int
var cpu C.int
var input func(index int) string
switch scope {
case "node":
slist = []int{0}
input = func(index int) string { return "N:0" }
case "socket":
input = func(index int) string { return fmt.Sprintf("%s%d:0", scope.Likwid(), index) }
slist = topo.SocketList()
// case "numadomain":
// input = func(index int) string { return fmt.Sprintf("%s%d:0", scope.Likwid(), index) }
// slist = topo.NumaNodeList()
// cclog.Debug(scope, " ", input(0), " ", slist)
// case "die":
// input = func(index int) string { return fmt.Sprintf("%s%d:0", scope.Likwid(), index) }
// slist = topo.DieList()
// case "llc":
// input = fmt.Sprintf("%s%d:0", scope.Likwid(), s)
// slist = topo.LLCacheList()
case "cpu":
input = func(index int) string { return fmt.Sprintf("%d", index) }
slist = topo.CpuList()
case "hwthread":
input = func(index int) string { return fmt.Sprintf("%d", index) }
slist = topo.CpuList()
}
outmap := make(map[int]int)
for _, s := range slist {
t := C.CString(input(s))
clen := C.cpustr_to_cpulist(t, &cpu, 1)
if int(clen) == 1 {
outmap[s] = m.cpu2tid[int(cpu)]
} else {
cclog.Error(fmt.Sprintf("Cannot determine responsible CPU for %s", input(s)))
outmap[s] = -1
}
C.free(unsafe.Pointer(t))
}
return outmap
}
scopes := GetAllMetricScopes()
complete := make(map[MetricScope]map[int]int)
for _, s := range scopes {
complete[s] = get_cpus(s)
}
return complete
return outmap
}
func (m *LikwidCollector) Init(config json.RawMessage) error {
@@ -266,78 +126,38 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
return err
}
}
lib := dl.New(LIKWID_LIB_NAME, LIKWID_LIB_DL_FLAGS)
if lib == nil {
return fmt.Errorf("error instantiating DynamicLibrary for %s", LIKWID_LIB_NAME)
}
if m.config.ForceOverwrite {
cclog.ComponentDebug(m.name, "Set LIKWID_FORCE=1")
os.Setenv("LIKWID_FORCE", "1")
}
m.setup()
m.meta = map[string]string{"source": m.name, "group": "PerfCounter"}
cclog.ComponentDebug(m.name, "Get cpulist and init maps and lists")
cpulist := topo.CpuList()
cpulist := CpuList()
m.cpulist = make([]C.int, len(cpulist))
m.cpu2tid = make(map[int]int)
slist := getSocketCpus()
m.sock2tid = make(map[int]int)
// m.numa2tid = make(map[int]int)
for i, c := range cpulist {
m.cpulist[i] = C.int(c)
m.cpu2tid[c] = i
if sid, found := slist[m.cpulist[i]]; found {
m.sock2tid[sid] = i
}
}
m.results = make(map[int]map[int]map[string]interface{})
m.mresults = make(map[int]map[int]map[string]float64)
m.gmresults = make(map[int]map[string]float64)
cclog.ComponentDebug(m.name, "initialize LIKWID topology")
ret = C.topology_init()
if ret != 0 {
err := errors.New("failed to initialize LIKWID topology")
cclog.ComponentError(m.name, err.Error())
return err
return errors.New("Failed to initialize LIKWID topology")
}
if m.config.ForceOverwrite {
os.Setenv("LIKWID_FORCE", "1")
}
// Determine which counter works at which level. PMC*: cpu, *BOX*: socket, ...
m.initGranularity()
// Generate map for MetricScope -> scope_id (like socket id) -> responsible id (offset in cpulist)
m.scopeRespTids = m.getResponsiblities()
cclog.ComponentDebug(m.name, "initialize LIKWID perfmon module")
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
if ret != 0 {
C.topology_finalize()
err := errors.New("failed to initialize LIKWID topology")
cclog.ComponentError(m.name, err.Error())
return err
return errors.New("Failed to initialize LIKWID topology")
}
// This is for the global metrics computation test
globalParams := make(map[string]interface{})
globalParams["time"] = float64(1.0)
globalParams["inverseClock"] = float64(1.0)
// While adding the events, we test the metrics whether they can be computed at all
for i, evset := range m.config.Eventsets {
estr := eventsToEventStr(evset.Events)
// Generate parameter list for the metric computing test
params := make(map[string]interface{})
params["time"] = float64(1.0)
params["inverseClock"] = float64(1.0)
for counter := range evset.Events {
params[counter] = float64(1.0)
}
for _, metric := range evset.Metrics {
// Try to evaluate the metric
_, err := agg.EvalFloat64Condition(metric.Calc, params)
if err != nil {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed:", err.Error())
continue
}
// If the metric is not in the parameter list for the global metrics, add it
if _, ok := globalParams[metric.Name]; !ok {
globalParams[metric.Name] = float64(1.0)
}
}
// Now we add the list of events to likwid
cstr := C.CString(estr)
gid := C.perfmon_addEventSet(cstr)
if gid >= 0 {
@@ -349,208 +169,161 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
for tid := range m.cpulist {
m.results[i][tid] = make(map[string]interface{})
m.mresults[i][tid] = make(map[string]float64)
if i == 0 {
m.gmresults[tid] = make(map[string]float64)
}
}
}
for _, metric := range m.config.Metrics {
// Try to evaluate the global metric
_, err := agg.EvalFloat64Condition(metric.Calc, globalParams)
if err != nil {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed:", err.Error())
continue
m.gmresults[tid] = make(map[string]float64)
}
}
// If no event set could be added, shut down LikwidCollector
if len(m.groups) == 0 {
C.perfmon_finalize()
C.topology_finalize()
err := errors.New("no LIKWID performance group initialized")
cclog.ComponentError(m.name, err.Error())
return err
return errors.New("No LIKWID performance group initialized")
}
m.basefreq = getBaseFreq()
cclog.ComponentDebug(m.name, "BaseFreq", m.basefreq)
m.init = true
return nil
}
// take a measurement for 'interval' seconds of event set index 'group'
func (m *LikwidCollector) takeMeasurement(group int, interval time.Duration) error {
var ret C.int
gid := m.groups[group]
ret = C.perfmon_setupCounters(gid)
if ret != 0 {
gctr := C.GoString(C.perfmon_getGroupName(gid))
err := fmt.Errorf("failed to setup performance group %d (%s)", gid, gctr)
return err
}
ret = C.perfmon_startCounters()
if ret != 0 {
gctr := C.GoString(C.perfmon_getGroupName(gid))
err := fmt.Errorf("failed to start performance group %d (%s)", gid, gctr)
return err
}
m.running = true
time.Sleep(interval)
m.running = false
ret = C.perfmon_stopCounters()
if ret != 0 {
gctr := C.GoString(C.perfmon_getGroupName(gid))
err := fmt.Errorf("failed to stop performance group %d (%s)", gid, gctr)
return err
}
return nil
}
// Get all measurement results for an event set, derive the metric values out of the measurement results and send it
func (m *LikwidCollector) calcEventsetMetrics(group int, interval time.Duration, output chan lp.CCMetric) error {
var eidx C.int
evset := m.config.Eventsets[group]
gid := m.groups[group]
invClock := float64(1.0 / m.basefreq)
// Go over events and get the results
for eidx = 0; int(eidx) < len(evset.Events); eidx++ {
ctr := C.perfmon_getCounterName(gid, eidx)
ev := C.perfmon_getEventName(gid, eidx)
gctr := C.GoString(ctr)
gev := C.GoString(ev)
// MetricScope for the counter (and if needed the event)
scope := getGranularity(gctr, gev)
// Get the map scope-id -> tids
// This way we read less counters like only the responsible hardware thread for a socket
scopemap := m.scopeRespTids[scope]
for _, tid := range scopemap {
if tid >= 0 {
m.results[group][tid]["time"] = interval.Seconds()
m.results[group][tid]["inverseClock"] = invClock
res := C.perfmon_getLastResult(gid, eidx, C.int(tid))
m.results[group][tid][gctr] = float64(res)
}
}
}
// Go over the event set metrics, derive the value out of the event:counter values and send it
for _, metric := range evset.Metrics {
// The metric scope is determined in the Init() function
// Get the map scope-id -> tids
scopemap := m.scopeRespTids[metric.Scope]
for domain, tid := range scopemap {
if tid >= 0 {
value, err := agg.EvalFloat64Condition(metric.Calc, m.results[group][tid])
if err != nil {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed:", err.Error())
continue
}
m.mresults[group][tid][metric.Name] = value
if m.config.InvalidToZero && math.IsNaN(value) {
value = 0.0
}
if m.config.InvalidToZero && math.IsInf(value, 0) {
value = 0.0
}
// Now we have the result, send it with the proper tags
if !math.IsNaN(value) {
if metric.Publish {
tags := map[string]string{"type": metric.Scope.String()}
if metric.Scope != "node" {
tags["type-id"] = fmt.Sprintf("%d", domain)
}
fields := map[string]interface{}{"value": value}
y, err := lp.New(metric.Name, tags, m.meta, fields, time.Now())
if err == nil {
output <- y
}
}
}
}
}
}
return nil
}
// Go over the global metrics, derive the value out of the event sets' metric values and send it
func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
for _, metric := range m.config.Metrics {
scopemap := m.scopeRespTids[metric.Scope]
for domain, tid := range scopemap {
if tid >= 0 {
// Here we generate parameter list
params := make(map[string]interface{})
for j := range m.groups {
for mname, mres := range m.mresults[j][tid] {
params[mname] = mres
}
}
// Evaluate the metric
value, err := agg.EvalFloat64Condition(metric.Calc, params)
if err != nil {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed:", err.Error())
continue
}
m.gmresults[tid][metric.Name] = value
if m.config.InvalidToZero && math.IsNaN(value) {
value = 0.0
}
if m.config.InvalidToZero && math.IsInf(value, 0) {
value = 0.0
}
// Now we have the result, send it with the proper tags
if !math.IsNaN(value) {
if metric.Publish {
tags := map[string]string{"type": metric.Scope.String()}
if metric.Scope != "node" {
tags["type-id"] = fmt.Sprintf("%d", domain)
}
fields := map[string]interface{}{"value": value}
y, err := lp.New(metric.Name, tags, m.meta, fields, time.Now())
if err == nil {
output <- y
}
}
}
}
}
}
return nil
}
// main read function taking multiple measurement rounds, each 'interval' seconds long
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
var ret C.int
for i := range m.groups {
// measure event set 'i' for 'interval' seconds
err := m.takeMeasurement(i, interval)
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
for i, gid := range m.groups {
evset := m.config.Eventsets[i]
ret = C.perfmon_setupCounters(gid)
if ret != 0 {
log.Print("Failed to setup performance group ", C.perfmon_getGroupName(gid))
continue
}
ret = C.perfmon_startCounters()
if ret != 0 {
log.Print("Failed to start performance group ", C.perfmon_getGroupName(gid))
continue
}
time.Sleep(interval)
ret = C.perfmon_stopCounters()
if ret != 0 {
log.Print("Failed to stop performance group ", C.perfmon_getGroupName(gid))
continue
}
var eidx C.int
for tid := range m.cpulist {
for eidx = 0; int(eidx) < len(evset.Events); eidx++ {
ctr := C.perfmon_getCounterName(gid, eidx)
gctr := C.GoString(ctr)
res := C.perfmon_getLastResult(gid, eidx, C.int(tid))
m.results[i][tid][gctr] = float64(res)
}
m.results[i][tid]["time"] = interval.Seconds()
m.results[i][tid]["inverseClock"] = float64(1.0 / m.basefreq)
for _, metric := range evset.Metrics {
expression, err := govaluate.NewEvaluableExpression(metric.Calc)
if err != nil {
log.Print(err.Error())
continue
}
result, err := expression.Evaluate(m.results[i][tid])
if err != nil {
log.Print(err.Error())
continue
}
m.mresults[i][tid][metric.Name] = float64(result.(float64))
}
}
}
for _, metric := range m.config.Metrics {
for tid := range m.cpulist {
var params map[string]interface{}
expression, err := govaluate.NewEvaluableExpression(metric.Calc)
if err != nil {
log.Print(err.Error())
continue
}
params = make(map[string]interface{})
for j := range m.groups {
for mname, mres := range m.mresults[j][tid] {
params[mname] = mres
}
}
result, err := expression.Evaluate(params)
if err != nil {
log.Print(err.Error())
continue
}
m.gmresults[tid][metric.Name] = float64(result.(float64))
}
}
for i := range m.groups {
evset := m.config.Eventsets[i]
for _, metric := range evset.Metrics {
_, skip := stringArrayContains(m.config.ExcludeMetrics, metric.Name)
if metric.Publish && !skip {
if metric.Scope.String() == "socket" {
for sid, tid := range m.sock2tid {
y, err := lp.New(metric.Name,
map[string]string{"type": "socket",
"type-id": fmt.Sprintf("%d", int(sid))},
m.meta,
map[string]interface{}{"value": m.mresults[i][tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
} else if metric.Scope.String() == "hwthread" {
for tid, cpu := range m.cpulist {
y, err := lp.New(metric.Name,
map[string]string{"type": "cpu",
"type-id": fmt.Sprintf("%d", int(cpu))},
m.meta,
map[string]interface{}{"value": m.mresults[i][tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
}
}
}
}
for _, metric := range m.config.Metrics {
_, skip := stringArrayContains(m.config.ExcludeMetrics, metric.Name)
if metric.Publish && !skip {
if metric.Scope.String() == "socket" {
for sid, tid := range m.sock2tid {
y, err := lp.New(metric.Name,
map[string]string{"type": "socket",
"type-id": fmt.Sprintf("%d", int(sid))},
m.meta,
map[string]interface{}{"value": m.gmresults[tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
} else {
for tid, cpu := range m.cpulist {
y, err := lp.New(metric.Name,
map[string]string{"type": "cpu",
"type-id": fmt.Sprintf("%d", int(cpu))},
m.meta,
map[string]interface{}{"value": m.gmresults[tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
}
}
// read measurements and derive event set metrics
m.calcEventsetMetrics(i, interval, output)
}
// use the event set metrics to derive the global metrics
m.calcGlobalMetrics(interval, output)
}
func (m *LikwidCollector) Close() {
if m.init {
cclog.ComponentDebug(m.name, "Closing ...")
m.init = false
if m.running {
cclog.ComponentDebug(m.name, "Stopping counters")
C.perfmon_stopCounters()
}
cclog.ComponentDebug(m.name, "Finalize LIKWID perfmon module")
C.perfmon_finalize()
cclog.ComponentDebug(m.name, "Finalize LIKWID topology module")
C.topology_finalize()
cclog.ComponentDebug(m.name, "Closing done")
}
}

View File

@@ -1,39 +1,7 @@
## `likwid` collector
The `likwid` collector is probably the most complicated collector. The LIKWID library is included as a static library with *direct* access mode. The *direct* access mode is suitable if the daemon is executed by a root user. The static library does not contain the performance groups, so all information needs to be provided in the configuration.
The `likwid` configuration consists of two parts, the "eventsets" and "globalmetrics":
- An event set itself has two parts, the "events" and a set of derivable "metrics". Each of the "events" is a counter:event pair in LIKWID's syntax. The "metrics" are a list of formulas to derive the metric value from the measurements of the "events". Each metric has a name, a formula, a scope and a publish flag. Counter names can be used like variables in the formulas, so `PMC0+PMC1` sums the measurements of the two events configured in the counters `PMC0` and `PMC1`. The scope tells the collector whether it is a metric for each hardware thread (`cpu`) or each CPU socket (`socket`). The publish flag tells the collector whether the metric should be sent to the router.
- The global metrics are metrics that are derived from the results of all event set measurements. The inputs are the metrics in the event sets. Like the event set metrics, the global metrics are defined by a name, a formula, a scope and a publish flag. See the event set metrics for details. The only difference is that there is no access to the raw event measurements anymore, only to the metrics. So the idea is to derive a metric in the "eventsets" section and reuse it in the "globalmetrics" part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics. **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases.
Additional options:
- `force_overwrite`: Same as setting `LIKWID_FORCE=1`. In case counters are already in-use, LIKWID overwrites their configuration to do its measurements
- `invalid_to_zero`: In some cases, the calculations result in `NaN` or `Inf`. With this option, all `NaN` and `Inf` values are replaced with `0.0`.
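As a rough illustration of the `invalid_to_zero` behaviour, the check boils down to replacing non-finite results before publishing; a minimal sketch, not the collector's exact code:
```go
package main

import (
	"fmt"
	"math"
)

// sanitize replaces NaN and +/-Inf results with 0.0 when invalidToZero is set,
// mirroring the behaviour described for the `invalid_to_zero` option.
func sanitize(value float64, invalidToZero bool) float64 {
	if invalidToZero && (math.IsNaN(value) || math.IsInf(value, 0)) {
		return 0.0
	}
	return value
}

func main() {
	fmt.Println(sanitize(math.NaN(), true))  // 0
	fmt.Println(sanitize(math.Inf(1), true)) // 0
	fmt.Println(sanitize(1.5, true))         // 1.5
}
```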
### Available metric scopes
Hardware performance counters are scattered all over the system nowadays. A counter covers a specific part of the system. While there are hardware-thread-specific counters for CPU cycles, instructions and so on, others are specific to a whole CPU socket/package. To address that, the collector lets you specify a 'scope' for each metric.
- `cpu` : One metric per CPU hardware thread with the tags `"type" : "cpu"` and `"type-id" : "$cpu_id"`
- `socket` : One metric per CPU socket/package with the tags `"type" : "socket"` and `"type-id" : "$socket_id"`
**Note:** You cannot specify `socket` scope for a metric that is measured at `cpu` scope, so some kind of expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the scope of each counter from the *Architecture* pages and as soon as one counter in a metric is socket-specific, the whole metric is socket-specific.
As a guideline:
- All counters `FIXCx`, `PMCy` and `TMAz` have the scope `cpu`
- All counters names containing `BOX` have the scope `socket`
- All `PWRx` counters have scope `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"`, which has `cpu` scope
- All `DFCx` counters have scope `socket`
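The guideline above can be expressed as a small mapping from counter name (and, for `PWR` counters, the event) to scope. The following is only a sketch of that rule of thumb, not the collector's exact detection logic:
```go
package main

import (
	"fmt"
	"strings"
)

// scopeOfCounter maps a LIKWID counter/event pair to a metric scope
// following the guideline above.
func scopeOfCounter(counter, event string) string {
	switch {
	case strings.HasPrefix(counter, "PMC"), strings.HasPrefix(counter, "FIXC"), strings.HasPrefix(counter, "TMA"):
		return "cpu"
	case strings.HasPrefix(counter, "PWR"):
		if event == "RAPL_CORE_ENERGY" {
			return "cpu"
		}
		return "socket"
	case strings.Contains(counter, "BOX"), strings.HasPrefix(counter, "DFC"):
		return "socket"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(scopeOfCounter("PMC0", "RETIRED_INSTRUCTIONS")) // cpu
	fmt.Println(scopeOfCounter("PWR1", "RAPL_CORE_ENERGY"))     // cpu
	fmt.Println(scopeOfCounter("DFC0", "DRAM_CHANNEL_0"))       // socket
}
```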
### Example configuration
```json
"likwid": {
"force_overwrite" : false,
"nan_to_zero" : false,
"eventsets": [
{
"events": {
@@ -52,25 +20,25 @@ As a guideline:
{
"name": "ipc",
"calc": "PMC0/PMC1",
"scope": "cpu",
"socket_scope": false,
"publish": true
},
{
"name": "flops_any",
"calc": "0.000001*PMC2/time",
"scope": "cpu",
"socket_scope": false,
"publish": true
},
{
"name": "clock_mhz",
"calc": "0.000001*(FIXC1/FIXC2)/inverseClock",
"scope": "cpu",
"socket_scope": false,
"publish": true
},
{
"name": "mem1",
"calc": "0.000001*(DFC0+DFC1+DFC2+DFC3)*64.0/time",
"scope": "socket",
"socket_scope": true,
"publish": false
}
]
@@ -88,19 +56,19 @@ As a guideline:
{
"name": "pwr_core",
"calc": "PWR0/time",
"scope": "socket",
"socket_scope": false,
"publish": true
},
{
"name": "pwr_pkg",
"calc": "PWR1/time",
"scope": "socket",
"socket_scope": true,
"publish": true
},
{
"name": "mem2",
"calc": "0.000001*(DFC0+DFC1+DFC2+DFC3)*64.0/time",
"scope": "socket",
"socket_scope": true,
"publish": false
}
]
@@ -110,16 +78,16 @@ As a guideline:
{
"name": "mem_bw",
"calc": "mem1+mem2",
"scope": "socket",
"socket_scope": true,
"publish": true
}
]
}
```
### How to get the eventsets and metrics from LIKWID
_Example config suitable for AMD Zen3_
The `likwid` collector reads hardware performance counters at a **cpu** and **socket** level. The configuration looks quite complicated but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector went through multiple iterations that tried to use the performance groups directly, but that approach lacked flexibility. The current way of configuration provides the most flexibility.
The `likwid` collector reads hardware performance counters at a **hwthread** and **socket** level. The configuration looks quite complicated but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector went through multiple iterations that tried to use the performance groups directly, but that approach lacked flexibility. The current way of configuration provides the most flexibility.
The logic is as follows: There are multiple eventsets, each consisting of a list of counters+events and a list of metrics. If you compare a common performance group with the example setting above, there is not much difference:
```
@@ -140,9 +108,12 @@ METRICS -> "metrics": [
IPC PMC0/PMC1 -> {
-> "name" : "IPC",
-> "calc" : "PMC0/PMC1",
-> "scope": "cpu",
-> "socket_scope": false,
-> "publish": true
-> }
-> ]
```
The `socket_scope` option tells whether the metric is submitted per socket or per hwthread. If a metric is only used for internal calculations, you can set `publish = false`.
Since some metrics can only be gathered with multiple measurements (like the memory bandwidth on AMD Zen3 chips), configure multiple eventsets as in the example config and use the `globalmetrics` section to combine them. **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases.
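To make the combination step concrete: the metrics computed per eventset become the variables available to a global metric formula such as `mem1+mem2`. A minimal sketch with hypothetical values for `mem1` and `mem2` (the real collector evaluates the configured formula string instead of hard-coding the sum):
```go
package main

import "fmt"

func main() {
	// Hypothetical metric results from two eventsets for one socket,
	// as produced by the "metrics" sections of the eventsets.
	eventsetResults := []map[string]float64{
		{"mem1": 20.5}, // eventset 0: bandwidth over the first set of DRAM channels
		{"mem2": 19.8}, // eventset 1: bandwidth over the second set of DRAM channels
	}

	// Merge all eventset metrics into one parameter set ...
	params := make(map[string]float64)
	for _, res := range eventsetResults {
		for name, value := range res {
			params[name] = value
		}
	}

	// ... and evaluate the global metric "mem_bw" with formula "mem1+mem2".
	memBW := params["mem1"] + params["mem2"]
	fmt.Printf("mem_bw = %.1f\n", memBW)
}
```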

View File

@@ -2,36 +2,25 @@ package collectors
import (
"encoding/json"
"fmt"
"io/ioutil"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
//
// LoadavgCollector collects:
// * load average of last 1, 5 & 15 minutes
// * number of processes currently runnable
// * total number of processes in system
//
// See: https://www.kernel.org/doc/html/latest/filesystems/proc.html
//
const LOADAVGFILE = "/proc/loadavg"
const LOADAVGFILE = `/proc/loadavg`
type LoadavgCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
type LoadavgCollector struct {
metricCollector
tags map[string]string
load_matches []string
load_skips []bool
proc_matches []string
proc_skips []bool
config struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
config LoadavgCollectorConfig
}
func (m *LoadavgCollector) Init(config json.RawMessage) error {
@@ -43,82 +32,46 @@ func (m *LoadavgCollector) Init(config json.RawMessage) error {
return err
}
}
m.meta = map[string]string{
"source": m.name,
"group": "LOAD"}
m.meta = map[string]string{"source": m.name, "group": "LOAD"}
m.tags = map[string]string{"type": "node"}
m.load_matches = []string{
"load_one",
"load_five",
"load_fifteen"}
m.load_skips = make([]bool, len(m.load_matches))
m.proc_matches = []string{
"proc_run",
"proc_total"}
m.proc_skips = make([]bool, len(m.proc_matches))
for i, name := range m.load_matches {
_, m.load_skips[i] = stringArrayContains(m.config.ExcludeMetrics, name)
}
for i, name := range m.proc_matches {
_, m.proc_skips[i] = stringArrayContains(m.config.ExcludeMetrics, name)
}
m.load_matches = []string{"load_one", "load_five", "load_fifteen"}
m.proc_matches = []string{"proc_run", "proc_total"}
m.init = true
return nil
}
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var skip bool
if !m.init {
return
}
buffer, err := ioutil.ReadFile(LOADAVGFILE)
buffer, err := ioutil.ReadFile(string(LOADAVGFILE))
if err != nil {
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
}
return
}
now := time.Now()
// Load metrics
ls := strings.Split(string(buffer), ` `)
for i, name := range m.load_matches {
x, err := strconv.ParseFloat(ls[i], 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert '%s' to float64: %v", ls[i], err))
continue
}
if m.load_skips[i] {
continue
}
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil {
output <- y
_, skip = stringArrayContains(m.config.ExcludeMetrics, name)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": float64(x)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
// Process metrics
lv := strings.Split(ls[3], `/`)
for i, name := range m.proc_matches {
x, err := strconv.ParseInt(lv[i], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert '%s' to float64: %v", lv[i], err))
continue
}
if m.proc_skips[i] {
continue
}
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
x, err := strconv.ParseFloat(lv[i], 64)
if err == nil {
output <- y
_, skip = stringArrayContains(m.config.ExcludeMetrics, name)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": float64(x)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
}

View File

@@ -3,83 +3,29 @@ package collectors
import (
"encoding/json"
"errors"
"fmt"
"os/exec"
"os/user"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const LUSTRE_SYSFS = `/sys/fs/lustre`
const LCTL_CMD = `lctl`
const LCTL_OPTION = `get_param`
const LUSTREFILE = `/proc/fs/lustre/llite/lnec-XXXXXX/stats`
type LustreCollectorConfig struct {
LCtlCommand string `json:"lctl_command"`
Procfiles []string `json:"procfiles"`
ExcludeMetrics []string `json:"exclude_metrics"`
SendAllMetrics bool `json:"send_all_metrics"`
}
type LustreCollector struct {
metricCollector
tags map[string]string
matches map[string]map[string]int
stats map[string]map[string]int64
devices []string
config LustreCollectorConfig
lctl string
}
func (m *LustreCollector) getDeviceDataCommand(device string) []string {
statsfile := fmt.Sprintf("llite.%s.stats", device)
command := exec.Command(m.lctl, LCTL_OPTION, statsfile)
command.Wait()
stdout, _ := command.Output()
return strings.Split(string(stdout), "\n")
}
func (m *LustreCollector) getDevices() []string {
devices := make([]string, 0)
// //Version reading devices from sysfs
// globPattern := filepath.Join(LUSTRE_SYSFS, "llite/*/stats")
// files, err := filepath.Glob(globPattern)
// if err != nil {
// return devices
// }
// for _, f := range files {
// pathlist := strings.Split(f, "/")
// devices = append(devices, pathlist[4])
// }
data := m.getDeviceDataCommand("*")
for _, line := range data {
if strings.HasPrefix(line, "llite") {
linefields := strings.Split(line, ".")
if len(linefields) > 2 {
devices = append(devices, linefields[1])
}
}
}
return devices
}
// //Version reading the stats data of a device from sysfs
// func (m *LustreCollector) getDeviceDataSysfs(device string) []string {
// llitedir := filepath.Join(LUSTRE_SYSFS, "llite")
// devdir := filepath.Join(llitedir, device)
// statsfile := filepath.Join(devdir, "stats")
// buffer, err := ioutil.ReadFile(statsfile)
// if err != nil {
// return make([]string, 0)
// }
// return strings.Split(string(buffer), "\n")
// }
func (m *LustreCollector) Init(config json.RawMessage) error {
var err error
m.name = "LustreCollector"
@@ -92,63 +38,27 @@ func (m *LustreCollector) Init(config json.RawMessage) error {
m.setup()
m.tags = map[string]string{"type": "node"}
m.meta = map[string]string{"source": m.name, "group": "Lustre"}
defmatches := map[string]map[string]int{
"read_bytes": {"lustre_read_bytes": 6, "lustre_read_requests": 1},
"write_bytes": {"lustre_write_bytes": 6, "lustre_write_requests": 1},
"open": {"lustre_open": 1},
"close": {"lustre_close": 1},
"setattr": {"lustre_setattr": 1},
"getattr": {"lustre_getattr": 1},
"statfs": {"lustre_statfs": 1},
"inode_permission": {"lustre_inode_permission": 1}}
// Lustre file system statistics can only be queried by user root
user, err := user.Current()
if err != nil {
cclog.ComponentError(m.name, "Failed to get current user:", err.Error())
return err
}
if user.Uid != "0" {
cclog.ComponentError(m.name, "Lustre file system statistics can only be queried by user root:", err.Error())
return err
}
m.matches = make(map[string]map[string]int)
for lineprefix, names := range defmatches {
for metricname, offset := range names {
_, skip := stringArrayContains(m.config.ExcludeMetrics, metricname)
if skip {
continue
}
if _, prefixExist := m.matches[lineprefix]; !prefixExist {
m.matches[lineprefix] = make(map[string]int)
}
if _, metricExist := m.matches[lineprefix][metricname]; !metricExist {
m.matches[lineprefix][metricname] = offset
}
m.matches = map[string]map[string]int{"read_bytes": {"read_bytes": 6, "read_requests": 1},
"write_bytes": {"write_bytes": 6, "write_requests": 1},
"open": {"open": 1},
"close": {"close": 1},
"setattr": {"setattr": 1},
"getattr": {"getattr": 1},
"statfs": {"statfs": 1},
"inode_permission": {"inode_permission": 1}}
m.devices = make([]string, 0)
for _, p := range m.config.Procfiles {
_, err := ioutil.ReadFile(p)
if err == nil {
m.devices = append(m.devices, p)
} else {
log.Print(err.Error())
continue
}
}
p, err := exec.LookPath(m.config.LCtlCommand)
if err != nil {
p, err = exec.LookPath(LCTL_CMD)
if err != nil {
return err
}
}
m.lctl = p
devices := m.getDevices()
if len(devices) == 0 {
return errors.New("no metrics to collect")
}
m.stats = make(map[string]map[string]int64)
for _, d := range devices {
m.stats[d] = make(map[string]int64)
for _, names := range m.matches {
for metricname := range names {
m.stats[d][metricname] = 0
}
}
if len(m.devices) == 0 {
return errors.New("No metrics to collect")
}
m.init = true
return nil
@@ -158,53 +68,39 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMetric)
if !m.init {
return
}
for device, devData := range m.stats {
stats := m.getDeviceDataCommand(device)
processed := []string{}
for _, p := range m.devices {
buffer, err := ioutil.ReadFile(p)
for _, line := range stats {
if err != nil {
log.Print(err)
return
}
for _, line := range strings.Split(string(buffer), "\n") {
lf := strings.Fields(line)
if len(lf) > 1 {
if fields, ok := m.matches[lf[0]]; ok {
for name, idx := range fields {
x, err := strconv.ParseInt(lf[idx], 0, 64)
if err != nil {
continue
}
value := x - devData[name]
devData[name] = x
if value < 0 {
value = 0
}
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil {
y.AddTag("device", device)
if strings.Contains(name, "byte") {
y.AddMeta("unit", "Byte")
for match, fields := range m.matches {
if lf[0] == match {
for name, idx := range fields {
_, skip := stringArrayContains(m.config.ExcludeMetrics, name)
if skip {
continue
}
output <- y
if m.config.SendAllMetrics {
processed = append(processed, name)
x, err := strconv.ParseInt(lf[idx], 0, 64)
if err == nil {
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, time.Now())
if err == nil {
if strings.Contains(name, "byte") {
y.AddMeta("unit", "Byte")
}
output <- y
}
}
}
}
}
}
}
if m.config.SendAllMetrics {
for name := range devData {
if _, done := stringArrayContains(processed, name); !done {
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": 0}, time.Now())
if err == nil {
y.AddTag("device", device)
if strings.Contains(name, "byte") {
y.AddMeta("unit", "Byte")
}
output <- y
}
}
}
}
}
}

View File

@@ -10,6 +10,7 @@ import (
"time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
)
type MetricCollector interface {
@@ -114,6 +115,24 @@ func CpuList() []int {
return cpulist
}
// Tags2Map stores a InfluxDB list of tags in a map of key value pairs
func Tags2Map(metric influx.Metric) map[string]string {
tags := make(map[string]string)
for _, t := range metric.TagList() {
tags[t.Key] = t.Value
}
return tags
}
// Fields2Map stores a InfluxDB list of fields in a map of key value pairs
func Fields2Map(metric influx.Metric) map[string]interface{} {
fields := make(map[string]interface{})
for _, f := range metric.FieldList() {
fields[f.Key] = f.Value
}
return fields
}
// RemoveFromStringList removes the string r from the array of strings s
// If r is not contained in the array an error is returned
func RemoveFromStringList(s []string, r string) ([]string, error) {

View File

@@ -1,138 +1,92 @@
package collectors
import (
"bufio"
"encoding/json"
"errors"
"os"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const NETSTATFILE = `/proc/net/dev`
type NetstatCollectorConfig struct {
IncludeDevices []string `json:"include_devices"`
}
type NetstatCollectorMetric struct {
index int
lastValue float64
ExcludeDevices []string `json:"exclude_devices"`
}
type NetstatCollector struct {
metricCollector
config NetstatCollectorConfig
matches map[string]map[string]NetstatCollectorMetric
devtags map[string]map[string]string
lastTimestamp time.Time
config NetstatCollectorConfig
matches map[int]string
}
func (m *NetstatCollector) Init(config json.RawMessage) error {
m.name = "NetstatCollector"
m.setup()
m.lastTimestamp = time.Now()
m.meta = map[string]string{"source": m.name, "group": "Network"}
m.devtags = make(map[string]map[string]string)
nameIndexMap := map[string]int{
"net_bytes_in": 1,
"net_pkts_in": 2,
"net_bytes_out": 9,
"net_pkts_out": 10,
m.meta = map[string]string{"source": m.name, "group": "Memory"}
m.matches = map[int]string{
1: "bytes_in",
9: "bytes_out",
2: "pkts_in",
10: "pkts_out",
}
m.matches = make(map[string]map[string]NetstatCollectorMetric)
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
log.Print(err.Error())
return err
}
}
file, err := os.Open(string(NETSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return err
_, err := ioutil.ReadFile(string(NETSTATFILE))
if err == nil {
m.init = true
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
l := scanner.Text()
if !strings.Contains(l, ":") {
continue
}
f := strings.Fields(l)
dev := strings.Trim(f[0], ": ")
if _, ok := stringArrayContains(m.config.IncludeDevices, dev); ok {
m.matches[dev] = make(map[string]NetstatCollectorMetric)
for name, idx := range nameIndexMap {
m.matches[dev][name] = NetstatCollectorMetric{
index: idx,
lastValue: 0,
}
}
m.devtags[dev] = map[string]string{"device": dev, "type": "node"}
}
}
if len(m.devtags) == 0 {
return errors.New("no devices to collector metrics found")
}
m.init = true
return nil
}
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
now := time.Now()
file, err := os.Open(string(NETSTATFILE))
data, err := ioutil.ReadFile(string(NETSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
log.Print(err.Error())
return
}
defer file.Close()
tdiff := now.Sub(m.lastTimestamp)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
l := scanner.Text()
lines := strings.Split(string(data), "\n")
for _, l := range lines {
if !strings.Contains(l, ":") {
continue
}
f := strings.Fields(l)
dev := strings.Trim(f[0], ":")
if devmetrics, ok := m.matches[dev]; ok {
for name, data := range devmetrics {
v, err := strconv.ParseFloat(f[data.index], 64)
dev := f[0][0 : len(f[0])-1]
cont := false
for _, d := range m.config.ExcludeDevices {
if d == dev {
cont = true
}
}
if cont {
continue
}
tags := map[string]string{"device": dev, "type": "node"}
for i, name := range m.matches {
v, err := strconv.ParseInt(f[i], 10, 0)
if err == nil {
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": int(float64(v) * 1.0e-3)}, time.Now())
if err == nil {
vdiff := v - data.lastValue
value := vdiff / tdiff.Seconds()
if data.lastValue == 0 {
value = 0
switch {
case strings.Contains(name, "byte"):
y.AddMeta("unit", "Byte")
case strings.Contains(name, "pkt"):
y.AddMeta("unit", "Packets")
}
data.lastValue = v
y, err := lp.New(name, m.devtags[dev], m.meta, map[string]interface{}{"value": value}, now)
if err == nil {
switch {
case strings.Contains(name, "byte"):
y.AddMeta("unit", "bytes/sec")
case strings.Contains(name, "pkt"):
y.AddMeta("unit", "packets/sec")
}
output <- y
}
devmetrics[name] = data
output <- y
}
}
}
}
m.lastTimestamp = time.Now()
}
func (m *NetstatCollector) Close() {

View File

@@ -3,19 +3,19 @@
```json
"netstat": {
"include_devices": [
"eth0"
"exclude_devices": [
"lo"
]
}
```
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful of **node** metrics. With the `include_devices` list you can specify which network devices should be measured. **Note**: Most other collectors use an _exclude_ list instead of an include list.
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful of **node** metrics. If a device is not required, it can be excluded from being forwarded to the sink. Commonly the `lo` device should be excluded.
Metrics:
* `net_bytes_in` (`unit=bytes/sec`)
* `net_bytes_out` (`unit=bytes/sec`)
* `net_pkts_in` (`unit=packets/sec`)
* `net_pkts_out` (`unit=packets/sec`)
* `bytes_in`
* `bytes_out`
* `pkts_in`
* `pkts_out`
The device name is added as tag `device`.
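The per-second values are derived from two consecutive readings of the raw counters; a minimal sketch of that rate computation (function and variable names are illustrative, not the collector's API):
```go
package main

import (
	"fmt"
	"time"
)

// rate converts two readings of a monotonically increasing counter
// (e.g. the byte counters in /proc/net/dev) into a per-second value.
func rate(last, current float64, elapsed time.Duration) float64 {
	if last == 0 { // first reading: no meaningful rate yet
		return 0
	}
	return (current - last) / elapsed.Seconds()
}

func main() {
	// Hypothetical byte counters read 10 seconds apart.
	fmt.Printf("net_bytes_in = %.1f bytes/sec\n", rate(1.0e9, 1.5e9, 10*time.Second))
}
```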

View File

@@ -1,39 +0,0 @@
## `nfs3stat` collector
```json
"nfs3stat": {
"nfsstat" : "/path/to/nfsstat",
"exclude_metrics": [
"nfs3_total"
]
}
```
The `nfs3stat` collector reads data from the `nfsstat` command and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink. There is currently no way to get the metrics per mount point. A short parsing sketch follows the metric list below.
Metrics:
* `nfs3_total`
* `nfs3_null`
* `nfs3_getattr`
* `nfs3_setattr`
* `nfs3_lookup`
* `nfs3_access`
* `nfs3_readlink`
* `nfs3_read`
* `nfs3_write`
* `nfs3_create`
* `nfs3_mkdir`
* `nfs3_symlink`
* `nfs3_remove`
* `nfs3_rmdir`
* `nfs3_rename`
* `nfs3_link`
* `nfs3_readdir`
* `nfs3_readdirplus`
* `nfs3_fsstat`
* `nfs3_fsinfo`
* `nfs3_pathconf`
* `nfs3_commit`
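As a rough illustration, a line of `nfsstat -l` output can be split into NFS version, operation name and counter value like this (the sample line and field positions are assumptions about the `nfsstat -l` output format):
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative `nfsstat -l` style line: tool, NFS version, side, operation, counter.
	line := "nfs v3 client read: 124524"
	lf := strings.Fields(line)
	if len(lf) != 5 {
		return
	}
	version := lf[1]                 // "v3" or "v4"
	name := strings.Trim(lf[3], ":") // operation name, e.g. "read"
	value, err := strconv.ParseInt(lf[4], 10, 64)
	if err != nil {
		return
	}
	// "v3" + "read" -> metric name "nfs3_read"
	fmt.Printf("%s_%s = %d\n", strings.Replace(version, "v", "nfs", 1), name, value)
}
```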

View File

@@ -1,62 +0,0 @@
## `nfs4stat` collector
```json
"nfs4stat": {
"nfsstat" : "/path/to/nfsstat",
"exclude_metrics": [
"nfs4_total"
]
}
```
The `nfs4stat` collector reads data from the `nfsstat` command and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink. There is currently no way to get the metrics per mount point.
Metrics:
* `nfs4_total`
* `nfs4_null`
* `nfs4_read`
* `nfs4_write`
* `nfs4_commit`
* `nfs4_open`
* `nfs4_open_conf`
* `nfs4_open_noat`
* `nfs4_open_dgrd`
* `nfs4_close`
* `nfs4_setattr`
* `nfs4_fsinfo`
* `nfs4_renew`
* `nfs4_setclntid`
* `nfs4_confirm`
* `nfs4_lock`
* `nfs4_lockt`
* `nfs4_locku`
* `nfs4_access`
* `nfs4_getattr`
* `nfs4_lookup`
* `nfs4_lookup_root`
* `nfs4_remove`
* `nfs4_rename`
* `nfs4_link`
* `nfs4_symlink`
* `nfs4_create`
* `nfs4_pathconf`
* `nfs4_statfs`
* `nfs4_readlink`
* `nfs4_readdir`
* `nfs4_server_caps`
* `nfs4_delegreturn`
* `nfs4_getacl`
* `nfs4_setacl`
* `nfs4_rel_lkowner`
* `nfs4_exchange_id`
* `nfs4_create_session`
* `nfs4_destroy_session`
* `nfs4_sequence`
* `nfs4_get_lease_time`
* `nfs4_reclaim_comp`
* `nfs4_secinfo_no`
* `nfs4_bind_conn_to_ses`

View File

@@ -14,29 +14,23 @@ import (
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// First part contains the code for the general NfsCollector.
// Later, the general NfsCollector is more limited to Nfs3- and Nfs4Collector.
const NFSSTAT_EXEC = `nfsstat`
type NfsCollectorData struct {
current int64
last int64
}
type nfsCollector struct {
type NfsCollector struct {
metricCollector
tags map[string]string
version string
config struct {
Nfsstats string `json:"nfsstat"`
tags map[string]string
config struct {
Nfsutils string `json:"nfsutils"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
data map[string]NfsCollectorData
data map[string]map[string]NfsCollectorData
}
func (m *nfsCollector) initStats() error {
cmd := exec.Command(m.config.Nfsstats, `-l`)
func (m *NfsCollector) initStats() error {
cmd := exec.Command(m.config.Nfsutils, "-l")
cmd.Wait()
buffer, err := cmd.Output()
if err == nil {
@@ -45,16 +39,17 @@ func (m *nfsCollector) initStats() error {
if len(lf) != 5 {
continue
}
if lf[1] == m.version {
name := strings.Trim(lf[3], ":")
if _, exist := m.data[name]; !exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[name]
x.current = value
x.last = 0
m.data[name] = x
}
if _, exist := m.data[lf[1]]; !exist {
m.data[lf[1]] = make(map[string]NfsCollectorData)
}
name := strings.Trim(lf[3], ":")
if _, exist := m.data[lf[1]][name]; !exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[lf[1]][name]
x.current = value
x.last = 0
m.data[lf[1]][name] = x
}
}
}
@@ -62,8 +57,8 @@ func (m *nfsCollector) initStats() error {
return err
}
func (m *nfsCollector) updateStats() error {
cmd := exec.Command(m.config.Nfsstats, `-l`)
func (m *NfsCollector) updateStats() error {
cmd := exec.Command(m.config.Nfsutils, "-l")
cmd.Wait()
buffer, err := cmd.Output()
if err == nil {
@@ -72,16 +67,17 @@ func (m *nfsCollector) updateStats() error {
if len(lf) != 5 {
continue
}
if lf[1] == m.version {
name := strings.Trim(lf[3], ":")
if _, exist := m.data[name]; exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[name]
x.last = x.current
x.current = value
m.data[name] = x
}
if _, exist := m.data[lf[1]]; !exist {
m.data[lf[1]] = make(map[string]NfsCollectorData)
}
name := strings.Trim(lf[3], ":")
if _, exist := m.data[lf[1]][name]; exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[lf[1]][name]
x.last = x.current
x.current = value
m.data[lf[1]][name] = x
}
}
}
@@ -89,11 +85,17 @@ func (m *nfsCollector) updateStats() error {
return err
}
func (m *nfsCollector) MainInit(config json.RawMessage) error {
m.config.Nfsstats = string(NFSSTAT_EXEC)
func (m *NfsCollector) Init(config json.RawMessage) error {
var err error
m.name = "NfsCollector"
m.setup()
// Set default mmpmon binary
m.config.Nfsutils = "/usr/sbin/nfsstat"
// Read JSON configuration
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
err = json.Unmarshal(config, &m.config)
if err != nil {
log.Print(err.Error())
return err
@@ -106,69 +108,40 @@ func (m *nfsCollector) MainInit(config json.RawMessage) error {
m.tags = map[string]string{
"type": "node",
}
// Check if nfsstat is in executable search path
_, err := exec.LookPath(m.config.Nfsstats)
// Check if mmpmon is in executable search path
_, err = exec.LookPath(m.config.Nfsutils)
if err != nil {
return fmt.Errorf("NfsCollector.Init(): Failed to find nfsstat binary '%s': %v", m.config.Nfsstats, err)
return fmt.Errorf("NfsCollector.Init(): Failed to find nfsstat binary '%s': %v", m.config.Nfsutils, err)
}
m.data = make(map[string]NfsCollectorData)
m.data = make(map[string]map[string]NfsCollectorData)
m.initStats()
m.init = true
return nil
}
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
func (m *NfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
timestamp := time.Now()
m.updateStats()
prefix := ""
switch m.version {
case "v3":
prefix = "nfs3"
case "v4":
prefix = "nfs4"
default:
prefix = "nfs"
}
for name, data := range m.data {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, name); skip {
continue
}
value := data.current - data.last
y, err := lp.New(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddMeta("version", m.version)
output <- y
for version, metrics := range m.data {
for name, data := range metrics {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, name); skip {
continue
}
value := data.current - data.last
y, err := lp.New(fmt.Sprintf("nfs_%s", name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddMeta("version", version)
output <- y
}
}
}
}
func (m *nfsCollector) Close() {
func (m *NfsCollector) Close() {
m.init = false
}
type Nfs3Collector struct {
nfsCollector
}
type Nfs4Collector struct {
nfsCollector
}
func (m *Nfs3Collector) Init(config json.RawMessage) error {
m.name = "Nfs3Collector"
m.version = `v3`
m.setup()
return m.MainInit(config)
}
func (m *Nfs4Collector) Init(config json.RawMessage) error {
m.name = "Nfs4Collector"
m.version = `v4`
m.setup()
return m.MainInit(config)
}

View File

@@ -1,139 +0,0 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
//
// Numa policy hit/miss statistics
//
// numa_hit:
// A process wanted to allocate memory from this node, and succeeded.
// numa_miss:
// A process wanted to allocate memory from another node,
// but ended up with memory from this node.
// numa_foreign:
// A process wanted to allocate on this node,
// but ended up with memory from another node.
// local_node:
// A process ran on this node's CPU,
// and got memory from this node.
// other_node:
// A process ran on a different node's CPU
// and got memory from this node.
// interleave_hit:
// Interleaving wanted to allocate from this node
// and succeeded.
//
// See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
//
type NUMAStatsCollectorTopolgy struct {
file string
tagSet map[string]string
}
type NUMAStatsCollector struct {
metricCollector
topology []NUMAStatsCollectorTopolgy
}
func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "NUMAStatsCollector"
m.setup()
m.meta = map[string]string{
"source": m.name,
"group": "NUMA",
}
// Loop for all NUMA node directories
base := "/sys/devices/system/node/node"
globPattern := base + "[0-9]*"
dirs, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("unable to glob files with pattern '%s'", globPattern)
}
if dirs == nil {
return fmt.Errorf("unable to find any files with pattern '%s'", globPattern)
}
m.topology = make([]NUMAStatsCollectorTopolgy, 0, len(dirs))
for _, dir := range dirs {
node := strings.TrimPrefix(dir, base)
file := filepath.Join(dir, "numastat")
m.topology = append(m.topology,
NUMAStatsCollectorTopolgy{
file: file,
tagSet: map[string]string{"memoryDomain": node},
})
}
m.init = true
return nil
}
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
for i := range m.topology {
// Loop for all NUMA domains
t := &m.topology[i]
now := time.Now()
file, err := os.Open(t.file)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to open file '%s': %v", t.file, err))
return
}
scanner := bufio.NewScanner(file)
// Read line by line
for scanner.Scan() {
split := strings.Fields(scanner.Text())
if len(split) != 2 {
continue
}
key := split[0]
value, err := strconv.ParseInt(split[1], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err))
continue
}
y, err := lp.New(
"numastats_"+key,
t.tagSet,
m.meta,
map[string]interface{}{"value": value},
now,
)
if err == nil {
output <- y
}
}
file.Close()
}
}
func (m *NUMAStatsCollector) Close() {
m.init = false
}

View File

@@ -1,15 +0,0 @@
## `numastat` collector
```json
"numastat": {}
```
The `numastat` collector reads data from `/sys/devices/system/node/node*/numastat` and outputs a handful of **memoryDomain** metrics; a parsing sketch follows the metric list below. See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
Metrics:
* `numastats_numa_hit`: A process wanted to allocate memory from this node, and succeeded.
* `numastats_numa_miss`: A process wanted to allocate memory from another node, but ended up with memory from this node.
* `numastats_numa_foreign`: A process wanted to allocate on this node, but ended up with memory from another node.
* `numastats_local_node`: A process ran on this node's CPU, and got memory from this node.
* `numastats_other_node`: A process ran on a different node's CPU, and got memory from this node.
* `numastats_interleave_hit`: Interleaving wanted to allocate from this node and succeeded.
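Each `numastat` file is a plain two-column key/value list; a minimal sketch of turning such content into the metrics listed above (the sample content and node id are illustrative):
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative content of /sys/devices/system/node/node0/numastat.
	content := "numa_hit 1280572\nnuma_miss 23\nlocal_node 1280000"
	for _, line := range strings.Split(content, "\n") {
		split := strings.Fields(line)
		if len(split) != 2 {
			continue
		}
		value, err := strconv.ParseInt(split[1], 10, 64)
		if err != nil {
			continue
		}
		// Metric name gets the "numastats_" prefix, the node id becomes the memoryDomain tag.
		fmt.Printf("numastats_%s{memoryDomain=0} %d\n", split[0], value)
	}
}
```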

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"log"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/nvml"
)
@@ -15,20 +13,12 @@ import (
type NvidiaCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeDevices []string `json:"exclude_devices,omitempty"`
AddPciInfoTag bool `json:"add_pci_info_tag,omitempty"`
}
type NvidiaCollectorDevice struct {
device nvml.Device
excludeMetrics map[string]bool
tags map[string]string
}
type NvidiaCollector struct {
metricCollector
num_gpus int
config NvidiaCollectorConfig
gpus []NvidiaCollectorDevice
}
func (m *NvidiaCollector) CatchPanic() {
@@ -41,86 +31,26 @@ func (m *NvidiaCollector) CatchPanic() {
func (m *NvidiaCollector) Init(config json.RawMessage) error {
var err error
m.name = "NvidiaCollector"
m.config.AddPciInfoTag = false
m.setup()
m.meta = map[string]string{"source": m.name, "group": "Nvidia"}
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
m.meta = map[string]string{
"source": m.name,
"group": "Nvidia",
}
m.num_gpus = 0
defer m.CatchPanic()
// Initialize NVIDIA Management Library (NVML)
ret := nvml.Init()
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to initialize NVML", err.Error())
return err
}
// Number of NVIDIA GPUs
num_gpus, ret := nvml.DeviceGetCount()
m.num_gpus, ret = nvml.DeviceGetCount()
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to get device count", err.Error())
return err
}
// For all GPUs
m.gpus = make([]NvidiaCollectorDevice, num_gpus)
for i := 0; i < num_gpus; i++ {
g := &m.gpus[i]
// Skip excluded devices
str_i := fmt.Sprintf("%d", i)
if _, skip := stringArrayContains(m.config.ExcludeDevices, str_i); skip {
continue
}
// Get device handle
device, ret := nvml.DeviceGetHandleByIndex(i)
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to get device at index", i, ":", err.Error())
return err
}
g.device = device
// Add tags
g.tags = map[string]string{
"type": "accelerator",
"type-id": str_i,
}
// Add excluded metrics
g.excludeMetrics = map[string]bool{}
for _, e := range m.config.ExcludeMetrics {
g.excludeMetrics[e] = true
}
// Add PCI info as tag
if m.config.AddPciInfoTag {
pciInfo, ret := nvml.DeviceGetPciInfo(g.device)
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to get PCI info for device at index", i, ":", err.Error())
return err
}
g.tags["pci_identifier"] = fmt.Sprintf(
"%08X:%02X:%02X.0",
pciInfo.Domain,
pciInfo.Bus,
pciInfo.Device)
}
}
m.init = true
return nil
}
@@ -129,335 +59,207 @@ func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMetric)
if !m.init {
return
}
for i := 0; i < m.num_gpus; i++ {
device, ret := nvml.DeviceGetHandleByIndex(i)
if ret != nvml.SUCCESS {
log.Fatalf("Unable to get device at index %d: %v", i, nvml.ErrorString(ret))
return
}
_, skip := stringArrayContains(m.config.ExcludeDevices, fmt.Sprintf("%d", i))
if skip {
continue
}
tags := map[string]string{"type": "accelerator", "type-id": fmt.Sprintf("%d", i)}
for i := range m.gpus {
device := &m.gpus[i]
if !device.excludeMetrics["nv_util"] || !device.excludeMetrics["nv_mem_util"] {
// Retrieves the current utilization rates for the device's major subsystems.
//
// Available utilization rates
// * Gpu: Percent of time over the past sample period during which one or more kernels was executing on the GPU.
// * Memory: Percent of time over the past sample period during which global (device) memory was being read or written
//
// Note:
// * During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
// This is caused by ECC Memory Scrubbing mechanism that is performed during driver initialization.
// * On MIG-enabled GPUs, querying device utilization rates is not currently supported.
util, ret := nvml.DeviceGetUtilizationRates(device.device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_util"] {
y, err := lp.New("nv_util", device.tags, m.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
}
if !device.excludeMetrics["nv_mem_util"] {
y, err := lp.New("nv_mem_util", device.tags, m.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
}
util, ret := nvml.DeviceGetUtilizationRates(device)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_util")
y, err := lp.New("nv_util", tags, m.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
if err == nil && !skip {
output <- y
}
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_mem_util")
y, err = lp.New("nv_mem_util", tags, m.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_mem_total"] || !device.excludeMetrics["nv_fb_memory"] {
// Retrieves the amount of used, free and total memory available on the device, in bytes.
//
// Enabling ECC reduces the amount of total available memory, due to the extra required parity bits.
//
// The reported amount of used memory is equal to the sum of memory allocated by all active channels on the device.
//
// Available memory info:
// * Free: Unallocated FB memory (in bytes).
// * Total: Total installed FB memory (in bytes).
// * Used: Allocated FB memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping.
//
// Note:
// In MIG mode, if device handle is provided, the API returns aggregate information, only if the caller has appropriate privileges.
// Per-instance information can be queried by using specific MIG device handles.
meminfo, ret := nvml.DeviceGetMemoryInfo(device.device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_mem_total"] {
t := float64(meminfo.Total) / (1024 * 1024)
y, err := lp.New("nv_mem_total", device.tags, m.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
}
}
if !device.excludeMetrics["nv_fb_memory"] {
f := float64(meminfo.Used) / (1024 * 1024)
y, err := lp.New("nv_fb_memory", device.tags, m.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
}
}
meminfo, ret := nvml.DeviceGetMemoryInfo(device)
if ret == nvml.SUCCESS {
t := float64(meminfo.Total) / (1024 * 1024)
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_mem_total")
y, err := lp.New("nv_mem_total", tags, m.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil && !skip {
y.AddMeta("unit", "MByte")
output <- y
}
f := float64(meminfo.Used) / (1024 * 1024)
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_fb_memory")
y, err = lp.New("nv_fb_memory", tags, m.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil && !skip {
y.AddMeta("unit", "MByte")
output <- y
}
}
if !device.excludeMetrics["nv_temp"] {
// Retrieves the current temperature readings for the device, in degrees C.
//
// Available temperature sensors:
// * TEMPERATURE_GPU: Temperature sensor for the GPU die.
// * NVML_TEMPERATURE_COUNT
temp, ret := nvml.DeviceGetTemperature(device.device, nvml.TEMPERATURE_GPU)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_temp", device.tags, m.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
if err == nil {
y.AddMeta("unit", "degC")
output <- y
}
temp, ret := nvml.DeviceGetTemperature(device, nvml.TEMPERATURE_GPU)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_temp")
y, err := lp.New("nv_temp", tags, m.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
if err == nil && !skip {
y.AddMeta("unit", "degC")
output <- y
}
}
if !device.excludeMetrics["nv_fan"] {
// Retrieves the intended operating speed of the device's fan.
//
// Note: The reported speed is the intended fan speed.
// If the fan is physically blocked and unable to spin, the output will not match the actual fan speed.
//
// For all discrete products with dedicated fans.
//
// The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
// This value may exceed 100% in certain cases.
fan, ret := nvml.DeviceGetFanSpeed(device.device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_fan", device.tags, m.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
fan, ret := nvml.DeviceGetFanSpeed(device)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_fan")
y, err := lp.New("nv_fan", tags, m.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_ecc_mode"] {
// Retrieves the current and pending ECC modes for the device.
//
// For Fermi or newer fully supported devices. Only applicable to devices with ECC.
// Requires NVML_INFOROM_ECC version 1.0 or higher.
//
// Changing ECC modes requires a reboot.
// The "pending" ECC mode refers to the target mode following the next reboot.
_, ecc_pend, ret := nvml.DeviceGetEccMode(device.device)
if ret == nvml.SUCCESS {
var y lp.CCMetric
var err error
switch ecc_pend {
case nvml.FEATURE_DISABLED:
y, err = lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "OFF"}, time.Now())
case nvml.FEATURE_ENABLED:
y, err = lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "ON"}, time.Now())
default:
y, err = lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
}
if err == nil {
output <- y
}
} else if ret == nvml.ERROR_NOT_SUPPORTED {
y, err := lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "N/A"}, time.Now())
if err == nil {
output <- y
}
_, ecc_pend, ret := nvml.DeviceGetEccMode(device)
if ret == nvml.SUCCESS {
var y lp.CCMetric
var err error
switch ecc_pend {
case nvml.FEATURE_DISABLED:
y, err = lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("OFF")}, time.Now())
case nvml.FEATURE_ENABLED:
y, err = lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("ON")}, time.Now())
default:
y, err = lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("UNKNOWN")}, time.Now())
}
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_mode")
if err == nil && !skip {
output <- y
}
} else if ret == nvml.ERROR_NOT_SUPPORTED {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_mode")
y, err := lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("N/A")}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_perf_state"] {
// Retrieves the current performance state for the device.
//
// Allowed PStates:
// 0: Maximum Performance.
// ..
// 15: Minimum Performance.
// 32: Unknown performance state.
pState, ret := nvml.DeviceGetPerformanceState(device.device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_perf_state", device.tags, m.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
if err == nil {
output <- y
}
pstate, ret := nvml.DeviceGetPerformanceState(device)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_perf_state")
y, err := lp.New("nv_perf_state", tags, m.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pstate))}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_power_usage_report"] {
// Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
//
// On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw.
//
// It is only available if power management mode is supported
power, ret := nvml.DeviceGetPowerUsage(device.device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_power_usage_report", device.tags, m.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
output <- y
}
power, ret := nvml.DeviceGetPowerUsage(device)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_power_usage_report")
y, err := lp.New("nv_power_usage_report", tags, m.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
if err == nil && !skip {
output <- y
}
}
// Retrieves the current clock speeds for the device.
//
// Available clock information:
// * CLOCK_GRAPHICS: Graphics clock domain.
// * CLOCK_SM: Streaming Multiprocessor clock domain.
// * CLOCK_MEM: Memory clock domain.
if !device.excludeMetrics["nv_graphics_clock_report"] {
graphicsClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_graphics_clock_report", device.tags, m.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
}
gclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_graphics_clock_report")
y, err := lp.New("nv_graphics_clock_report", tags, m.meta, map[string]interface{}{"value": float64(gclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_sm_clock_report"] {
smClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_sm_clock_report", device.tags, m.meta, map[string]interface{}{"value": float64(smClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
}
smclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_sm_clock_report")
y, err := lp.New("nv_sm_clock_report", tags, m.meta, map[string]interface{}{"value": float64(smclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_mem_clock_report"] {
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_mem_clock_report", device.tags, m.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
}
memclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_mem_clock_report")
y, err := lp.New("nv_mem_clock_report", tags, m.meta, map[string]interface{}{"value": float64(memclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
// Retrieves the maximum clock speeds for the device.
//
// Available clock information:
// * CLOCK_GRAPHICS: Graphics clock domain.
// * CLOCK_SM: Streaming multiprocessor clock domain.
// * CLOCK_MEM: Memory clock domain.
// * CLOCK_VIDEO: Video encoder/decoder clock domain.
// * CLOCK_COUNT: Count of clock types.
//
// Note:
// On GPUs from the Fermi family, current P0 clocks (reported by nvmlDeviceGetClockInfo) can differ from max clocks by a few MHz.
if !device.excludeMetrics["nv_max_graphics_clock"] {
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_max_graphics_clock", device.tags, m.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
}
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_max_graphics_clock")
y, err := lp.New("nv_max_graphics_clock", tags, m.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_max_sm_clock"] {
maxSmClock, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_max_sm_clock", device.tags, m.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
}
max_smclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_max_sm_clock")
y, err := lp.New("nv_max_sm_clock", tags, m.meta, map[string]interface{}{"value": float64(max_smclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_max_mem_clock"] {
maxMemClock, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_max_mem_clock", device.tags, m.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
}
max_memclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_max_mem_clock")
y, err := lp.New("nv_max_mem_clock", tags, m.meta, map[string]interface{}{"value": float64(max_memclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_ecc_db_error"] {
// Retrieves the total ECC error counts for the device.
//
// For Fermi or newer fully supported devices.
// Only applicable to devices with ECC.
// Requires NVML_INFOROM_ECC version 1.0 or higher.
// Requires ECC Mode to be enabled.
//
// The total error count is the sum of errors across each of the separate memory systems,
// i.e. the total set of errors across the entire device.
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_UNCORRECTED, nvml.AGGREGATE_ECC)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_ecc_db_error", device.tags, m.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
if err == nil {
output <- y
}
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device, 1, 1)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_db_error")
y, err := lp.New("nv_ecc_db_error", tags, m.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_ecc_sb_error"] {
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_CORRECTED, nvml.AGGREGATE_ECC)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_ecc_sb_error", device.tags, m.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
if err == nil {
output <- y
}
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device, 0, 1)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_sb_error")
y, err := lp.New("nv_ecc_sb_error", tags, m.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_power_man_limit"] {
// Retrieves the power management limit associated with this device.
//
// For Fermi or newer fully supported devices.
//
// The power limit defines the upper boundary for the card's power draw.
// If the card's total power draw reaches this limit the power management algorithm kicks in.
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device.device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_power_man_limit", device.tags, m.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
output <- y
}
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_power_man_limit")
y, err := lp.New("nv_power_man_limit", tags, m.meta, map[string]interface{}{"value": float64(pwr_limit)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_encoder_util"] {
// Retrieves the current utilization and sampling size in microseconds for the Encoder
//
// For Kepler or newer fully supported devices.
//
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device.device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_encoder_util", device.tags, m.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_encoder_util")
y, err := lp.New("nv_encoder_util", tags, m.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_decoder_util"] {
// Retrieves the current utilization and sampling size in microseconds for the Decoder
//
// For Kepler or newer fully supported devices.
//
// Note: On MIG-enabled GPUs, querying decoder utilization is not currently supported.
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device.device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_decoder_util", device.tags, m.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device)
if ret == nvml.SUCCESS {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_decoder_util")
y, err := lp.New("nv_decoder_util", tags, m.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}

View File

@@ -4,227 +4,110 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// See: https://www.kernel.org/doc/html/latest/hwmon/sysfs-interface.html
// /sys/class/hwmon/hwmon*/name -> coretemp
// /sys/class/hwmon/hwmon*/temp*_label -> Core 0
// /sys/class/hwmon/hwmon*/temp*_input -> 27800 = 27.8°C
// /sys/class/hwmon/hwmon*/temp*_max -> 86000 = 86.0°C
// /sys/class/hwmon/hwmon*/temp*_crit -> 100000 = 100.0°C
const HWMON_PATH = `/sys/class/hwmon`
type TempCollectorSensor struct {
name string
label string
metricName string // Default: name_label
file string
maxTempName string
maxTemp int64
critTempName string
critTemp int64
tags map[string]string
type TempCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics"`
TagOverride map[string]map[string]string `json:"tag_override"`
}
type TempCollector struct {
metricCollector
config struct {
ExcludeMetrics []string `json:"exclude_metrics"`
TagOverride map[string]map[string]string `json:"tag_override"`
ReportMaxTemp bool `json:"report_max_temperature"`
ReportCriticalTemp bool `json:"report_critical_temperature"`
}
sensors []*TempCollectorSensor
config TempCollectorConfig
}
func (m *TempCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "TempCollector"
m.setup()
m.meta = map[string]string{"source": m.name, "group": "IPMI", "unit": "degC"}
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
m.meta = map[string]string{
"source": m.name,
"group": "IPMI",
"unit": "degC",
}
m.sensors = make([]*TempCollectorSensor, 0)
// Find all temperature sensor files
globPattern := filepath.Join("/sys/class/hwmon", "*", "temp*_input")
inputFiles, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("Unable to glob files with pattern '%s': %v", globPattern, err)
}
if inputFiles == nil {
return fmt.Errorf("Unable to find any files with pattern '%s'", globPattern)
}
// Get sensor name for each temperature sensor file
for _, file := range inputFiles {
sensor := new(TempCollectorSensor)
// sensor name
nameFile := filepath.Join(filepath.Dir(file), "name")
name, err := ioutil.ReadFile(nameFile)
if err == nil {
sensor.name = strings.TrimSpace(string(name))
}
// sensor label
labelFile := strings.TrimSuffix(file, "_input") + "_label"
label, err := ioutil.ReadFile(labelFile)
if err == nil {
sensor.label = strings.TrimSpace(string(label))
}
// sensor metric name
switch {
case len(sensor.name) == 0 && len(sensor.label) == 0:
continue
case sensor.name == "coretemp" && strings.HasPrefix(sensor.label, "Core ") ||
sensor.name == "coretemp" && strings.HasPrefix(sensor.label, "Package id "):
sensor.metricName = "temp_" + sensor.label
case len(sensor.name) != 0 && len(sensor.label) != 0:
sensor.metricName = sensor.name + "_" + sensor.label
case len(sensor.name) != 0:
sensor.metricName = sensor.name
case len(sensor.label) != 0:
sensor.metricName = sensor.label
}
sensor.metricName = strings.ToLower(sensor.metricName)
sensor.metricName = strings.Replace(sensor.metricName, " ", "_", -1)
// Add temperature prefix, if required
if !strings.Contains(sensor.metricName, "temp") {
sensor.metricName = "temp_" + sensor.metricName
}
// Sensor file
sensor.file = file
// Sensor tags
sensor.tags = map[string]string{
"type": "node",
}
// Apply tag override configuration
for key, newtags := range m.config.TagOverride {
if strings.Contains(sensor.file, key) {
sensor.tags = newtags
break
}
}
// max temperature
if m.config.ReportMaxTemp {
maxTempFile := strings.TrimSuffix(file, "_input") + "_max"
if buffer, err := ioutil.ReadFile(maxTempFile); err == nil {
if x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64); err == nil {
sensor.maxTempName = strings.Replace(sensor.metricName, "temp", "max_temp", 1)
sensor.maxTemp = x / 1000
}
}
}
// critical temperature
if m.config.ReportCriticalTemp {
criticalTempFile := strings.TrimSuffix(file, "_input") + "_crit"
if buffer, err := ioutil.ReadFile(criticalTempFile); err == nil {
if x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64); err == nil {
sensor.critTempName = strings.Replace(sensor.metricName, "temp", "crit_temp", 1)
sensor.critTemp = x / 1000
}
}
}
m.sensors = append(m.sensors, sensor)
}
// Empty sensors map
if len(m.sensors) == 0 {
return fmt.Errorf("No temperature sensors found")
}
// Finished initialization
m.init = true
return nil
}
func get_hwmon_sensors() (map[string]map[string]string, error) {
var folders []string
var sensors map[string]map[string]string
sensors = make(map[string]map[string]string)
err := filepath.Walk(HWMON_PATH, func(p string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
folders = append(folders, p)
return nil
})
if err != nil {
return sensors, err
}
for _, f := range folders {
sensors[f] = make(map[string]string)
myp := fmt.Sprintf("%s/", f)
err := filepath.Walk(myp, func(path string, info os.FileInfo, err error) error {
dir, fname := filepath.Split(path)
if strings.Contains(fname, "temp") && strings.Contains(fname, "_input") {
namefile := fmt.Sprintf("%s/%s", dir, strings.Replace(fname, "_input", "_label", -1))
name, ierr := ioutil.ReadFile(namefile)
if ierr == nil {
sensors[f][strings.Replace(string(name), "\n", "", -1)] = path
}
}
return nil
})
if err != nil {
continue
}
}
return sensors, nil
}
func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMetric) {
for _, sensor := range m.sensors {
// Read sensor file
buffer, err := ioutil.ReadFile(sensor.file)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", sensor.file, err))
continue
}
x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert temperature '%s' to int64: %v", buffer, err))
continue
}
x /= 1000
y, err := lp.New(
sensor.metricName,
sensor.tags,
m.meta,
map[string]interface{}{"value": x},
time.Now(),
)
if err == nil {
output <- y
}
// max temperature
if m.config.ReportMaxTemp && sensor.maxTemp != 0 {
y, err := lp.New(
sensor.maxTempName,
sensor.tags,
m.meta,
map[string]interface{}{"value": sensor.maxTemp},
time.Now(),
)
if err == nil {
output <- y
sensors, err := get_hwmon_sensors()
if err != nil {
return
}
for _, files := range sensors {
for name, file := range files {
tags := map[string]string{"type": "node"}
for key, newtags := range m.config.TagOverride {
if strings.Contains(file, key) {
tags = newtags
break
}
}
}
// critical temperature
if m.config.ReportCriticalTemp && sensor.critTemp != 0 {
y, err := lp.New(
sensor.critTempName,
sensor.tags,
m.meta,
map[string]interface{}{"value": sensor.critTemp},
time.Now(),
)
mname := strings.Replace(name, " ", "_", -1)
if !strings.Contains(mname, "temp") {
mname = fmt.Sprintf("temp_%s", mname)
}
buffer, err := ioutil.ReadFile(string(file))
if err != nil {
continue
}
x, err := strconv.ParseInt(strings.Replace(string(buffer), "\n", "", -1), 0, 64)
if err == nil {
output <- y
y, err := lp.New(strings.ToLower(mname), tags, m.meta, map[string]interface{}{"value": int(float64(x) / 1000)}, time.Now())
if err == nil {
cclog.ComponentDebug(m.name, y)
output <- y
}
}
}
}
}
func (m *TempCollector) Close() {

View File

@@ -8,7 +8,6 @@ import (
"os/exec"
"strings"
"time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)

1
go.mod
View File

@@ -14,6 +14,7 @@ require (
require (
github.com/PaesslerAG/gval v1.1.2
github.com/golang/protobuf v1.5.2 // indirect
github.com/mattn/go-sqlite3 v1.14.11
github.com/nats-io/nats-server/v2 v2.7.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
)

2
go.sum
View File

@@ -54,6 +54,8 @@ github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.14.11 h1:gt+cp9c0XGqe9S/wAHTL3n/7MqY+siPWgWJgqdsFrzQ=
github.com/mattn/go-sqlite3 v1.14.11/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY=

View File

@@ -2,194 +2,239 @@ package ccmetric
import (
"fmt"
"sort"
"time"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
write "github.com/influxdata/influxdb-client-go/v2/api/write"
lp "github.com/influxdata/line-protocol" // MIT license
)
// Most functions are derived from github.com/influxdata/line-protocol/metric.go
// The metric type is extended with an extra meta information list re-using the Tag
// type.
//
// See: https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/
type ccMetric struct {
name string // Measurement name
meta map[string]string // map of meta data tags
tags map[string]string // map of tags
fields map[string]interface{} // map of fields
tm time.Time // timestamp
name string
tags []*lp.Tag
fields []*lp.Field
tm time.Time
meta []*lp.Tag
}
// ccMetric access functions
type CCMetric interface {
ToPoint(metaAsTags bool) *write.Point // Generate influxDB point for data type ccMetric
ToLineProtocol(metaAsTags bool) string // Generate influxDB line protocol for data type ccMetric
Name() string // Get metric name
SetName(name string) // Set metric name
Time() time.Time // Get timestamp
SetTime(t time.Time) // Set timestamp
Tags() map[string]string // Map of tags
AddTag(key, value string) // Add a tag
GetTag(key string) (value string, ok bool) // Get a tag by its key
HasTag(key string) (ok bool) // Check if a tag key is present
RemoveTag(key string) // Remove a tag by its key
Meta() map[string]string // Map of meta data tags
AddMeta(key, value string) // Add a meta data tag
GetMeta(key string) (value string, ok bool) // Get a meta data tag addressed by its key
HasMeta(key string) (ok bool) // Check if a meta data key is present
RemoveMeta(key string) // Remove a meta data tag by its key
Fields() map[string]interface{} // Map of fields
AddField(key string, value interface{}) // Add a field
GetField(key string) (value interface{}, ok bool) // Get a field addressed by its key
HasField(key string) (ok bool) // Check if a field key is present
RemoveField(key string) // Remove a field addressed by its key
lp.MutableMetric
Name() string
AddTag(key, value string)
GetTag(key string) (string, bool)
HasTag(key string) bool
RemoveTag(key string)
Tags() map[string]string
TagList() []*lp.Tag
AddMeta(key, value string)
GetMeta(key string) (string, bool)
HasMeta(key string) bool
RemoveMeta(key string)
Meta() map[string]string
MetaList() []*lp.Tag
AddField(key string, value interface{})
GetField(key string) (interface{}, bool)
HasField(key string) bool
RemoveField(key string)
Fields() map[string]interface{}
FieldList() []*lp.Field
String() string
SetTime(t time.Time)
}
// String implements the stringer interface for data type ccMetric
func (m *ccMetric) String() string {
return fmt.Sprintf(
"Name: %s, Tags: %+v, Meta: %+v, fields: %+v, Timestamp: %d",
m.name, m.tags, m.meta, m.fields, m.tm.UnixNano(),
)
}
// ToLineProtocol generates influxDB line protocol for data type ccMetric
func (m *ccMetric) ToPoint(metaAsTags bool) (p *write.Point) {
if !metaAsTags {
p = influxdb2.NewPoint(m.name, m.tags, m.fields, m.tm)
} else {
tags := make(map[string]string, len(m.tags)+len(m.meta))
for key, value := range m.tags {
tags[key] = value
}
for key, value := range m.meta {
tags[key] = value
}
p = influxdb2.NewPoint(m.name, tags, m.fields, m.tm)
func (m *ccMetric) Meta() map[string]string {
meta := make(map[string]string, len(m.meta))
for _, m := range m.meta {
meta[m.Key] = m.Value
}
return
return meta
}
// ToLineProtocol generates influxDB line protocol for data type ccMetric
func (m *ccMetric) ToLineProtocol(metaAsTags bool) string {
return write.PointToLineProtocol(
m.ToPoint(metaAsTags),
time.Nanosecond,
)
func (m *ccMetric) MetaList() []*lp.Tag {
return m.meta
}
func (m *ccMetric) String() string {
return fmt.Sprintf("%s %v %v %v %d", m.name, m.Tags(), m.Meta(), m.Fields(), m.tm.UnixNano())
}
// Name returns the measurement name
func (m *ccMetric) Name() string {
return m.name
}
// SetName sets the measurement name
func (m *ccMetric) SetName(name string) {
m.name = name
func (m *ccMetric) Tags() map[string]string {
tags := make(map[string]string, len(m.tags))
for _, tag := range m.tags {
tags[tag.Key] = tag.Value
}
return tags
}
func (m *ccMetric) TagList() []*lp.Tag {
return m.tags
}
func (m *ccMetric) Fields() map[string]interface{} {
fields := make(map[string]interface{}, len(m.fields))
for _, field := range m.fields {
fields[field.Key] = field.Value
}
return fields
}
func (m *ccMetric) FieldList() []*lp.Field {
return m.fields
}
// Time returns timestamp
func (m *ccMetric) Time() time.Time {
return m.tm
}
// SetTime sets the timestamp
func (m *ccMetric) SetTime(t time.Time) {
m.tm = t
}
// Tags returns the list of tags as a key-value mapping
func (m *ccMetric) Tags() map[string]string {
return m.tags
}
// AddTag adds a tag (consisting of key and value) to the map of tags
func (m *ccMetric) AddTag(key, value string) {
m.tags[key] = value
}
// GetTag returns the tag with tag's key equal to <key>
func (m *ccMetric) GetTag(key string) (string, bool) {
value, ok := m.tags[key]
return value, ok
}
// HasTag checks if a tag with key equal to <key> is present in the list of tags
func (m *ccMetric) HasTag(key string) bool {
_, ok := m.tags[key]
return ok
for _, tag := range m.tags {
if tag.Key == key {
return true
}
}
return false
}
func (m *ccMetric) GetTag(key string) (string, bool) {
for _, tag := range m.tags {
if tag.Key == key {
return tag.Value, true
}
}
return "", false
}
// RemoveTag removes the tag with tag's key equal to <key>
func (m *ccMetric) RemoveTag(key string) {
delete(m.tags, key)
for i, tag := range m.tags {
if tag.Key == key {
copy(m.tags[i:], m.tags[i+1:])
m.tags[len(m.tags)-1] = nil
m.tags = m.tags[:len(m.tags)-1]
return
}
}
}
// Meta returns the meta data tags as key-value mapping
func (m *ccMetric) Meta() map[string]string {
return m.meta
func (m *ccMetric) AddTag(key, value string) {
for i, tag := range m.tags {
if key > tag.Key {
continue
}
if key == tag.Key {
tag.Value = value
return
}
m.tags = append(m.tags, nil)
copy(m.tags[i+1:], m.tags[i:])
m.tags[i] = &lp.Tag{Key: key, Value: value}
return
}
m.tags = append(m.tags, &lp.Tag{Key: key, Value: value})
}
// AddMeta adds a meta data tag (consisting of key and value) to the map of meta data tags
func (m *ccMetric) AddMeta(key, value string) {
m.meta[key] = value
}
// GetMeta returns the meta data tag with meta data's key equal to <key>
func (m *ccMetric) GetMeta(key string) (string, bool) {
value, ok := m.meta[key]
return value, ok
}
// HasMeta checks if a meta data tag with meta data's key equal to <key> is present in the map of meta data tags
func (m *ccMetric) HasMeta(key string) bool {
_, ok := m.meta[key]
return ok
for _, tag := range m.meta {
if tag.Key == key {
return true
}
}
return false
}
func (m *ccMetric) GetMeta(key string) (string, bool) {
for _, tag := range m.meta {
if tag.Key == key {
return tag.Value, true
}
}
return "", false
}
// RemoveMeta removes the meta data tag with tag's key equal to <key>
func (m *ccMetric) RemoveMeta(key string) {
delete(m.meta, key)
for i, tag := range m.meta {
if tag.Key == key {
copy(m.meta[i:], m.meta[i+1:])
m.meta[len(m.meta)-1] = nil
m.meta = m.meta[:len(m.meta)-1]
return
}
}
}
// Fields returns the list of fields as key-value-mapping
func (m *ccMetric) Fields() map[string]interface{} {
return m.fields
func (m *ccMetric) AddMeta(key, value string) {
for i, tag := range m.meta {
if key > tag.Key {
continue
}
if key == tag.Key {
tag.Value = value
return
}
m.meta = append(m.meta, nil)
copy(m.meta[i+1:], m.meta[i:])
m.meta[i] = &lp.Tag{Key: key, Value: value}
return
}
m.meta = append(m.meta, &lp.Tag{Key: key, Value: value})
}
// AddField adds a field (consisting of key and value) to the map of fields
func (m *ccMetric) AddField(key string, value interface{}) {
m.fields[key] = value
for i, field := range m.fields {
if key == field.Key {
m.fields[i] = &lp.Field{Key: key, Value: convertField(value)}
return
}
}
m.fields = append(m.fields, &lp.Field{Key: key, Value: convertField(value)})
}
// GetField returns the field with field's key equal to <key>
func (m *ccMetric) GetField(key string) (interface{}, bool) {
v, ok := m.fields[key]
return v, ok
for _, field := range m.fields {
if field.Key == key {
return field.Value, true
}
}
return "", false
}
// HasField checks if a field with field's key equal to <key> is present in the map of fields
func (m *ccMetric) HasField(key string) bool {
_, ok := m.fields[key]
return ok
for _, field := range m.fields {
if field.Key == key {
return true
}
}
return false
}
// RemoveField removes the field with field's key equal to <key>
// from the map of fields
func (m *ccMetric) RemoveField(key string) {
delete(m.fields, key)
for i, field := range m.fields {
if field.Key == key {
copy(m.fields[i:], m.fields[i+1:])
m.fields[len(m.fields)-1] = nil
m.fields = m.fields[:len(m.fields)-1]
return
}
}
}
// New creates a new measurement point
func New(
name string,
tags map[string]string,
@@ -199,79 +244,85 @@ func New(
) (CCMetric, error) {
m := &ccMetric{
name: name,
tags: make(map[string]string, len(tags)),
meta: make(map[string]string, len(meta)),
fields: make(map[string]interface{}, len(fields)),
tags: nil,
fields: nil,
tm: tm,
meta: nil,
}
// deep copy tags, meta data tags and fields
for k, v := range tags {
m.tags[k] = v
}
for k, v := range meta {
m.meta[k] = v
}
for k, v := range fields {
v := convertField(v)
if v == nil {
continue
if len(tags) > 0 {
m.tags = make([]*lp.Tag, 0, len(tags))
for k, v := range tags {
m.tags = append(m.tags,
&lp.Tag{Key: k, Value: v})
}
sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
}
if len(meta) > 0 {
m.meta = make([]*lp.Tag, 0, len(meta))
for k, v := range meta {
m.meta = append(m.meta,
&lp.Tag{Key: k, Value: v})
}
sort.Slice(m.meta, func(i, j int) bool { return m.meta[i].Key < m.meta[j].Key })
}
if len(fields) > 0 {
m.fields = make([]*lp.Field, 0, len(fields))
for k, v := range fields {
v := convertField(v)
if v == nil {
continue
}
m.AddField(k, v)
}
m.fields[k] = v
}
return m, nil
}
// FromMetric copies the metric <other>
func FromMetric(other ccMetric) CCMetric {
func FromMetric(other CCMetric) CCMetric {
m := &ccMetric{
name: other.Name(),
tags: make(map[string]string, len(other.tags)),
meta: make(map[string]string, len(other.meta)),
fields: make(map[string]interface{}, len(other.fields)),
tags: make([]*lp.Tag, len(other.TagList())),
fields: make([]*lp.Field, len(other.FieldList())),
meta: make([]*lp.Tag, len(other.MetaList())),
tm: other.Time(),
}
// deep copy tags, meta data tags and fields
for key, value := range other.tags {
m.tags[key] = value
for i, tag := range other.TagList() {
m.tags[i] = &lp.Tag{Key: tag.Key, Value: tag.Value}
}
for key, value := range other.meta {
m.meta[key] = value
for i, s := range other.MetaList() {
m.meta[i] = &lp.Tag{Key: s.Key, Value: s.Value}
}
for key, value := range other.fields {
m.fields[key] = value
for i, field := range other.FieldList() {
m.fields[i] = &lp.Field{Key: field.Key, Value: field.Value}
}
return m
}
// FromInfluxMetric copies the influxDB line protocol metric <other>
func FromInfluxMetric(other lp.Metric) CCMetric {
m := &ccMetric{
name: other.Name(),
tags: make(map[string]string),
meta: make(map[string]string),
fields: make(map[string]interface{}),
tags: make([]*lp.Tag, len(other.TagList())),
fields: make([]*lp.Field, len(other.FieldList())),
meta: make([]*lp.Tag, 0),
tm: other.Time(),
}
// deep copy tags and fields
for _, otherTag := range other.TagList() {
m.tags[otherTag.Key] = otherTag.Value
for i, tag := range other.TagList() {
m.tags[i] = &lp.Tag{Key: tag.Key, Value: tag.Value}
}
for _, otherField := range other.FieldList() {
m.fields[otherField.Key] = otherField.Value
for i, field := range other.FieldList() {
m.fields[i] = &lp.Field{Key: field.Key, Value: field.Value}
}
return m
}
// convertField converts data types of fields by the following schemata:
// *float32, *float64, float32, float64 -> float64
// *int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
// *uint, *uint8, *uint16, *uint32, *uint64, uint, uint8, uint16, uint32, uint64 -> uint64
// *[]byte, *string, []byte, string -> string
// *bool, bool -> bool
func convertField(v interface{}) interface{} {
switch v := v.(type) {
case float64:

View File

@@ -6,17 +6,12 @@ import (
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
cclogger "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
)
const SYSFS_NUMABASE = `/sys/devices/system/node`
const SYSFS_CPUBASE = `/sys/devices/system/cpu`
const PROCFS_CPUINFO = `/proc/cpuinfo`
// intArrayContains scans an array of ints for the value str.
// If the specified value is found, the corresponding array index is returned.
// The bool value is used to signal success or failure
@@ -29,26 +24,20 @@ func intArrayContains(array []int, str int) (int, bool) {
return -1, false
}
func fileToInt(path string) int {
buffer, err := ioutil.ReadFile(path)
if err != nil {
log.Print(err)
cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
return -1
}
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
var id int64
//_, err = fmt.Scanf("%d", sbuffer, &id)
id, err = strconv.ParseInt(sbuffer, 10, 32)
if err != nil {
cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
return -1
}
return int(id)
}
// stringArrayContains scans an array of strings for the value str.
// If the specified value is found, the corresponding array index is returned.
// The bool value is used to signal success or failure
// func stringArrayContains(array []string, str string) (int, bool) {
// for i, a := range array {
// if a == str {
// return i, true
// }
// }
// return -1, false
// }
func SocketList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
@@ -73,13 +62,13 @@ func SocketList() []int {
}
func CpuList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
cpulist := make([]int, 0)
var cpulist []int
for _, line := range ll {
if strings.HasPrefix(line, "processor") {
lv := strings.Fields(line)
@@ -97,81 +86,6 @@ func CpuList() []int {
return cpulist
}
func CoreList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
corelist := make([]int, 0)
for _, line := range ll {
if strings.HasPrefix(line, "core id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return corelist
}
_, found := intArrayContains(corelist, int(id))
if !found {
corelist = append(corelist, int(id))
}
}
}
return corelist
}
func NumaNodeList() []int {
numaList := make([]int, 0)
globPath := filepath.Join(string(SYSFS_NUMABASE), "node*")
regexPath := filepath.Join(string(SYSFS_NUMABASE), "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "NumaNodeList", err.Error())
}
for _, f := range files {
if !regex.MatchString(f) {
continue
}
finfo, err := os.Lstat(f)
if err != nil {
continue
}
if !finfo.IsDir() {
continue
}
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil {
if _, found := intArrayContains(numaList, id); !found {
numaList = append(numaList, id)
}
}
}
}
return numaList
}
func DieList() []int {
cpulist := CpuList()
dielist := make([]int, 0)
for _, c := range cpulist {
diepath := filepath.Join(string(SYSFS_CPUBASE), fmt.Sprintf("cpu%d", c), "topology/die_id")
dieid := fileToInt(diepath)
if dieid > 0 {
_, found := intArrayContains(dielist, int(dieid))
if !found {
dielist = append(dielist, int(dieid))
}
}
}
return dielist
}
type CpuEntry struct {
Cpuid int
SMT int
@@ -187,7 +101,7 @@ func CpuData() []CpuEntry {
buffer, err := ioutil.ReadFile(path)
if err != nil {
log.Print(err)
//cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
return -1
}
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
@@ -215,14 +129,14 @@ func CpuData() []CpuEntry {
getSMT := func(cpuid int, basepath string) int {
buffer, err := ioutil.ReadFile(fmt.Sprintf("%s/thread_siblings_list", basepath))
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
log.Print(err)
}
threadlist := make([]int, 0)
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
for _, x := range strings.Split(sbuffer, ",") {
id, err := strconv.ParseInt(x, 10, 32)
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
log.Print(err)
}
threadlist = append(threadlist, int(id))
}
@@ -235,22 +149,18 @@ func CpuData() []CpuEntry {
}
getNumaDomain := func(basepath string) int {
globPath := filepath.Join(basepath, "node*")
regexPath := filepath.Join(basepath, "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
files, err := filepath.Glob(fmt.Sprintf("%s/node*", basepath))
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getNumaDomain", err.Error())
log.Print(err)
}
for _, f := range files {
finfo, err := os.Lstat(f)
if err == nil && finfo.IsDir() {
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil {
return id
}
if err == nil && (finfo.IsDir() || finfo.Mode()&os.ModeSymlink != 0) {
var id int
parts := strings.Split(f, "/")
_, err = fmt.Scanf("node%d", parts[len(parts)-1], &id)
if err == nil {
return id
}
}
}
@@ -267,24 +177,19 @@ func CpuData() []CpuEntry {
centry.Die = -1
centry.Core = -1
// Set base directory for topology lookup
cpustr := fmt.Sprintf("cpu%d", centry.Cpuid)
base := filepath.Join("/sys/devices/system/cpu", cpustr)
topoBase := filepath.Join(base, "topology")
base := fmt.Sprintf("/sys/devices/system/cpu/cpu%d/topology", centry.Cpuid)
// Lookup CPU core id
centry.Core = getCore(topoBase)
centry.Core = getCore(base)
// Lookup CPU socket id
centry.Socket = getSocket(topoBase)
centry.Socket = getSocket(base)
// Lookup CPU die id
centry.Die = getDie(topoBase)
if centry.Die < 0 {
centry.Die = centry.Socket
}
centry.Die = getDie(base)
// Lookup SMT thread id
centry.SMT = getSMT(centry.Cpuid, topoBase)
centry.SMT = getSMT(centry.Cpuid, base)
// Lookup NUMA domain id
centry.Numadomain = getNumaDomain(base)
@@ -298,41 +203,35 @@ type CpuInformation struct {
SMTWidth int
NumSockets int
NumDies int
NumCores int
NumNumaDomains int
}
func CpuInfo() CpuInformation {
var c CpuInformation
smtList := make([]int, 0)
numaList := make([]int, 0)
dieList := make([]int, 0)
socketList := make([]int, 0)
coreList := make([]int, 0)
smt := 0
numa := 0
die := 0
socket := 0
cdata := CpuData()
for _, d := range cdata {
if _, ok := intArrayContains(smtList, d.SMT); !ok {
smtList = append(smtList, d.SMT)
if d.SMT > smt {
smt = d.SMT
}
if _, ok := intArrayContains(numaList, d.Numadomain); !ok {
numaList = append(numaList, d.Numadomain)
if d.Numadomain > numa {
numa = d.Numadomain
}
if _, ok := intArrayContains(dieList, d.Die); !ok {
dieList = append(dieList, d.Die)
if d.Die > die {
die = d.Die
}
if _, ok := intArrayContains(socketList, d.Socket); !ok {
socketList = append(socketList, d.Socket)
}
if _, ok := intArrayContains(coreList, d.Core); !ok {
coreList = append(coreList, d.Core)
if d.Socket > socket {
socket = d.Socket
}
}
c.NumNumaDomains = len(numaList)
c.SMTWidth = len(smtList)
c.NumDies = len(dieList)
c.NumCores = len(coreList)
c.NumSockets = len(socketList)
c.NumNumaDomains = numa + 1
c.SMTWidth = smt + 1
c.NumDies = die + 1
c.NumSockets = socket + 1
c.NumHWthreads = len(cdata)
return c
}
@@ -376,47 +275,3 @@ func GetCpuCore(cpuid int) int {
}
return -1
}
func GetSocketCpus(socket int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Socket == socket {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetNumaDomainCpus(domain int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Numadomain == domain {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetDieCpus(die int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Die == die {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetCoreCpus(core int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Core == core {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}

View File

@@ -1,38 +0,0 @@
# The MetricAggregator
In some cases, further combination of metrics or raw values is required. For that, strings like `foo + 1` with a runtime-dependent `foo` need to be evaluated. The MetricAggregator relies on the [`gval`](https://github.com/PaesslerAG/gval) Golang package to perform all expression evaluation. The `gval` package provides the basic arithmetic operations but the MetricAggregator defines additional ones.
**Note**: To get an impression which expressions can be handled by `gval`, see its [README](https://github.com/PaesslerAG/gval/blob/master/README.md)
## Simple expression evaluation
For simple expression evaluation, the MetricAggregator provides two functions for different use cases (a short usage sketch follows the list):
- `EvalBoolCondition(expression string, params map[string]interface{})`: Used by the MetricRouter to match metrics like `metric.Name() == 'mymetric'`
- `EvalFloat64Condition(expression string, params map[string]interface{})`: Used by the MetricRouter and LikwidCollector to derive new values like `(PMC0+PMC1)/PMC3`
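A minimal sketch of how these two helpers can be called; the import alias, import path and the parameter maps are illustrative assumptions, not code taken from the collector:
```go
package main

import (
	"fmt"

	agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
)

func main() {
	// Match a metric by name, as the MetricRouter does for its "if" conditions.
	match, err := agg.EvalBoolCondition("name == 'mymetric'",
		map[string]interface{}{"name": "mymetric"})
	if err == nil {
		fmt.Println("condition matched:", match) // true
	}

	// Derive a new value from raw counter values, as the LikwidCollector does.
	value, err := agg.EvalFloat64Condition("(PMC0+PMC1)/PMC3",
		map[string]interface{}{"PMC0": 1.0, "PMC1": 2.0, "PMC3": 4.0})
	if err == nil {
		fmt.Println("derived value:", value) // 0.75
	}
}
```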
## MetricAggregator extensions for `gval`
The MetricAggregator provides these functions in addition to the `Full` language of `gval` (a usage sketch follows the list):
- `sum(array)`: Sum up values in an array like `sum(values)`
- `min(array)`: Get the minimum value in an array like `min(values)`
- `avg(array)`: Get the mean value in an array like `avg(values)`
- `mean(array)`: Get the mean value in an array like `mean(values)`
- `max(array)`: Get the maximum value in an array like `max(values)`
- `len(array)`: Get the length of an array like `len(values)`
- `median(array)`: Get the median value in an array like `median(values)`
- `in`: Check existence in an array like `0 in getCpuList()` to check whether there is an entry `0`. Also substring matching works like `temp in metric.Name()`
- `match`: Regular-expression matching like `match('temp_cores_%d+', metric.Name())`. **Note:** all `\` in a regex have to be replaced with `%`
- `getCpuCore(cpuid)`: For a CPU id, returns the corresponding CPU core id like `getCpuCore(0)`
- `getCpuSocket(cpuid)`: For a CPU id, returns the corresponding CPU socket id
- `getCpuNuma(cpuid)`: For a CPU id, returns the corresponding NUMA domain id
- `getCpuDie(cpuid)`: For a CPU id, returns the corresponding CPU die id
- `getSockCpuList(sockid)`: For a given CPU socket id, the list of CPU ids is returned, e.g. the CPUs on socket 1 via `getSockCpuList(1)`
- `getNumaCpuList(numaid)`: For a given NUMA node id, the list of CPU ids is returned
- `getDieCpuList(dieid)`: For a given CPU die id, the list of CPU ids is returned
- `getCoreCpuList(coreid)`: For a given CPU core id, the list of CPU ids is returned
- `getCpuList`: Get the list of all CPUs
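A hedged sketch combining a few of these extensions, continuing the previous sketch (same assumed import alias `agg`); the parameter map and the exact return types are illustrative, not taken from the repository:
```go
// values could be the list of all metric values gathered in one interval.
params := map[string]interface{}{
	"values": []float64{10, 20, 30, 40},
	"cpuid":  0,
}

// Aggregate over the collected values.
avg, _ := agg.EvalFloat64Condition("avg(values)", params)
maxv, _ := agg.EvalFloat64Condition("max(values)", params)

// Topology lookup: is CPU 0 located on socket 0?
onSocket0, _ := agg.EvalBoolCondition("cpuid in getSockCpuList(0)", params)

fmt.Println(avg, maxv, onSocket0)
```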
## Limitations
- Since the expressions are written in JSON files, which do not allow `"` inside of JSON strings without proper escaping, you have to use `''` for strings.
- Since `\` is interpreted by JSON as an escape character, it cannot be used directly in the expressions. But it is required to write regular expressions. So instead of `\`, use `%` and the MetricAggregator replaces it after reading the JSON file (see the sketch below).
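This rewriting corresponds to the two `strings.ReplaceAll` calls visible in the aggregator's evaluation helpers further below; a small illustrative snippet (assumes `strings` and `fmt` are imported) of what happens to a condition taken from a JSON file:
```go
// Condition exactly as written in the JSON configuration file.
cond := "match('temp_core_%d+', metric.Name())"

// The MetricAggregator turns ' into " and % into \ before handing it to gval.
gvalCond := strings.ReplaceAll(strings.ReplaceAll(cond, "'", "\""), "%", "\\")

fmt.Println(gvalCond) // match("temp_core_\d+", metric.Name())
```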

View File

@@ -6,8 +6,6 @@ The CCMetric router sits in between the collectors and the sinks and can be used
```json
{
"num_cache_intervals" : 1,
"interval_timestamp" : true,
"add_tags" : [
{
"key" : "cluster",
@@ -27,58 +25,16 @@ The CCMetric router sits in between the collectors and the sinks and can be used
"if" : "*"
}
],
"interval_aggregates" : [
{
"name" : "temp_cores_avg",
"if" : "match('temp_core_%d+', metric.Name())",
"function" : "avg(values)",
"tags" : {
"type" : "node"
},
"meta" : {
"group": "IPMI",
"unit": "degC",
"source": "TempCollector"
}
}
],
"drop_metrics" : [
"not_interesting_metric_at_all"
],
"drop_metrics_if" : [
"match('temp_core_%d+', metric.Name())"
],
"rename_metrics" : {
"metric_12345" : "mymetric"
}
"interval_timestamp" : true
}
```
There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.
# The `interval_timestamp` option
The collectors' `Read()` functions are not called simultaneously and therefore the metrics gathered in an interval can have different timestamps. If you want to avoid that and have a common timestamp (the beginning of the interval), set this option to `true` and the MetricRouter sets the time.
# Conditional manipulation of tags
# The `num_cache_intervals` option
The `if` setting allows conditional testing of a single metric like in the example:
If the MetricRouter should buffer metrics of intervals in a MetricCache, this option specifies the number of past intervals that should be kept. If `num_cache_intervals = 0`, the cache is disabled. With `num_cache_intervals = 1`, only the metrics of the last interval are buffered.
A `num_cache_intervals > 0` is required to use the `interval_aggregates` option.
# The `rename_metrics` option
In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, executables and libraries, they might change from system to system (or installation to installation, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`:
```json
{
"oldname" : "newname",
"clock_mhz" : "clock"
}
```
# Conditional manipulation of tags (`add_tags` and `del_tags`)
Common config format:
```json
{
"key" : "test",
@@ -87,131 +43,8 @@ Common config format:
}
```
## The `del_tags` option
If the CCMetric name is equal to 'temp_package_id_0', it adds an additional tag `test=testing` to the metric.
The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.
If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case.
Never delete tags:
- `hostname`
- `type`
- `type-id`
## The `add_tags` option
In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.
If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.
For this metric, a more useful example would be:
```json
[
{
"key" : "type",
"value" : "socket",
"if" : "name == 'temp_package_id_0'"
},
{
"key" : "type-id",
"value" : "0",
"if" : "name == 'temp_package_id_0'"
},
]
```
The metric `temp_package_id_0` corresponds to the temperature of the first CPU socket (=package). With the above configuration, the tags would reflect that because commonly the [TempCollector](../../collectors/tempMetric.md) submits only `node` metrics.
In order to match all metrics, you can use `*`, e.g. to add a tag by default. This is useful to attach system-specific tags like `cluster=testcluster`:
```json
{
"key" : "cluster",
"value" : "testcluster",
"if" : "*"
}
```
# Dropping metrics
In some cases, you want to drop a metric and don't get it forwarded to the sinks. There are two options based on the required specification:
- Based only on the metric name -> `drop_metrics` section
- An evaluable condition with more overhead -> `drop_metrics_if` section
## The `drop_metrics` section
The argument is a list of metric names. No further checks are performed, only a comparison of the metric names.
```json
{
"drop_metrics" : [
"drop_metric_1",
"drop_metric_2"
]
}
```
The example drops all metrics with the name `drop_metric_1` and `drop_metric_2`.
## The `drop_metrics_if` section
This option takes a list of evaluable conditions and performs them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).
```json
{
"drop_metrics_if" : [
"match('drop_metric_%d+', name)",
"match('cpu', type) && type-id == 0"
]
}
```
The first line is comparable with the example in `drop_metrics`, it drops all metrics starting with `drop_metric_` and ending with a number. The second line drops all metrics of the first hardware thread (**not** recommended)
In order to match all metrics, you can use `*`, e.g. to apply a rule by default, like the `cluster=testcluster` tag in the example.
# Aggregate metric values of the current interval with the `interval_aggregates` option
**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0
In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent in the next interval but carry the timestamp of the beginning of the previous interval.
```json
"interval_aggregates" : [
{
"name" : "new_metric_name",
"if" : "match('sub_metric_%d+', metric.Name())",
"function" : "avg(values)",
"tags" : {
"key" : "value",
"type" : "node"
},
"meta" : {
"key" : "value",
"group": "IPMI",
"unit": "<copy>",
}
}
]
```
The above configuration collects all metric values for metrics for which `if` evaluates to `true`. Afterwards it calculates the average `avg` of the `values` (list of all metrics' field `value`) and creates a new CCMetric with the name `new_metric_name`, adding the tags in `tags` and the meta information in `meta`. The special value `<copy>` searches the input metrics and copies the value of the first match of `key` to the new CCMetric.
If you are not interested in the input metrics `sub_metric_%d+` at all, you can add the same condition used here to the `drop_metrics_if` section to drop them.
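For example, a matching `drop_metrics_if` entry could look like this (using the `name` parameter as in the drop examples above):
```json
{
  "drop_metrics_if" : [
    "match('sub_metric_%d+', name)"
  ]
}
```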
Use cases for `interval_aggregates`:
- Combine multiple metrics of a collector into a new one, like the [MemstatCollector](../../collectors/memstatMetric.md) does for `mem_used`:
```json
{
  "name" : "mem_used",
  "if" : "source == 'MemstatCollector'",
  "function" : "sum(mem_total) - (sum(mem_free) + sum(mem_buffers) + sum(mem_cached))",
  "tags" : {
    "type" : "node"
  },
  "meta" : {
    "group": "<copy>",
    "unit": "<copy>",
    "source": "<copy>"
  }
}
```

View File

@@ -1,12 +1,10 @@
package metricAggregator
package metricRouter
import (
"context"
"fmt"
"math"
"os"
"strings"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
@@ -17,7 +15,7 @@ import (
"github.com/PaesslerAG/gval"
)
type MetricAggregatorIntervalConfig struct {
type metricAggregatorIntervalConfig struct {
Name string `json:"name"` // Metric name for the new metric
Function string `json:"function"` // Function to apply on the metric
Condition string `json:"if"` // Condition for applying function
@@ -28,7 +26,7 @@ type MetricAggregatorIntervalConfig struct {
}
type metricAggregator struct {
functions []*MetricAggregatorIntervalConfig
functions []*metricAggregatorIntervalConfig
constants map[string]interface{}
language gval.Language
output chan lp.CCMetric
@@ -63,20 +61,10 @@ var metricCacheLanguage = gval.NewLanguage(
gval.Function("getCpuList", getCpuListOfNode),
gval.Function("getCpuListOfType", getCpuListOfType),
)
var language gval.Language = gval.NewLanguage(
gval.Full(),
metricCacheLanguage,
)
var evaluables = struct {
mapping map[string]gval.Evaluable
mutex sync.Mutex
}{
mapping: make(map[string]gval.Evaluable),
}
func (c *metricAggregator) Init(output chan lp.CCMetric) error {
c.output = output
c.functions = make([]*MetricAggregatorIntervalConfig, 0)
c.functions = make([]*metricAggregatorIntervalConfig, 0)
c.constants = make(map[string]interface{})
// add constants like hostname, numSockets, ... to constants list
@@ -96,7 +84,7 @@ func (c *metricAggregator) Init(output chan lp.CCMetric) error {
c.constants["smtWidth"] = cinfo.SMTWidth
c.language = gval.NewLanguage(
gval.Full(),
gval.Base(),
metricCacheLanguage,
)
@@ -257,16 +245,15 @@ func (c *metricAggregator) AddAggregation(name, function, condition string, tags
return nil
}
}
agg := &MetricAggregatorIntervalConfig{
Name: name,
Condition: newcond,
gvalCond: gvalCond,
Function: newfunc,
gvalFunc: gvalFunc,
Tags: tags,
Meta: meta,
}
c.functions = append(c.functions, agg)
var agg metricAggregatorIntervalConfig
agg.Name = name
agg.Condition = newcond
agg.gvalCond = gvalCond
agg.Function = newfunc
agg.gvalFunc = gvalFunc
agg.Tags = tags
agg.Meta = meta
c.functions = append(c.functions, &agg)
return nil
}
@@ -294,50 +281,6 @@ func (c *metricAggregator) AddFunction(name string, function func(args ...interf
c.language = gval.NewLanguage(c.language, gval.Function(name, function))
}
func EvalBoolCondition(condition string, params map[string]interface{}) (bool, error) {
evaluables.mutex.Lock()
evaluable, ok := evaluables.mapping[condition]
evaluables.mutex.Unlock()
if !ok {
newcond :=
strings.ReplaceAll(
strings.ReplaceAll(
condition, "'", "\""), "%", "\\")
var err error
evaluable, err = language.NewEvaluable(newcond)
if err != nil {
return false, err
}
evaluables.mutex.Lock()
evaluables.mapping[condition] = evaluable
evaluables.mutex.Unlock()
}
value, err := evaluable.EvalBool(context.Background(), params)
return value, err
}
func EvalFloat64Condition(condition string, params map[string]interface{}) (float64, error) {
evaluables.mutex.Lock()
evaluable, ok := evaluables.mapping[condition]
evaluables.mutex.Unlock()
if !ok {
newcond :=
strings.ReplaceAll(
strings.ReplaceAll(
condition, "'", "\""), "%", "\\")
var err error
evaluable, err = language.NewEvaluable(newcond)
if err != nil {
return math.NaN(), err
}
evaluables.mutex.Lock()
evaluables.mapping[condition] = evaluable
evaluables.mutex.Unlock()
}
value, err := evaluable.EvalFloat64(context.Background(), params)
return value, err
}
func NewAggregator(output chan lp.CCMetric) (MetricAggregator, error) {
a := new(metricAggregator)
err := a.Init(output)

View File

@@ -1,4 +1,4 @@
package metricAggregator
package metricRouter
import (
"errors"

View File

@@ -7,7 +7,6 @@ import (
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
)
@@ -23,14 +22,13 @@ type metricCachePeriod struct {
type metricCache struct {
numPeriods int
curPeriod int
lock sync.Mutex
intervals []*metricCachePeriod
wg *sync.WaitGroup
ticker mct.MultiChanTicker
tickchan chan time.Time
done chan bool
output chan lp.CCMetric
aggEngine agg.MetricAggregator
aggEngine MetricAggregator
}
type MetricCache interface {
@@ -61,7 +59,7 @@ func (c *metricCache) Init(output chan lp.CCMetric, ticker mct.MultiChanTicker,
// Create a new aggregation engine. No separate goroutine at the moment
// The code is executed by the MetricCache goroutine
c.aggEngine, err = agg.NewAggregator(c.output)
c.aggEngine, err = NewAggregator(c.output)
if err != nil {
cclog.ComponentError("MetricCache", "Cannot create aggregator")
return err
@@ -104,11 +102,9 @@ func (c *metricCache) Start() {
done()
return
case tick := <-c.tickchan:
c.lock.Lock()
old := rotate(tick)
// Get the last period and evaluate aggregation metrics
starttime, endtime, metrics := c.GetPeriod(old)
c.lock.Unlock()
if len(metrics) > 0 {
c.aggEngine.Eval(starttime, endtime, metrics)
} else {
@@ -126,7 +122,6 @@ func (c *metricCache) Start() {
// to avoid reallocations
func (c *metricCache) Add(metric lp.CCMetric) {
if c.curPeriod >= 0 && c.curPeriod < c.numPeriods {
c.lock.Lock()
p := c.intervals[c.curPeriod]
if p.numMetrics < p.sizeMetrics {
p.metrics[p.numMetrics] = metric
@@ -138,7 +133,6 @@ func (c *metricCache) Add(metric lp.CCMetric) {
p.sizeMetrics = p.sizeMetrics + 1
p.stopstamp = metric.Time()
}
c.lock.Unlock()
}
}
@@ -154,26 +148,16 @@ func (c *metricCache) DeleteAggregation(name string) error {
// is the current one, index=1 the last interval and so on. Returns and empty array if a wrong index
// is given (negative index, index larger than configured number of total intervals, ...)
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric) {
var start time.Time = time.Now()
var stop time.Time = time.Now()
var metrics []lp.CCMetric
if index >= 0 && index < c.numPeriods {
pindex := c.curPeriod - index
if pindex < 0 {
pindex = c.numPeriods - pindex
}
if pindex >= 0 && pindex < c.numPeriods {
start = c.intervals[pindex].startstamp
stop = c.intervals[pindex].stopstamp
metrics = c.intervals[pindex].metrics
//return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
} else {
metrics = make([]lp.CCMetric, 0)
return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
}
} else {
metrics = make([]lp.CCMetric, 0)
}
return start, stop, metrics
return time.Now(), time.Now(), make([]lp.CCMetric, 0)
}
// Close finishes / stops the metric cache

View File

@@ -10,12 +10,10 @@ import (
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
"gopkg.in/Knetic/govaluate.v2"
)
const ROUTER_MAX_FORWARD = 50
// Metric router tag configuration
type metricRouterTagConfig struct {
Key string `json:"key"` // Tag name
@@ -25,15 +23,11 @@ type metricRouterTagConfig struct {
// Metric router configuration
type metricRouterConfig struct {
AddTags []metricRouterTagConfig `json:"add_tags"` // List of tags that are added when the condition is met
DelTags []metricRouterTagConfig `json:"delete_tags"` // List of tags that are removed when the condition is met
IntervalAgg []agg.MetricAggregatorIntervalConfig `json:"interval_aggregates"` // List of aggregation function processed at the end of an interval
DropMetrics []string `json:"drop_metrics"` // List of metric names to drop. For fine-grained dropping use drop_metrics_if
DropMetricsIf []string `json:"drop_metrics_if"` // List of evaluatable terms to drop metrics
RenameMetrics map[string]string `json:"rename_metrics"` // Map to rename metric name from key to value
IntervalStamp bool `json:"interval_timestamp"` // Update timestamp periodically by ticker each interval?
NumCacheIntervals int `json:"num_cache_intervals"` // Number of intervals of cached metrics for evaluation
dropMetrics map[string]bool // Internal map for O(1) lookup
AddTags []metricRouterTagConfig `json:"add_tags"` // List of tags that are added when the condition is met
DelTags []metricRouterTagConfig `json:"delete_tags"` // List of tags that are removed when the condition is met
IntervalAgg []metricAggregatorIntervalConfig `json:"interval_aggregates"` // List of aggregation function processed at the end of an interval
IntervalStamp bool `json:"interval_timestamp"` // Update timestamp periodically by ticker each interval?
NumCacheIntervals int `json:"num_cache_intervals"` // Number of intervals of cached metrics for evaluation
}
// Metric router data structure
@@ -51,7 +45,6 @@ type metricRouter struct {
config metricRouterConfig // json encoded config for metric router
cache MetricCache // pointer to MetricCache
cachewg sync.WaitGroup // wait group for MetricCache
maxForward int // number of metrics to forward maximally in one iteration
}
// MetricRouter access functions
@@ -76,7 +69,6 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
r.cache_input = make(chan lp.CCMetric)
r.wg = wg
r.ticker = ticker
r.maxForward = ROUTER_MAX_FORWARD
// Set hostname
hostname, err := os.Hostname()
@@ -100,19 +92,17 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
cclog.ComponentError("MetricRouter", err.Error())
return err
}
if r.config.NumCacheIntervals > 0 {
r.cache, err = NewCache(r.cache_input, r.ticker, &r.cachewg, r.config.NumCacheIntervals)
if err != nil {
cclog.ComponentError("MetricRouter", "MetricCache initialization failed:", err.Error())
return err
}
for _, agg := range r.config.IntervalAgg {
r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
}
numIntervals := r.config.NumCacheIntervals
if numIntervals <= 0 {
numIntervals = 1
}
r.config.dropMetrics = make(map[string]bool)
for _, mname := range r.config.DropMetrics {
r.config.dropMetrics[mname] = true
r.cache, err = NewCache(r.cache_input, r.ticker, &r.cachewg, numIntervals)
if err != nil {
cclog.ComponentError("MetricRouter", "MetricCache initialization failed:", err.Error())
return err
}
for _, agg := range r.config.IntervalAgg {
r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
}
return nil
}
@@ -140,34 +130,47 @@ func (r *metricRouter) StartTimer() {
cclog.ComponentDebug("MetricRouter", "TIMER START")
}
func getParamMap(point lp.CCMetric) map[string]interface{} {
// EvalCondition evaluates condition cond for metric data from point
func (r *metricRouter) EvalCondition(cond string, point lp.CCMetric) (bool, error) {
expression, err := govaluate.NewEvaluableExpression(cond)
if err != nil {
cclog.ComponentDebug("MetricRouter", cond, " = ", err.Error())
return false, err
}
// Add metric name, tags, meta data, fields and timestamp to the parameter list
params := make(map[string]interface{})
params["metric"] = point
params["name"] = point.Name()
for key, value := range point.Tags() {
params[key] = value
for _, t := range point.TagList() {
params[t.Key] = t.Value
}
for key, value := range point.Meta() {
params[key] = value
for _, m := range point.MetaList() {
params[m.Key] = m.Value
}
for key, value := range point.Fields() {
params[key] = value
for _, f := range point.FieldList() {
params[f.Key] = f.Value
}
params["timestamp"] = point.Time()
return params
// evaluate condition
result, err := expression.Evaluate(params)
if err != nil {
cclog.ComponentDebug("MetricRouter", cond, " = ", err.Error())
return false, err
}
return bool(result.(bool)), err
}
// DoAddTags adds a tag when condition is fullfiled
func (r *metricRouter) DoAddTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.AddTags {
var conditionMatches bool
if m.Condition == "*" {
// Condition is always matched
conditionMatches = true
} else {
// Evaluate condition
var err error
conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
conditionMatches, err = r.EvalCondition(m.Condition, point)
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
@@ -181,15 +184,14 @@ func (r *metricRouter) DoAddTags(point lp.CCMetric) {
// DoDelTags removes a tag when condition is fullfiled
func (r *metricRouter) DoDelTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.DelTags {
var conditionMatches bool
if m.Condition == "*" {
// Condition is always matched
conditionMatches = true
} else {
// Evaluate condition
var err error
conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
conditionMatches, err = r.EvalCondition(m.Condition, point)
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
@@ -201,31 +203,9 @@ func (r *metricRouter) DoDelTags(point lp.CCMetric) {
}
}
// Conditional test whether a metric should be dropped
func (r *metricRouter) dropMetric(point lp.CCMetric) bool {
// Simple drop check
if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
return conditionMatches
}
// Checking the dropping conditions
for _, m := range r.config.DropMetricsIf {
conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
}
if conditionMatches {
return conditionMatches
}
}
// No dropping condition met
return false
}
// Start starts the metric router
func (r *metricRouter) Start() {
// start timer if configured
r.timestamp = time.Now()
if r.config.IntervalStamp {
@@ -244,63 +224,17 @@ func (r *metricRouter) Start() {
cclog.ComponentDebug("MetricRouter", "FORWARD", point)
r.DoAddTags(point)
r.DoDelTags(point)
if new, ok := r.config.RenameMetrics[point.Name()]; ok {
point.SetName(new)
}
r.DoAddTags(point)
r.DoDelTags(point)
for _, o := range r.outputs {
o <- point
}
}
// Foward message received from collector channel
coll_forward := func(p lp.CCMetric) {
// receive from metric collector
p.AddTag("hostname", r.hostname)
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
if !r.dropMetric(p) {
forward(p)
}
// even if the metric is dropped, it is stored in the cache for
// aggregations
if r.config.NumCacheIntervals > 0 {
r.cache.Add(p)
}
}
// Forward message received from receivers channel
recv_forward := func(p lp.CCMetric) {
// receive from receive manager
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
if !r.dropMetric(p) {
forward(p)
}
}
// Forward message received from cache channel
cache_forward := func(p lp.CCMetric) {
// receive from metric collector
if !r.dropMetric(p) {
p.AddTag("hostname", r.hostname)
forward(p)
}
}
// Start Metric Cache
if r.config.NumCacheIntervals > 0 {
r.cache.Start()
}
r.cache.Start()
r.wg.Add(1)
go func() {
defer r.wg.Done()
for {
select {
case <-r.done:
@@ -308,22 +242,25 @@ func (r *metricRouter) Start() {
return
case p := <-r.coll_input:
coll_forward(p)
for i := 0; len(r.coll_input) > 0 && i < r.maxForward; i++ {
coll_forward(<-r.coll_input)
// receive from metric collector
p.AddTag("hostname", r.hostname)
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
forward(p)
r.cache.Add(p)
case p := <-r.recv_input:
recv_forward(p)
for i := 0; len(r.recv_input) > 0 && i < r.maxForward; i++ {
recv_forward(<-r.recv_input)
// receive from receive manager
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
forward(p)
case p := <-r.cache_input:
cache_forward(p)
for i := 0; len(r.cache_input) > 0 && i < r.maxForward; i++ {
cache_forward(<-r.cache_input)
}
// receive from metric collector
p.AddTag("hostname", r.hostname)
forward(p)
}
}
}()
@@ -351,21 +288,14 @@ func (r *metricRouter) Close() {
r.done <- true
// wait for close of channel r.done
<-r.done
// stop timer
if r.config.IntervalStamp {
cclog.ComponentDebug("MetricRouter", "TIMER CLOSE")
r.timerdone <- true
// wait for close of channel r.timerdone
<-r.timerdone
}
// stop metric cache
if r.config.NumCacheIntervals > 0 {
cclog.ComponentDebug("MetricRouter", "CACHE CLOSE")
r.cache.Close()
r.cachewg.Wait()
}
r.cache.Close()
r.cachewg.Wait()
}
// New creates a new initialized metric router

View File

@@ -24,6 +24,7 @@ import (
type CentralConfigFile struct {
Interval int `json:"interval"`
Duration int `json:"duration"`
Pidfile string `json:"pidfile,omitempty"`
CollectorConfigFile string `json:"collectors"`
RouterConfigFile string `json:"router"`
SinkConfigFile string `json:"sinks"`
@@ -86,12 +87,14 @@ func ReadCli() map[string]string {
var m map[string]string
cfg := flag.String("config", "./config.json", "Path to configuration file")
logfile := flag.String("log", "stderr", "Path for logfile")
pidfile := flag.String("pidfile", "/var/run/cc-metric-collector.pid", "Path for PID file")
once := flag.Bool("once", false, "Run all collectors only once")
debug := flag.Bool("debug", false, "Activate debug output")
flag.Parse()
m = make(map[string]string)
m["configfile"] = *cfg
m["logfile"] = *logfile
m["pidfile"] = *pidfile
if *once {
m["once"] = "true"
} else {
@@ -122,6 +125,25 @@ func ReadCli() map[string]string {
// return nil
//}
//func CreatePidfile(pidfile string) error {
// file, err := os.OpenFile(pidfile, os.O_CREATE|os.O_RDWR, 0600)
// if err != nil {
// log.Print(err)
// return err
// }
// file.Write([]byte(fmt.Sprintf("%d", os.Getpid())))
// file.Close()
// return nil
//}
//func RemovePidfile(pidfile string) error {
// info, err := os.Stat(pidfile)
// if !os.IsNotExist(err) && !info.IsDir() {
// os.Remove(pidfile)
// }
// return nil
//}
// General shutdownHandler function that gets executed in case of interrupt or graceful shutdownHandler
func shutdownHandler(config *RuntimeConfig, shutdownSignal chan os.Signal) {
defer config.Sync.Done()
@@ -152,6 +174,11 @@ func shutdownHandler(config *RuntimeConfig, shutdownSignal chan os.Signal) {
cclog.Debug("Shutdown SinkManager...")
config.SinkManager.Close()
}
// pidfile := config.ConfigFile.Pidfile
// RemovePidfile(pidfile)
// pidfile = config.CliArgs["pidfile"]
// RemovePidfile(pidfile)
}
func mainFunc() int {
@@ -199,6 +226,8 @@ func mainFunc() int {
return 1
}
// err = CreatePidfile(rcfg.CliArgs["pidfile"])
// Set log file
if logfile := rcfg.CliArgs["logfile"]; logfile != "stderr" {
cclog.SetOutput(logfile)

View File

@@ -1,8 +1,8 @@
{
"natsrecv" : {
[
{
"type": "nats",
"address": "nats://my-url",
"port" : "4222",
"database": "testcluster"
}
}
]

View File

@@ -2,15 +2,10 @@ package receivers
import (
// "time"
"encoding/json"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
)
type defaultReceiverConfig struct {
Type string `json:"type"`
}
type ReceiverConfig struct {
Addr string `json:"address"`
Port string `json:"port"`
@@ -20,13 +15,16 @@ type ReceiverConfig struct {
}
type receiver struct {
typename string
name string
sink chan lp.CCMetric
name string
addr string
port string
database string
organization string
sink chan lp.CCMetric
}
type Receiver interface {
Init(name string, config json.RawMessage) error
Init(config ReceiverConfig) error
Start()
Close()
Name() string
@@ -40,3 +38,19 @@ func (r *receiver) Name() string {
func (r *receiver) SetSink(sink chan lp.CCMetric) {
r.sink = sink
}
func Tags2Map(metric influx.Metric) map[string]string {
tags := make(map[string]string)
for _, t := range metric.TagList() {
tags[t.Key] = t.Value
}
return tags
}
func Fields2Map(metric influx.Metric) map[string]interface{} {
fields := make(map[string]interface{})
for _, f := range metric.FieldList() {
fields[f.Key] = f.Value
}
return fields
}

View File

@@ -1,22 +1,19 @@
package receivers
import (
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
nats "github.com/nats-io/nats.go"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
"time"
)
type NatsReceiverConfig struct {
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
Subject string `json:"subject"`
Addr string `json:"address"`
Port string `json:"port"`
Database string `json:"database"`
}
type NatsReceiver struct {
@@ -25,35 +22,35 @@ type NatsReceiver struct {
handler *influx.MetricHandler
parser *influx.Parser
meta map[string]string
config NatsReceiverConfig
config ReceiverConfig
}
var DefaultTime = func() time.Time {
return time.Unix(42, 0)
}
func (r *NatsReceiver) Init(name string, config json.RawMessage) error {
r.typename = "NatsReceiver"
r.name = name
r.config.Addr = nats.DefaultURL
r.config.Port = "4222"
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return err
}
}
func (r *NatsReceiver) Init(config ReceiverConfig) error {
r.name = "NatsReceiver"
r.config = config
if len(r.config.Addr) == 0 ||
len(r.config.Port) == 0 ||
len(r.config.Subject) == 0 {
return errors.New("not all configuration variables set required by NatsReceiver")
len(r.config.Database) == 0 {
return errors.New("Not all configuration variables set required by NatsReceiver")
}
r.meta = map[string]string{"source": r.name}
uri := fmt.Sprintf("%s:%s", r.config.Addr, r.config.Port)
cclog.ComponentDebug(r.name, "INIT", uri, "Subject", r.config.Subject)
r.addr = r.config.Addr
if len(r.addr) == 0 {
r.addr = nats.DefaultURL
}
r.port = r.config.Port
if len(r.port) == 0 {
r.port = "4222"
}
uri := fmt.Sprintf("%s:%s", r.addr, r.port)
cclog.ComponentDebug("NatsReceiver", "INIT", uri)
nc, err := nats.Connect(uri)
if err == nil {
r.database = r.config.Database
r.nc = nc
} else {
r.nc = nil
@@ -66,8 +63,8 @@ func (r *NatsReceiver) Init(name string, config json.RawMessage) error {
}
func (r *NatsReceiver) Start() {
cclog.ComponentDebug(r.name, "START")
r.nc.Subscribe(r.config.Subject, r._NatsReceive)
cclog.ComponentDebug("NatsReceiver", "START")
r.nc.Subscribe(r.database, r._NatsReceive)
}
func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
@@ -87,7 +84,7 @@ func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
func (r *NatsReceiver) Close() {
if r.nc != nil {
cclog.ComponentDebug(r.name, "CLOSE")
cclog.ComponentDebug("NatsReceiver", "CLOSE")
r.nc.Close()
}
}

View File

@@ -18,12 +18,12 @@ type receiveManager struct {
output chan lp.CCMetric
done chan bool
wg *sync.WaitGroup
config []json.RawMessage
config []ReceiverConfig
}
type ReceiveManager interface {
Init(wg *sync.WaitGroup, receiverConfigFile string) error
AddInput(name string, rawConfig json.RawMessage) error
AddInput(rawConfig json.RawMessage) error
AddOutput(output chan lp.CCMetric)
Start()
Close()
@@ -34,7 +34,7 @@ func (rm *receiveManager) Init(wg *sync.WaitGroup, receiverConfigFile string) er
rm.output = nil
rm.done = make(chan bool)
rm.wg = wg
rm.config = make([]json.RawMessage, 0)
rm.config = make([]ReceiverConfig, 0)
configFile, err := os.Open(receiverConfigFile)
if err != nil {
cclog.ComponentError("ReceiveManager", err.Error())
@@ -42,14 +42,14 @@ func (rm *receiveManager) Init(wg *sync.WaitGroup, receiverConfigFile string) er
}
defer configFile.Close()
jsonParser := json.NewDecoder(configFile)
var rawConfigs map[string]json.RawMessage
var rawConfigs []json.RawMessage
err = jsonParser.Decode(&rawConfigs)
if err != nil {
cclog.ComponentError("ReceiveManager", err.Error())
return err
}
for name, raw := range rawConfigs {
rm.AddInput(name, raw)
for _, raw := range rawConfigs {
rm.AddInput(raw)
}
return nil
}
@@ -64,8 +64,8 @@ func (rm *receiveManager) Start() {
cclog.ComponentDebug("ReceiveManager", "STARTED")
}
func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error {
var config defaultReceiverConfig
func (rm *receiveManager) AddInput(rawConfig json.RawMessage) error {
var config ReceiverConfig
err := json.Unmarshal(rawConfig, &config)
if err != nil {
cclog.ComponentError("ReceiveManager", "SKIP", config.Type, "JSON config error:", err.Error())
@@ -76,13 +76,13 @@ func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error
return err
}
r := AvailableReceivers[config.Type]
err = r.Init(name, rawConfig)
err = r.Init(config)
if err != nil {
cclog.ComponentError("ReceiveManager", "SKIP", r.Name(), "initialization failed:", err.Error())
return err
}
rm.inputs = append(rm.inputs, r)
rm.config = append(rm.config, rawConfig)
rm.config = append(rm.config, config)
cclog.ComponentDebug("ReceiveManager", "ADD RECEIVER", r.Name())
return nil
}

View File

@@ -15,3 +15,6 @@ CONF_DIR=/etc/cc-metric-collector
CONF_FILE=/etc/cc-metric-collector/cc-metric-collector.json
RESTART_ON_UPGRADE=true
# Only used on systemd systems
PID_FILE_DIR=/var/run

View File

@@ -14,7 +14,11 @@ Restart=on-failure
WorkingDirectory=/tmp
RuntimeDirectory=cc-metric-collector
RuntimeDirectoryMode=0750
ExecStart=/usr/sbin/cc-metric-collector --config=${CONF_FILE}
ExecStart=/usr/sbin/cc-metric-collector \
--config=${CONF_FILE} \
--pidfile=${PID_FILE_DIR}/cc-metric-collector.pid
LimitNOFILE=10000
TimeoutStopSec=20
UMask=0027

View File

@@ -1,5 +1,5 @@
Name: cc-metric-collector
Version: 0.2
Version: 0.1
Release: 1%{?dist}
Summary: Metric collection daemon from the ClusterCockpit suite
@@ -26,14 +26,10 @@ make
%install
install -Dpm 0750 %{name} %{buildroot}%{_sbindir}/%{name}
install -Dpm 0755 %{name} %{buildroot}%{_sbindir}/%{name}
install -Dpm 0600 config.json %{buildroot}%{_sysconfdir}/%{name}/%{name}.json
install -Dpm 0600 collectors.json %{buildroot}%{_sysconfdir}/%{name}/collectors.json
install -Dpm 0600 sinks.json %{buildroot}%{_sysconfdir}/%{name}/sinks.json
install -Dpm 0600 receivers.json %{buildroot}%{_sysconfdir}/%{name}/receivers.json
install -Dpm 0600 router.json %{buildroot}%{_sysconfdir}/%{name}/router.json
install -Dpm 0644 scripts/%{name}.service %{buildroot}%{_unitdir}/%{name}.service
install -Dpm 0600 scripts/%{name}.config %{buildroot}%{_sysconfdir}/default/%{name}
install -Dpm 644 scripts/%{name}.service %{buildroot}%{_unitdir}/%{name}.service
install -Dpm 600 scripts/%{name}.config %{buildroot}%{_sysconfdir}/default/%{name}
%check
@@ -50,15 +46,9 @@ install -Dpm 0600 scripts/%{name}.config %{buildroot}%{_sysconfdir}/default/%{na
%{_sbindir}/%{name}
%{_unitdir}/%{name}.service
%{_sysconfdir}/default/%{name}
%attr(0600,root,root) %config(noreplace) %{_sysconfdir}/%{name}/%{name}.json
%attr(0600,root,root) %config(noreplace) %{_sysconfdir}/%{name}/collectors.json
%attr(0600,root,root) %config(noreplace) %{_sysconfdir}/%{name}/sinks.json
%attr(0600,root,root) %config(noreplace) %{_sysconfdir}/%{name}/receivers.json
%attr(0600,root,root) %config(noreplace) %{_sysconfdir}/%{name}/router.json
%config(noreplace) %{_sysconfdir}/%{name}/%{name}.json
%changelog
* Mon Feb 14 2022 Thomas Gruber - 0.2
- Add component specific configuration files
- Add %attr to config files
* Mon Nov 22 2021 Thomas Gruber - 0.1
- Initial spec file

View File

@@ -1,83 +0,0 @@
#!/usr/bin/env python3
import os, os.path, sys, getopt, re, json
def which(cmd):
ospath = os.environ.get("PATH", "")
for p in ospath.split(":"):
testcmd = os.path.join(p, cmd)
if os.access(testcmd, os.X_OK):
return testcmd
return None
def group_to_json(groupfile):
gdata = []
with open(groupfile, "r") as fp:
gdata = fp.read().strip().split("\n")
events = {}
metrics = []
parse_events = False
parse_metrics = False
for line in gdata:
if line == "EVENTSET":
parse_events = True
parse_metrics = False
continue
if line == "METRICS":
parse_events = False
parse_metrics = True
continue
if len(line) == 0 or line.startswith("SHORT") or line == "LONG":
parse_events = False
parse_metrics = False
continue
if parse_events:
m = re.match("([\w\d]+)\s+([\w\d_]+)", line)
if m:
events[m.group(1)] = m.group(2)
if parse_metrics:
llist = re.split("\s+", line)
calc = llist[-1]
metric = " ".join(llist[:-1])
scope = "hwthread"
if "BOX" in calc:
scope = "socket"
if "PWR" in calc:
scope = "socket"
m = {"name" : metric, "calc": calc, "scope" : scope, "publish" : True}
metrics.append(m)
return {"events" : events, "metrics" : metrics}
if len(sys.argv) != 3:
print("Usage: $0 <likwid-arch> <group-name>")
sys.exit(1)
arch = sys.argv[1]
group = sys.argv[2]
ltopo = which("likwid-topology")
if not ltopo:
print("Cannot find LIKWID installation. Please add LIKWID bin folder to your PATH.")
sys.exit(1)
bindir = os.path.dirname(ltopo)
groupdir = os.path.normpath(os.path.join(bindir, "../share/likwid/perfgroups"))
if not os.path.exists(groupdir):
print("Cannot find LIKWID performance groups in default install location")
sys.exit(1)
archdir = os.path.join(groupdir, arch)
if not os.path.exists(archdir):
print("Cannot find LIKWID performance groups for architecture {}".format(arch))
sys.exit(1)
groupfile = os.path.join(archdir, "{}.txt".format(group))
if not os.path.exists(groupfile):
print("Cannot find LIKWID performance group {} for architecture {}".format(group, arch))
sys.exit(1)
gdata = group_to_json(groupfile)
print(json.dumps(gdata, sort_keys=True, indent=2))

View File

@@ -1,6 +1,6 @@
{
"mystdout" : {
[
{
"type" : "stdout",
"meta_as_tags" : true
}
}
]

View File

@@ -1,12 +0,0 @@
all: libganglia.so
libganglia.so:
@find /usr ! -readable -prune -o -type d ! -executable -prune -o -name "$@*" -print0 | \
xargs --null --no-run-if-empty --replace \
ln --symbolic --verbose --force '{}' "$@"
clean:
rm -f libganglia.so
.PHONY: clean

View File

@@ -2,25 +2,17 @@
This folder contains the SinkManager and sink implementations for the cc-metric-collector.
# Available sinks:
- [`stdout`](./stdoutSink.md): Print all metrics to `stdout`, `stderr` or a file
- [`http`](./httpSink.md): Send metrics to an HTTP server as POST requests
- [`influxdb`](./influxSink.md): Send metrics to an [InfluxDB](https://www.influxdata.com/products/influxdb/) database
- [`nats`](./natsSink.md): Publish metrics to the [NATS](https://nats.io/) network overlay system
- [`ganglia`](./gangliaSink.md): Publish metrics in the [Ganglia Monitoring System](http://ganglia.info/) using the `gmetric` CLI tool
- [`libganglia`](./libgangliaSink.md): Publish metrics in the [Ganglia Monitoring System](http://ganglia.info/) directly using `libganglia.so`
# Configuration
The configuration file for the sinks is a list of configurations. The `type` field in each specifies which sink to initialize.
```json
[
"mystdout" : {
{
"type" : "stdout",
"meta_as_tags" : false
},
"metricstore" : {
{
"type" : "http",
"host" : "localhost",
"port" : "4123",
@@ -30,12 +22,74 @@ The configuration file for the sinks is a list of configurations. The `type` fie
]
```
This example initializes two sinks, the `stdout` sink printing all metrics to the STDOUT and the `http` sink with the given `host`, `port`, `database` and `password`.
If `meta_as_tags` is set, all meta information attached to CCMetric are printed out as tags.
## Type `stdout`
```json
{
"type" : "stdout",
"meta_as_tags" : <true|false>
}
```
The `stdout` sink dumps all metrics to the STDOUT.
## Type `http`
```json
{
"type" : "http",
"host" : "<hostname>",
"port" : "<portnumber>",
"database" : "<database name>",
"password" : "<jwt token>",
"meta_as_tags" : <true|false>
}
```
The sink uses POST requests to send metrics to `http://<host>:<port>/<database>` using the JWT token as a JWT in the 'Authorization' header.
## Type `nats`
```json
{
"type" : "nats",
"host" : "<hostname>",
"port" : "<portnumber>",
"user" : "<username>",
"password" : "<password>",
"database" : "<database name>"
"meta_as_tags" : <true|false>
}
```
This sink publishes the CCMetric in a NATS environment using `host`, `port`, `user` and `password` for connecting. The metrics are published using the topic `database`.
## Type `influxdb`
```json
{
"type" : "influxdb",
"host" : "<hostname>",
"port" : "<portnumber>",
"user" : "<username>",
"password" : "<password or API key>",
"database" : "<database name>"
"organization": "<InfluxDB v2 organization>",
"ssl" : <true|false>,
"meta_as_tags" : <true|false>
}
```
This sink submits the CCMetrics to an InfluxDB time-series database. It uses `host`, `port` and `ssl` for connecting. For authentification, it uses either `user:password` if `user` is set and only `password` as API key. The `organization` and `database` are used for writing to the correct database.
# Contributing own sinks
A sink contains four functions and is derived from the type `sink`:
* `Init(config json.RawMessage) error`
A sink contains three functions and is derived from the type `Sink`:
* `Init(config SinkConfig) error`
* `Write(point CCMetric) error`
* `Flush() error`
* `Close()`
@@ -43,52 +97,3 @@ A sink contains four functions and is derived from the type `sink`:
The data structures should be set up in `Init()` like opening a file or server connection. The `Write()` function writes/sends the data. For non-blocking sinks, the `Flush()` method tells the sink to drain its internal buffers. The `Close()` function should tear down anything created in `Init()`.
Finally, the sink needs to be registered in the `sinkManager.go`. There is a list of sinks called `AvailableSinks` which is a map (`sink_type_string` -> `pointer to sink interface`). Add a new entry with a descriptive name and the new sink.
## Sample sink
```go
package sinks
import (
"encoding/json"
"log"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
type SampleSinkConfig struct {
defaultSinkConfig // defines JSON tags for 'name' and 'meta_as_tags'
}
type SampleSink struct {
sink // declarate 'name' and 'meta_as_tags'
config StdoutSinkConfig // entry point to the SampleSinkConfig
}
// Initialize the sink by giving it a name and reading in the config JSON
func (s *SampleSink) Init(config json.RawMessage) error {
s.name = "SampleSink" // Always specify a name here
// Read in the config JSON
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return err
}
}
return nil
}
// Code to submit a single CCMetric to the sink
func (s *SampleSink) Write(point lp.CCMetric) error {
log.Print(point)
return nil
}
// If the sink uses batched sends internally, you can tell to flush its buffers
func (s *SampleSink) Flush() error {
return nil
}
// Close sink: close network connection, close files, close libraries, ...
func (s *SampleSink) Close() {}
```

View File

@@ -1,50 +0,0 @@
package sinks
import (
"strings"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
func GangliaMetricName(point lp.CCMetric) string {
name := point.Name()
metricType, typeOK := point.GetTag("type")
metricTid, tidOk := point.GetTag("type-id")
gangliaType := metricType + metricTid
if strings.Contains(name, metricType) && tidOk {
name = strings.Replace(name, metricType, gangliaType, -1)
} else if typeOK && tidOk {
name = metricType + metricTid + "_" + name
} else if point.HasTag("device") {
device, _ := point.GetTag("device")
name = name + "_" + device
}
return name
}
func GangliaMetricRename(point lp.CCMetric) string {
name := point.Name()
if name == "mem_total" || name == "swap_total" {
return name
} else if name == "net_bytes_in" {
return "bytes_in"
} else if name == "net_bytes_out" {
return "bytes_out"
} else if name == "net_pkts_in" {
return "pkts_in"
} else if name == "net_pkts_out" {
return "pkts_out"
} else if name == "cpu_iowait" {
return "cpu_wio"
}
return name
}
func GangliaSlopeType(point lp.CCMetric) uint {
name := point.Name()
if name == "mem_total" || name == "swap_total" {
return 0
}
return 3
}

View File

@@ -1,69 +1,27 @@
package sinks
import (
"encoding/json"
"errors"
"fmt"
"log"
"strings"
// "time"
"os/exec"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const GMETRIC_EXEC = `gmetric`
const GMETRIC_CONFIG = `/etc/ganglia/gmond.conf`
type GangliaSinkConfig struct {
defaultSinkConfig
GmetricPath string `json:"gmetric_path,omitempty"`
GmetricConfig string `json:"gmetric_config,omitempty"`
AddGangliaGroup bool `json:"add_ganglia_group,omitempty"`
AddTagsAsDesc bool `json:"add_tags_as_desc,omitempty"`
ClusterName string `json:"cluster_name,omitempty"`
AddTypeToName bool `json:"add_type_to_name,omitempty"`
}
type GangliaSink struct {
sink
gmetric_path string
gmetric_config string
config GangliaSinkConfig
Sink
gmetric_path string
}
func (s *GangliaSink) Init(config json.RawMessage) error {
var err error = nil
s.name = "GangliaSink"
s.config.AddTagsAsDesc = false
s.config.AddGangliaGroup = false
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return err
}
}
s.gmetric_path = ""
s.gmetric_config = ""
if len(s.config.GmetricPath) > 0 {
p, err := exec.LookPath(s.config.GmetricPath)
if err == nil {
s.gmetric_path = p
}
}
if len(s.gmetric_path) == 0 {
p, err := exec.LookPath(string(GMETRIC_EXEC))
if err == nil {
s.gmetric_path = p
}
}
if len(s.gmetric_path) == 0 {
err = errors.New("cannot find executable 'gmetric'")
}
if len(s.config.GmetricConfig) > 0 {
s.gmetric_config = s.config.GmetricConfig
func (s *GangliaSink) Init(config sinkConfig) error {
p, err := exec.LookPath(string(GMETRIC_EXEC))
if err == nil {
s.gmetric_path = p
}
return err
}
@@ -72,80 +30,47 @@ func (s *GangliaSink) Write(point lp.CCMetric) error {
var err error = nil
var tagsstr []string
var argstr []string
if s.config.AddGangliaGroup {
if point.HasTag("group") {
g, _ := point.GetTag("group")
argstr = append(argstr, fmt.Sprintf("--group=%s", g))
} else if point.HasMeta("group") {
g, _ := point.GetMeta("group")
argstr = append(argstr, fmt.Sprintf("--group=%s", g))
}
}
for key, value := range point.Tags() {
switch key {
for _, t := range point.TagList() {
switch t.Key {
case "cluster":
argstr = append(argstr, fmt.Sprintf("--cluster=%s", t.Value))
case "unit":
argstr = append(argstr, fmt.Sprintf("--units=%s", value))
argstr = append(argstr, fmt.Sprintf("--units=%s", t.Value))
case "group":
argstr = append(argstr, fmt.Sprintf("--group=%s", t.Value))
default:
tagsstr = append(tagsstr, fmt.Sprintf("%s=%s", key, value))
tagsstr = append(tagsstr, fmt.Sprintf("%s=%s", t.Key, t.Value))
}
}
if s.config.MetaAsTags {
for key, value := range point.Meta() {
switch key {
case "unit":
argstr = append(argstr, fmt.Sprintf("--units=%s", value))
default:
tagsstr = append(tagsstr, fmt.Sprintf("%s=%s", key, value))
}
}
}
if len(s.config.ClusterName) > 0 {
argstr = append(argstr, fmt.Sprintf("--cluster=%s", s.config.ClusterName))
}
if s.config.AddTagsAsDesc && len(tagsstr) > 0 {
if len(tagsstr) > 0 {
argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
}
if len(s.gmetric_config) > 0 {
argstr = append(argstr, fmt.Sprintf("--conf=%s", s.gmetric_config))
}
name := GangliaMetricRename(point)
if s.config.AddTypeToName {
argstr = append(argstr, fmt.Sprintf("--name=%s", GangliaMetricName(point)))
} else {
argstr = append(argstr, fmt.Sprintf("--name=%s", name))
}
slope := GangliaSlopeType(point)
slopeStr := "both"
if slope == 0 {
slopeStr = "zero"
}
argstr = append(argstr, fmt.Sprintf("--slope=%s", slopeStr))
for k, v := range point.Fields() {
if k == "value" {
switch value := v.(type) {
argstr = append(argstr, fmt.Sprintf("--name=%s", point.Name()))
for _, f := range point.FieldList() {
if f.Key == "value" {
switch f.Value.(type) {
case float64:
argstr = append(argstr,
fmt.Sprintf("--value=%v", value), "--type=double")
argstr = append(argstr, fmt.Sprintf("--value=%v", f.Value.(float64)))
argstr = append(argstr, "--type=double")
case float32:
argstr = append(argstr,
fmt.Sprintf("--value=%v", value), "--type=float")
argstr = append(argstr, fmt.Sprintf("--value=%v", f.Value.(float32)))
argstr = append(argstr, "--type=float")
case int:
argstr = append(argstr,
fmt.Sprintf("--value=%d", value), "--type=int32")
argstr = append(argstr, fmt.Sprintf("--value=%d", f.Value.(int)))
argstr = append(argstr, "--type=int32")
case int64:
argstr = append(argstr,
fmt.Sprintf("--value=%d", value), "--type=int32")
argstr = append(argstr, fmt.Sprintf("--value=%d", f.Value.(int64)))
argstr = append(argstr, "--type=int32")
case string:
argstr = append(argstr,
fmt.Sprintf("--value=%q", value), "--type=string")
argstr = append(argstr, fmt.Sprintf("--value=%q", f.Value.(string)))
argstr = append(argstr, "--type=string")
}
}
}
command := exec.Command(s.gmetric_path, argstr...)
command.Wait()
_, err = command.Output()
log.Print(s.gmetric_path, " ", strings.Join(argstr, " "))
// command := exec.Command(string(GMETRIC_EXEC), strings.Join(argstr, " "))
// command.Wait()
// _, err := command.Output()
return err
}

View File

@@ -1,21 +0,0 @@
## `ganglia` sink
The `ganglia` sink uses the `gmetric` tool of the [Ganglia Monitoring System](http://ganglia.info/) to submit the metrics
### Configuration structure
```json
{
"<name>": {
"type": "ganglia",
"meta_as_tags" : true,
"gmetric_path" : "/path/to/gmetric",
"add_ganglia_group" : true
}
}
```
- `type`: makes the sink an `ganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmetric_path`: Path to `gmetric` executable (optional). If not given, the sink searches in `$PATH` for `gmetric`.
- `add_ganglia_group`: Add `--group=X` based on meta information to the `gmetric` call. Some old versions of `gmetric` do not support the `--group` option.

View File

@@ -2,86 +2,33 @@ package sinks
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
)
type HttpSinkConfig struct {
defaultSinkConfig
URL string `json:"url,omitempty"`
JWT string `json:"jwt,omitempty"`
Timeout string `json:"timeout,omitempty"`
MaxIdleConns int `json:"max_idle_connections,omitempty"`
IdleConnTimeout string `json:"idle_connection_timeout,omitempty"`
FlushDelay string `json:"flush_delay,omitempty"`
}
type HttpSink struct {
sink
client *http.Client
encoder *influx.Encoder
lock sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
buffer *bytes.Buffer
flushTimer *time.Timer
config HttpSinkConfig
maxIdleConns int
idleConnTimeout time.Duration
timeout time.Duration
flushDelay time.Duration
client *http.Client
url, jwt string
encoder *influx.Encoder
buffer *bytes.Buffer
}
func (s *HttpSink) Init(config json.RawMessage) error {
// Set default values
func (s *HttpSink) Init(config sinkConfig) error {
s.name = "HttpSink"
s.config.MaxIdleConns = 10
s.config.IdleConnTimeout = "5s"
s.config.Timeout = "5s"
s.config.FlushDelay = "1s"
if len(config.Host) == 0 || len(config.Port) == 0 || len(config.Database) == 0 {
return errors.New("`host`, `port` and `database` config options required for TCP sink")
}
// Read config
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return err
}
}
if len(s.config.URL) == 0 {
return errors.New("`url` config option is required for HTTP sink")
}
if s.config.MaxIdleConns > 0 {
s.maxIdleConns = s.config.MaxIdleConns
}
if len(s.config.IdleConnTimeout) > 0 {
t, err := time.ParseDuration(s.config.IdleConnTimeout)
if err == nil {
s.idleConnTimeout = t
}
}
if len(s.config.Timeout) > 0 {
t, err := time.ParseDuration(s.config.Timeout)
if err == nil {
s.timeout = t
}
}
if len(s.config.FlushDelay) > 0 {
t, err := time.ParseDuration(s.config.FlushDelay)
if err == nil {
s.flushDelay = t
}
}
tr := &http.Transport{
MaxIdleConns: s.maxIdleConns,
IdleConnTimeout: s.idleConnTimeout,
}
s.client = &http.Client{Transport: tr, Timeout: s.timeout}
s.client = &http.Client{}
s.url = fmt.Sprintf("http://%s:%s/%s", config.Host, config.Port, config.Database)
s.port = config.Port
s.jwt = config.Password
s.buffer = &bytes.Buffer{}
s.encoder = influx.NewEncoder(s.buffer)
s.encoder.SetPrecision(time.Second)
@@ -89,73 +36,29 @@ func (s *HttpSink) Init(config json.RawMessage) error {
return nil
}
func (s *HttpSink) Write(m lp.CCMetric) error {
if s.buffer.Len() == 0 && s.flushDelay != 0 {
// This is the first write since the last flush, start the flushTimer!
if s.flushTimer != nil && s.flushTimer.Stop() {
cclog.ComponentDebug("HttpSink", "unexpected: the flushTimer was already running?")
}
// Run a batched flush for all lines that have arrived in the last second
s.flushTimer = time.AfterFunc(s.flushDelay, func() {
if err := s.Flush(); err != nil {
cclog.ComponentError("HttpSink", "flush failed:", err.Error())
}
})
}
p := m.ToPoint(s.config.MetaAsTags)
s.lock.Lock()
_, err := s.encoder.Encode(p)
s.lock.Unlock() // defer does not work here as Flush() takes the lock as well
if err != nil {
return err
}
// Flush synchronously if "flush_delay" is zero
if s.flushDelay == 0 {
return s.Flush()
}
func (s *HttpSink) Write(point lp.CCMetric) error {
_, err := s.encoder.Encode(point)
return err
}
func (s *HttpSink) Flush() error {
// buffer is read by client.Do, prevent concurrent modifications
s.lock.Lock()
defer s.lock.Unlock()
// Do not flush empty buffer
if s.buffer.Len() == 0 {
return nil
}
// Create new request to send buffer
req, err := http.NewRequest(http.MethodPost, s.config.URL, s.buffer)
req, err := http.NewRequest(http.MethodPost, s.url, s.buffer)
if err != nil {
return err
}
// Set authorization header
if len(s.config.JWT) != 0 {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.config.JWT))
if len(s.jwt) != 0 {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.jwt))
}
// Send
res, err := s.client.Do(req)
// Clear buffer
s.buffer.Reset()
// Handle transport/tcp errors
if err != nil {
return err
}
// Handle application errors
if res.StatusCode != http.StatusOK {
if res.StatusCode != 200 {
return errors.New(res.Status)
}
@@ -163,9 +66,5 @@ func (s *HttpSink) Flush() error {
}
func (s *HttpSink) Close() {
s.flushTimer.Stop()
if err := s.Flush(); err != nil {
cclog.ComponentError("HttpSink", "flush failed:", err.Error())
}
s.client.CloseIdleConnections()
}

View File

@@ -1,29 +0,0 @@
## `http` sink
The `http` sink uses POST requests to a HTTP server to submit the metrics in the InfluxDB line-protocol format. It uses JSON web tokens for authentification. The sink creates batches of metrics before sending, to reduce the HTTP traffic.
### Configuration structure
```json
{
"<name>": {
"type": "http",
"meta_as_tags" : true,
"url" : "https://my-monitoring.example.com:1234/api/write",
"jwt" : "blabla.blabla.blabla",
"timeout": "5s",
"max_idle_connections" : 10,
"idle_connection_timeout" : "5s",
"flush_delay": "2s",
}
}
```
- `type`: makes the sink an `http` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `url`: The full URL of the endpoint
- `jwt`: JSON web tokens for authentification (Using the *Bearer* scheme)
- `timeout`: General timeout for the HTTP client (default '5s')
- `max_idle_connections`: Maximally idle connections (default 10)
- `idle_connection_timeout`: Timeout for idle connections (default '5s')
- `flush_delay`: Batch all writes arriving in during this duration (default '1s', batching can be disabled by setting it to 0)

View File

@@ -1,120 +0,0 @@
package sinks
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
)
type InfluxAsyncSinkConfig struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
Organization string `json:"organization,omitempty"`
SSL bool `json:"ssl,omitempty"`
RetentionPol string `json:"retention_policy,omitempty"`
// Maximum number of points sent to server in single request. Default 5000
BatchSize uint `json:"batch_size,omitempty"`
// Interval, in ms, in which is buffer flushed if it has not been already written (by reaching batch size) . Default 1000ms
FlushInterval uint `json:"flush_interval,omitempty"`
}
type InfluxAsyncSink struct {
sink
client influxdb2.Client
writeApi influxdb2Api.WriteAPI
retPolicy string
errors <-chan error
config InfluxAsyncSinkConfig
}
func (s *InfluxAsyncSink) connect() error {
var auth string
var uri string
if s.config.SSL {
uri = fmt.Sprintf("https://%s:%s", s.config.Host, s.config.Port)
} else {
uri = fmt.Sprintf("http://%s:%s", s.config.Host, s.config.Port)
}
if len(s.config.User) == 0 {
auth = s.config.Password
} else {
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
}
cclog.ComponentDebug(s.name, "Using URI", uri, "Org", s.config.Organization, "Bucket", s.config.Database)
clientOptions := influxdb2.DefaultOptions()
if s.config.BatchSize != 0 {
clientOptions.SetBatchSize(s.config.BatchSize)
}
if s.config.FlushInterval != 0 {
clientOptions.SetFlushInterval(s.config.FlushInterval)
}
clientOptions.SetTLSConfig(
&tls.Config{
InsecureSkipVerify: true,
},
)
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
s.writeApi = s.client.WriteAPI(s.config.Organization, s.config.Database)
return nil
}
func (s *InfluxAsyncSink) Init(config json.RawMessage) error {
s.name = "InfluxSink"
// Set default for maximum number of points sent to server in single request.
s.config.BatchSize = 100
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return err
}
}
if len(s.config.Host) == 0 ||
len(s.config.Port) == 0 ||
len(s.config.Database) == 0 ||
len(s.config.Organization) == 0 ||
len(s.config.Password) == 0 {
return errors.New("not all configuration variables set required by InfluxAsyncSink")
}
// Connect to InfluxDB server
err := s.connect()
// Start background: Read from error channel
s.errors = s.writeApi.Errors()
go func() {
for err := range s.errors {
cclog.ComponentError(s.name, err.Error())
}
}()
return err
}
func (s *InfluxAsyncSink) Write(m lp.CCMetric) error {
s.writeApi.WritePoint(
m.ToPoint(s.config.MetaAsTags),
)
return nil
}
func (s *InfluxAsyncSink) Flush() error {
s.writeApi.Flush()
return nil
}
func (s *InfluxAsyncSink) Close() {
cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
s.writeApi.Flush()
s.client.Close()
}

View File

@@ -1,34 +0,0 @@
## `influxasync` sink
The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go.dev/github.com/influxdata/influxdb-client-go/v2) to write the metrics to an InfluxDB database in a **non-blocking** fashion. It provides only support for V2 write endpoints (InfluxDB 1.8.0 or later).
### Configuration structure
```json
{
"<name>": {
"type": "influxasync",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
"user": "exampleuser",
"password" : "examplepw",
"organization": "myorg",
"ssl": true,
"batch_size": 200,
}
}
```
- `type`: makes the sink an `influxdb` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are written to this bucket
- `host`: Hostname of the InfluxDB database server
- `port`: Portnumber (as string) of the InfluxDB database server
- `user`: Username for basic authentification
- `password`: Password for basic authentification
- `organization`: Organization in the InfluxDB
- `ssl`: Use SSL connection
- `batch_size`: batch up metrics internally, default 100

View File

@@ -3,86 +3,77 @@ package sinks
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
"log"
)
type InfluxSinkConfig struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
Organization string `json:"organization,omitempty"`
SSL bool `json:"ssl,omitempty"`
RetentionPol string `json:"retention_policy,omitempty"`
}
type InfluxSink struct {
sink
client influxdb2.Client
writeApi influxdb2Api.WriteAPIBlocking
config InfluxSinkConfig
client influxdb2.Client
writeApi influxdb2Api.WriteAPIBlocking
retPolicy string
}
func (s *InfluxSink) connect() error {
var auth string
var uri string
if s.config.SSL {
uri = fmt.Sprintf("https://%s:%s", s.config.Host, s.config.Port)
if s.ssl {
uri = fmt.Sprintf("https://%s:%s", s.host, s.port)
} else {
uri = fmt.Sprintf("http://%s:%s", s.config.Host, s.config.Port)
uri = fmt.Sprintf("http://%s:%s", s.host, s.port)
}
if len(s.config.User) == 0 {
auth = s.config.Password
if len(s.user) == 0 {
auth = s.password
} else {
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
auth = fmt.Sprintf("%s:%s", s.user, s.password)
}
cclog.ComponentDebug(s.name, "Using URI", uri, "Org", s.config.Organization, "Bucket", s.config.Database)
clientOptions := influxdb2.DefaultOptions()
clientOptions.SetTLSConfig(
&tls.Config{
InsecureSkipVerify: true,
},
)
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
s.writeApi = s.client.WriteAPIBlocking(s.config.Organization, s.config.Database)
log.Print("Using URI ", uri, " Org ", s.organization, " Bucket ", s.database)
s.client = influxdb2.NewClientWithOptions(uri, auth,
influxdb2.DefaultOptions().SetTLSConfig(&tls.Config{InsecureSkipVerify: true}))
s.writeApi = s.client.WriteAPIBlocking(s.organization, s.database)
return nil
}
func (s *InfluxSink) Init(config json.RawMessage) error {
func (s *InfluxSink) Init(config sinkConfig) error {
s.name = "InfluxSink"
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return err
}
if len(config.Host) == 0 ||
len(config.Port) == 0 ||
len(config.Database) == 0 ||
len(config.Organization) == 0 ||
len(config.Password) == 0 {
return errors.New("Not all configuration variables set required by InfluxSink")
}
if len(s.config.Host) == 0 ||
len(s.config.Port) == 0 ||
len(s.config.Database) == 0 ||
len(s.config.Organization) == 0 ||
len(s.config.Password) == 0 {
return errors.New("not all configuration variables set required by InfluxSink")
}
// Connect to InfluxDB server
s.host = config.Host
s.port = config.Port
s.database = config.Database
s.organization = config.Organization
s.user = config.User
s.password = config.Password
s.ssl = config.SSL
s.meta_as_tags = config.MetaAsTags
return s.connect()
}
func (s *InfluxSink) Write(m lp.CCMetric) error {
err :=
s.writeApi.WritePoint(
context.Background(),
m.ToPoint(s.config.MetaAsTags),
)
func (s *InfluxSink) Write(point lp.CCMetric) error {
tags := map[string]string{}
fields := map[string]interface{}{}
for _, t := range point.TagList() {
tags[t.Key] = t.Value
}
if s.meta_as_tags {
for _, m := range point.MetaList() {
tags[m.Key] = m.Value
}
}
for _, f := range point.FieldList() {
fields[f.Key] = f.Value
}
p := influxdb2.NewPoint(point.Name(), tags, fields, point.Time())
err := s.writeApi.WritePoint(context.Background(), p)
return err
}
@@ -91,6 +82,6 @@ func (s *InfluxSink) Flush() error {
}
func (s *InfluxSink) Close() {
cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
log.Print("Closing InfluxDB connection")
s.client.Close()
}

View File

@@ -1,32 +0,0 @@
## `influxdb` sink
The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.dev/github.com/influxdata/influxdb-client-go/v2) to write the metrics to an InfluxDB database in a **blocking** fashion. It provides only support for V2 write endpoints (InfluxDB 1.8.0 or later).
### Configuration structure
```json
{
"<name>": {
"type": "influxdb",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
"user": "exampleuser",
"password" : "examplepw",
"organization": "myorg",
"ssl": true,
}
}
```
- `type`: makes the sink an `influxdb` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are written to this bucket
- `host`: Hostname of the InfluxDB database server
- `port`: Port number (as string) of the InfluxDB database server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
- `organization`: Organization in the InfluxDB
- `ssl`: Use SSL connection

View File

@@ -1,309 +0,0 @@
package sinks
/*
#cgo CFLAGS: -DGM_PROTOCOL_GUARD
#cgo LDFLAGS: -L. -Wl,--unresolved-symbols=ignore-in-object-files
#include <stdlib.h>
// This is a copy&paste snippet of ganglia.h (BSD-3 license)
// See https://github.com/ganglia/monitor-core
// for further information
enum ganglia_slope {
GANGLIA_SLOPE_ZERO = 0,
GANGLIA_SLOPE_POSITIVE,
GANGLIA_SLOPE_NEGATIVE,
GANGLIA_SLOPE_BOTH,
GANGLIA_SLOPE_UNSPECIFIED,
GANGLIA_SLOPE_DERIVATIVE,
GANGLIA_SLOPE_LAST_LEGAL_VALUE=GANGLIA_SLOPE_DERIVATIVE
};
typedef enum ganglia_slope ganglia_slope_t;
typedef struct Ganglia_pool* Ganglia_pool;
typedef struct Ganglia_gmond_config* Ganglia_gmond_config;
typedef struct Ganglia_udp_send_channels* Ganglia_udp_send_channels;
struct Ganglia_metric {
Ganglia_pool pool;
struct Ganglia_metadata_message *msg;
char *value;
void *extra;
};
typedef struct Ganglia_metric * Ganglia_metric;
#ifdef __cplusplus
extern "C" {
#endif
Ganglia_gmond_config Ganglia_gmond_config_create(char *path, int fallback_to_default);
//void Ganglia_gmond_config_destroy(Ganglia_gmond_config config);
Ganglia_udp_send_channels Ganglia_udp_send_channels_create(Ganglia_pool p, Ganglia_gmond_config config);
void Ganglia_udp_send_channels_destroy(Ganglia_udp_send_channels channels);
int Ganglia_udp_send_message(Ganglia_udp_send_channels channels, char *buf, int len );
Ganglia_metric Ganglia_metric_create( Ganglia_pool parent_pool );
int Ganglia_metric_set( Ganglia_metric gmetric, char *name, char *value, char *type, char *units, unsigned int slope, unsigned int tmax, unsigned int dmax);
int Ganglia_metric_send( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels );
//int Ganglia_metadata_send( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels );
//int Ganglia_metadata_send_real( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels, char *override_string );
void Ganglia_metadata_add( Ganglia_metric gmetric, char *name, char *value );
//int Ganglia_value_send( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels );
void Ganglia_metric_destroy( Ganglia_metric gmetric );
Ganglia_pool Ganglia_pool_create( Ganglia_pool parent );
void Ganglia_pool_destroy( Ganglia_pool pool );
//ganglia_slope_t cstr_to_slope(const char* str);
//const char* slope_to_cstr(unsigned int slope);
#ifdef __cplusplus
}
#endif
*/
import "C"
import (
"encoding/json"
"errors"
"fmt"
"unsafe"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/dl"
)
const (
GANGLIA_LIB_NAME = "libganglia.so"
GANGLIA_LIB_DL_FLAGS = dl.RTLD_LAZY | dl.RTLD_GLOBAL
GMOND_CONFIG_FILE = `/etc/ganglia/gmond.conf`
)
type LibgangliaSinkSpecialMetric struct {
MetricName string `json:"metric_name,omitempty"`
NewName string `json:"new_name,omitempty"`
Slope string `json:"slope,omitempty"`
}
type LibgangliaSinkConfig struct {
defaultSinkConfig
GangliaLib string `json:"libganglia_path,omitempty"`
GmondConfig string `json:"gmond_config,omitempty"`
AddGangliaGroup bool `json:"add_ganglia_group,omitempty"`
AddTypeToName bool `json:"add_type_to_name,omitempty"`
AddUnits bool `json:"add_units,omitempty"`
ClusterName string `json:"cluster_name,omitempty"`
SpecialMetrics map[string]LibgangliaSinkSpecialMetric `json:"rename_metrics,omitempty"` // Map to rename metric name from key to value
//AddTagsAsDesc bool `json:"add_tags_as_desc,omitempty"`
}
type LibgangliaSink struct {
sink
config LibgangliaSinkConfig
global_context C.Ganglia_pool
gmond_config C.Ganglia_gmond_config
send_channels C.Ganglia_udp_send_channels
cstrCache map[string]*C.char
}
func (s *LibgangliaSink) Init(config json.RawMessage) error {
var err error = nil
s.name = "LibgangliaSink"
//s.config.AddTagsAsDesc = false
s.config.AddGangliaGroup = false
s.config.AddTypeToName = false
s.config.AddUnits = true
s.config.GmondConfig = string(GMOND_CONFIG_FILE)
s.config.GangliaLib = string(GANGLIA_LIB_NAME)
if len(config) > 0 {
err = json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
return err
}
}
lib := dl.New(s.config.GangliaLib, GANGLIA_LIB_DL_FLAGS)
if lib == nil {
return fmt.Errorf("error instantiating DynamicLibrary for %s", s.config.GangliaLib)
}
err = lib.Open()
if err != nil {
return fmt.Errorf("error opening %s: %v", s.config.GangliaLib, err)
}
// Set up cache for the C strings
s.cstrCache = make(map[string]*C.char)
// s.cstrCache["globals"] = C.CString("globals")
// s.cstrCache["override_hostname"] = C.CString("override_hostname")
// s.cstrCache["override_ip"] = C.CString("override_ip")
// Add some constant strings
s.cstrCache["GROUP"] = C.CString("GROUP")
s.cstrCache["CLUSTER"] = C.CString("CLUSTER")
s.cstrCache[""] = C.CString("")
// Add cluster name for lookup in Write()
if len(s.config.ClusterName) > 0 {
s.cstrCache[s.config.ClusterName] = C.CString(s.config.ClusterName)
}
// Add supported types for later lookup in Write()
s.cstrCache["double"] = C.CString("double")
s.cstrCache["int32"] = C.CString("int32")
s.cstrCache["string"] = C.CString("string")
// Create Ganglia pool
s.global_context = C.Ganglia_pool_create(nil)
// Load Ganglia configuration
s.cstrCache[s.config.GmondConfig] = C.CString(s.config.GmondConfig)
s.gmond_config = C.Ganglia_gmond_config_create(s.cstrCache[s.config.GmondConfig], 0)
//globals := C.cfg_getsec(gmond_config, s.cstrCache["globals"])
//override_hostname := C.cfg_getstr(globals, s.cstrCache["override_hostname"])
//override_ip := C.cfg_getstr(globals, s.cstrCache["override_ip"])
s.send_channels = C.Ganglia_udp_send_channels_create(s.global_context, s.gmond_config)
return nil
}
func (s *LibgangliaSink) Write(point lp.CCMetric) error {
var err error = nil
var c_name *C.char
var c_value *C.char
var c_type *C.char
var c_unit *C.char
// helper function for looking up C strings in the cache
lookup := func(key string) *C.char {
if _, exist := s.cstrCache[key]; !exist {
s.cstrCache[key] = C.CString(key)
}
return s.cstrCache[key]
}
// Get metric name
metricname := GangliaMetricRename(point)
if s.config.AddTypeToName {
c_name = lookup(GangliaMetricName(point))
} else {
c_name = lookup(metricname)
}
// Get the value C string and lookup the type string in the cache
value, ok := point.GetField("value")
if !ok {
return fmt.Errorf("metric %s has no 'value' field", metricname)
}
switch real := value.(type) {
case float64:
c_value = C.CString(fmt.Sprintf("%f", real))
c_type = lookup("double")
case float32:
c_value = C.CString(fmt.Sprintf("%f", real))
c_type = lookup("float")
case int64:
c_value = C.CString(fmt.Sprintf("%d", real))
c_type = lookup("int32")
case int32:
c_value = C.CString(fmt.Sprintf("%d", real))
c_type = lookup("int32")
case int:
c_value = C.CString(fmt.Sprintf("%d", real))
c_type = lookup("int32")
case string:
c_value = C.CString(real)
c_type = lookup("string")
default:
return fmt.Errorf("metric %s has invalid 'value' type for %s", point.Name(), s.name)
}
// Add unit
if s.config.AddUnits {
if tagunit, tagok := point.GetTag("unit"); tagok {
c_unit = lookup(tagunit)
} else if metaunit, metaok := point.GetMeta("unit"); metaok {
c_unit = lookup(metaunit)
} else {
c_unit = lookup("")
}
} else {
c_unit = lookup("")
}
// Determine the slope of the metric. Ganglia's own collector mostly use
// 'both' but the mem and swap total uses 'zero'.
slope := GangliaSlopeType(point)
slope_type := C.GANGLIA_SLOPE_BOTH
switch slope {
case 0:
slope_type = C.GANGLIA_SLOPE_ZERO
}
// Create a new Ganglia metric
gmetric := C.Ganglia_metric_create(s.global_context)
// Set name, value, type and unit in the Ganglia metric
// Since we don't have this information from the collectors,
// we assume that the metric value can go up and down (slope),
// and there is no maximum for 'dmax' and 'tmax'.
// Ganglia's collectors set 'tmax' but not 'dmax'
rval := C.int(0)
rval = C.Ganglia_metric_set(gmetric, c_name, c_value, c_type, c_unit, C.uint(slope_type), 0, 0)
switch rval {
case 1:
C.free(unsafe.Pointer(c_value))
return errors.New("invalid parameters")
case 2:
C.free(unsafe.Pointer(c_value))
return errors.New("one of your parameters has an invalid character '\"'")
case 3:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the type parameter \"%s\" is not a valid type", C.GoString(c_type))
case 4:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the value parameter \"%s\" does not represent a number", C.GoString(c_value))
default:
}
// Set the cluster name, otherwise it takes it from the configuration file
if len(s.config.ClusterName) > 0 {
C.Ganglia_metadata_add(gmetric, lookup("CLUSTER"), lookup(s.config.ClusterName))
}
// Set the group metadata in the Ganglia metric if configured
if group, ok := point.GetMeta("group"); ok && s.config.AddGangliaGroup {
c_group := lookup(group)
C.Ganglia_metadata_add(gmetric, lookup("GROUP"), c_group)
}
// Now we send the metric
// gmetric does provide some more options like description and other options
// but they are not provided by the collectors
rval = C.Ganglia_metric_send(gmetric, s.send_channels)
if rval != 0 {
err = fmt.Errorf("there was an error sending metric %s to %d of the send channels ", point.Name(), rval)
// fall through to use Ganglia_metric_destroy from common cleanup
}
// Cleanup Ganglia metric
C.Ganglia_metric_destroy(gmetric)
// Free the value C string, the only one not stored in the cache
C.free(unsafe.Pointer(c_value))
return err
}
func (s *LibgangliaSink) Flush() error {
return nil
}
func (s *LibgangliaSink) Close() {
// Destroy Ganglia configuration struct
// (not done by gmetric, I thought I am more clever but no...)
//C.Ganglia_gmond_config_destroy(s.gmond_config)
// Destroy Ganglia pool
C.Ganglia_pool_destroy(s.global_context)
// Cleanup C string cache
for _, cstr := range s.cstrCache {
C.free(unsafe.Pointer(cstr))
}
}

View File

@@ -1,41 +0,0 @@
## `libganglia` sink
The `libganglia` sink interacts directly with the library of the [Ganglia Monitoring System](http://ganglia.info/) to submit the metrics. Consequently, the library needs to be installed on all nodes. This is commonly the case anyway when using Ganglia, because at least the node daemon (`gmond` or `ganglia-monitor`) is required on every node.
The `libganglia` sink likely has less overhead than the `ganglia` sink because it does not spawn a process per metric but initializes the environment and UDP connections only once.
### Configuration structure
```json
{
"<name>": {
"type": "libganglia",
"gmetric_config" : "/path/to/gmetric/config",
"cluster_name": "MyCluster",
"add_ganglia_group" : true,
"add_type_to_name": true,
"add_units" : true
}
}
```
- `type`: makes the sink a `libganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmond_config`: Path to the Ganglia configuration file `gmond.conf` (default: `/etc/ganglia/gmond.conf`)
- `cluster_name`: Set a cluster name for the metric. If not set, it is taken from `gmond_config`
- `add_ganglia_group`: Add a Ganglia metric group based on meta information. Some old versions of `gmetric` do not support the `--group` option
- `add_type_to_name`: Ganglia commonly uses only node-level metrics, but cc-metric-collector also provides metrics for cpus, memory domains, CPU sockets and the whole node. To keep these apart, this option prefixes the metric name with `<type><type-id>_` or `device_` depending on the metric tags and meta information (see the sketch after this list). For metrics of the whole node (`type=node`), no prefix is added
- `add_units`: Add metric value unit if there is a `unit` entry in the metric tags or meta information
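The effect of `add_type_to_name` can be pictured with a small sketch. This is an illustration only and not the collector's actual `GangliaMetricName` helper; the function name and the exact prefixing rule for non-node types are assumptions:
```go
package main

import "fmt"

// gangliaName illustrates the idea behind add_type_to_name: metrics of
// sub-node types get a "<type><type-id>_" prefix, node-level metrics
// keep their plain name. (Sketch only, not the repository's helper.)
func gangliaName(name, mtype, typeid string) string {
	if mtype == "node" || mtype == "" {
		return name
	}
	return fmt.Sprintf("%s%s_%s", mtype, typeid, name)
}

func main() {
	fmt.Println(gangliaName("cpu_load", "cpu", "3"))  // cpu3_cpu_load
	fmt.Println(gangliaName("mem_used", "node", "0")) // mem_used
}
```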
### Ganglia Installation
My development system is Ubuntu 20.04. To install the required libraries with `apt`:
```
$ sudo apt install libganglia1
```
The `libganglia.so` gets installed in `/usr/lib`. The Ganglia headers `libganglia1-dev` are **not** required.
I added a `Makefile` in the `sinks` subfolder that searches for the library in `/usr` and creates a symlink (`sinks/libganglia.so`) for running/building the cc-metric-collector. So just type `make` before running/building in the main folder or the `sinks` subfolder.

View File

@@ -1,23 +1,36 @@
package sinks
import (
"encoding/json"
// "time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
type defaultSinkConfig struct {
MetaAsTags bool `json:"meta_as_tags,omitempty"`
Type string `json:"type"`
type sinkConfig struct {
Type string `json:"type"`
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
Organization string `json:"organization,omitempty"`
SSL bool `json:"ssl,omitempty"`
MetaAsTags bool `json:"meta_as_tags,omitempty"`
}
type sink struct {
host string
port string
user string
password string
database string
organization string
ssl bool
meta_as_tags bool
name string
}
type Sink interface {
Init(config json.RawMessage) error
Init(config sinkConfig) error
Write(point lp.CCMetric) error
Flush() error
Close()
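For orientation, a minimal sketch of a sink implementing this interface. The `NullSink` below is hypothetical and not part of the repository; it assumes the embedded `sink` struct provides any remaining helpers (such as a `Name()` method used by the sink manager):
```go
package sinks

import (
	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)

// NullSink is a hypothetical no-op sink used only to illustrate the interface.
type NullSink struct {
	sink // embeds name, meta_as_tags and the connection fields
}

func (s *NullSink) Init(config sinkConfig) error {
	s.name = "NullSink"
	s.meta_as_tags = config.MetaAsTags
	return nil
}

// Write discards the metric; a real sink would forward it to its backend.
func (s *NullSink) Write(point lp.CCMetric) error { return nil }

// Flush has nothing buffered to push out.
func (s *NullSink) Flush() error { return nil }

// Close has no connection to tear down.
func (s *NullSink) Close() {}
```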

View File

@@ -2,71 +2,49 @@ package sinks
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
nats "github.com/nats-io/nats.go"
"log"
"time"
)
type NatsSinkConfig struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
}
type NatsSink struct {
sink
client *nats.Conn
encoder *influx.Encoder
buffer *bytes.Buffer
config NatsSinkConfig
}
func (s *NatsSink) connect() error {
var err error
var uinfo nats.Option = nil
var nc *nats.Conn
if len(s.config.User) > 0 && len(s.config.Password) > 0 {
uinfo = nats.UserInfo(s.config.User, s.config.Password)
}
uri := fmt.Sprintf("nats://%s:%s", s.config.Host, s.config.Port)
cclog.ComponentDebug(s.name, "Connect to", uri)
uinfo := nats.UserInfo(s.user, s.password)
uri := fmt.Sprintf("nats://%s:%s", s.host, s.port)
log.Print("Using URI ", uri)
s.client = nil
if uinfo != nil {
nc, err = nats.Connect(uri, uinfo)
} else {
nc, err = nats.Connect(uri)
}
nc, err := nats.Connect(uri, uinfo)
if err != nil {
cclog.ComponentError(s.name, "Connect to", uri, "failed:", err.Error())
log.Fatal(err)
return err
}
s.client = nc
return nil
}
func (s *NatsSink) Init(config json.RawMessage) error {
func (s *NatsSink) Init(config sinkConfig) error {
s.name = "NatsSink"
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return err
}
}
if len(s.config.Host) == 0 ||
len(s.config.Port) == 0 ||
len(s.config.Database) == 0 {
return errors.New("not all configuration variables set required by NatsSink")
if len(config.Host) == 0 ||
len(config.Port) == 0 ||
len(config.Database) == 0 {
return errors.New("Not all configuration variables set required by NatsSink")
}
s.host = config.Host
s.port = config.Port
s.database = config.Database
s.organization = config.Organization
s.user = config.User
s.password = config.Password
// Setup Influx line protocol
s.buffer = &bytes.Buffer{}
s.buffer.Grow(1025)
@@ -77,11 +55,11 @@ func (s *NatsSink) Init(config json.RawMessage) error {
return s.connect()
}
func (s *NatsSink) Write(m lp.CCMetric) error {
func (s *NatsSink) Write(point lp.CCMetric) error {
if s.client != nil {
_, err := s.encoder.Encode(m.ToPoint(s.config.MetaAsTags))
_, err := s.encoder.Encode(point)
if err != nil {
cclog.ComponentError(s.name, "Write:", err.Error())
log.Print(err)
return err
}
}
@@ -90,8 +68,7 @@ func (s *NatsSink) Write(m lp.CCMetric) error {
func (s *NatsSink) Flush() error {
if s.client != nil {
if err := s.client.Publish(s.config.Database, s.buffer.Bytes()); err != nil {
cclog.ComponentError(s.name, "Flush:", err.Error())
if err := s.client.Publish(s.database, s.buffer.Bytes()); err != nil {
return err
}
s.buffer.Reset()
@@ -100,8 +77,8 @@ func (s *NatsSink) Flush() error {
}
func (s *NatsSink) Close() {
log.Print("Closing Nats connection")
if s.client != nil {
cclog.ComponentDebug(s.name, "Close")
s.client.Close()
}
}

View File

@@ -1,28 +0,0 @@
## `nats` sink
The `nats` sink publishes all metrics into a NATS network. The publishing subject is the database name provided in the configuration file.
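For orientation, a minimal sketch of the underlying publish call with the NATS client. Server address, credentials, subject and the line-protocol payload are placeholders taken from the configuration example below:
```go
package main

import (
	"fmt"
	"log"

	nats "github.com/nats-io/nats.go"
)

func main() {
	// Placeholder server and credentials matching the example configuration.
	nc, err := nats.Connect("nats://dbhost.example.com:4222",
		nats.UserInfo("exampleuser", "examplepw"))
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// The sink publishes buffered InfluxDB line-protocol text under the
	// subject given as "database" in its configuration.
	line := fmt.Sprintf("cpu_load,hostname=node001 value=1.23 %d\n", int64(1643653461000000000))
	if err := nc.Publish("mymetrics", []byte(line)); err != nil {
		log.Fatal(err)
	}
}
```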
### Configuration structure
```json
{
"<name>": {
"type": "nats",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
"user": "exampleuser",
"password" : "examplepw"
}
}
```
- `type`: makes the sink a `nats` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are published under this subject
- `host`: Hostname of the NATS server
- `port`: Port number (as string) of the NATS server
- `user`: Username for basic authentication
- `password`: Password for basic authentication

View File

@@ -2,7 +2,6 @@ package sinks
import (
"encoding/json"
"fmt"
"os"
"sync"
@@ -10,90 +9,76 @@ import (
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const SINK_MAX_FORWARD = 50
// Map of all available sinks
var AvailableSinks = map[string]Sink{
"influxdb": new(InfluxSink),
"stdout": new(StdoutSink),
"nats": new(NatsSink),
"http": new(HttpSink),
"ganglia": new(GangliaSink),
"influxasync": new(InfluxAsyncSink),
"libganglia": new(LibgangliaSink),
"influxdb": new(InfluxSink),
"stdout": new(StdoutSink),
"nats": new(NatsSink),
"http": new(HttpSink),
"ganglia": new(GangliaSink),
}
// Metric collector manager data structure
type sinkManager struct {
input chan lp.CCMetric // input channel
done chan bool // channel to finish / stop metric sink manager
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
sinks map[string]Sink // Mapping sink name to sink
maxForward int // number of metrics to write maximally in one iteration
input chan lp.CCMetric // input channel
outputs []Sink // List of sinks to use
done chan bool // channel to finish / stop metric sink manager
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
config []sinkConfig // json encoded config for sink manager
}
// Sink manager access functions
type SinkManager interface {
Init(wg *sync.WaitGroup, sinkConfigFile string) error
AddInput(input chan lp.CCMetric)
AddOutput(name string, config json.RawMessage) error
AddOutput(config json.RawMessage) error
Start()
Close()
}
// Init initializes the sink manager by:
// * Reading its configuration file
// * Adding the configured sinks and providing them with the corresponding config
func (sm *sinkManager) Init(wg *sync.WaitGroup, sinkConfigFile string) error {
sm.input = nil
sm.outputs = make([]Sink, 0)
sm.done = make(chan bool)
sm.wg = wg
sm.sinks = make(map[string]Sink, 0)
sm.maxForward = SINK_MAX_FORWARD
if len(sinkConfigFile) == 0 {
return nil
}
sm.config = make([]sinkConfig, 0)
// Read sink config file
configFile, err := os.Open(sinkConfigFile)
if err != nil {
cclog.ComponentError("SinkManager", err.Error())
return err
}
defer configFile.Close()
// Parse config
jsonParser := json.NewDecoder(configFile)
var rawConfigs map[string]json.RawMessage
err = jsonParser.Decode(&rawConfigs)
if err != nil {
cclog.ComponentError("SinkManager", err.Error())
return err
}
// Start sinks
for name, raw := range rawConfigs {
err = sm.AddOutput(name, raw)
if len(sinkConfigFile) > 0 {
configFile, err := os.Open(sinkConfigFile)
if err != nil {
cclog.ComponentError("SinkManager", err.Error())
continue
return err
}
defer configFile.Close()
jsonParser := json.NewDecoder(configFile)
var rawConfigs []json.RawMessage
err = jsonParser.Decode(&rawConfigs)
if err != nil {
cclog.ComponentError("SinkManager", err.Error())
return err
}
for _, raw := range rawConfigs {
err = sm.AddOutput(raw)
if err != nil {
continue
}
}
}
return nil
}
// Start starts the sink managers background task, which
// distributes received metrics to the sinks
func (sm *sinkManager) Start() {
batchcount := 20
sm.wg.Add(1)
go func() {
defer sm.wg.Done()
// Sink manager is done
done := func() {
for _, s := range sm.sinks {
for _, s := range sm.outputs {
s.Flush()
s.Close()
}
@@ -101,16 +86,6 @@ func (sm *sinkManager) Start() {
cclog.ComponentDebug("SinkManager", "DONE")
}
toTheSinks := func(p lp.CCMetric) {
// Send received metric to all outputs
cclog.ComponentDebug("SinkManager", "WRITE", p)
for _, s := range sm.sinks {
if err := s.Write(p); err != nil {
cclog.ComponentError("SinkManager", "WRITE", s.Name(), "write failed:", err.Error())
}
}
}
for {
select {
case <-sm.done:
@@ -118,11 +93,21 @@ func (sm *sinkManager) Start() {
return
case p := <-sm.input:
toTheSinks(p)
for i := 0; len(sm.input) > 0 && i < sm.maxForward; i++ {
p := <-sm.input
toTheSinks(p)
// Send received metric to all outputs
cclog.ComponentDebug("SinkManager", "WRITE", p)
for _, s := range sm.outputs {
s.Write(p)
}
// Flush all outputs
if batchcount == 0 {
cclog.ComponentDebug("SinkManager", "FLUSH")
for _, s := range sm.outputs {
s.Flush()
}
batchcount = 20
}
batchcount--
}
}
}()
@@ -136,27 +121,29 @@ func (sm *sinkManager) AddInput(input chan lp.CCMetric) {
sm.input = input
}
func (sm *sinkManager) AddOutput(name string, rawConfig json.RawMessage) error {
func (sm *sinkManager) AddOutput(rawConfig json.RawMessage) error {
var err error
var sinkConfig defaultSinkConfig
if len(rawConfig) > 0 {
err := json.Unmarshal(rawConfig, &sinkConfig)
var config sinkConfig
if len(rawConfig) > 3 {
err = json.Unmarshal(rawConfig, &config)
if err != nil {
cclog.ComponentError("SinkManager", "SKIP", config.Type, "JSON config error:", err.Error())
return err
}
}
if _, found := AvailableSinks[sinkConfig.Type]; !found {
cclog.ComponentError("SinkManager", "SKIP", name, "unknown sink:", sinkConfig.Type)
if _, found := AvailableSinks[config.Type]; !found {
cclog.ComponentError("SinkManager", "SKIP", config.Type, "unknown sink:", err.Error())
return err
}
s := AvailableSinks[sinkConfig.Type]
err = s.Init(rawConfig)
s := AvailableSinks[config.Type]
err = s.Init(config)
if err != nil {
cclog.ComponentError("SinkManager", "SKIP", s.Name(), "initialization failed:", err.Error())
return err
}
sm.sinks[name] = s
cclog.ComponentDebug("SinkManager", "ADD SINK", s.Name(), "with name", fmt.Sprintf("'%s'", name))
sm.outputs = append(sm.outputs, s)
sm.config = append(sm.config, config)
cclog.ComponentDebug("SinkManager", "ADD SINK", s.Name())
return nil
}
@@ -170,7 +157,7 @@ func (sm *sinkManager) Close() {
// New creates a new initialized sink manager
func New(wg *sync.WaitGroup, sinkConfigFile string) (SinkManager, error) {
sm := new(sinkManager)
sm := &sinkManager{}
err := sm.Init(wg, sinkConfigFile)
if err != nil {
return nil, err

256
sinks/sqliteSink.go Normal file
View File

@@ -0,0 +1,256 @@
package sinks
import (
"database/sql"
"errors"
"fmt"
"log"
"sort"
"strings"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
_ "github.com/mattn/go-sqlite3"
)
const SQLITE3_TIMESTAMP_NAME = `timestamp`
const SQLITE3_TIMESTAMP_TYPE = `TIMESTAMP NOT NULL`
type SqliteTable struct {
columns []string
coltypes []string
createQuery string
insertQuery string
primkeys []string
}
type SqliteSink struct {
sink
db *sql.DB
tables map[string]SqliteTable
}
type StrList []string
func (list StrList) Len() int { return len(list) }
func (list StrList) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
func (list StrList) Less(i, j int) bool {
var si string = list[i]
var sj string = list[j]
var si_lower = strings.ToLower(si)
var sj_lower = strings.ToLower(sj)
if si_lower == sj_lower {
return si < sj
}
return si_lower < sj_lower
}
func (s *SqliteSink) Init(config sinkConfig) error {
var err error
if len(config.Database) == 0 {
return errors.New("not all configuration variables set required by SqliteSink")
}
s.host = config.Host
s.port = config.Port
s.database = config.Database
s.organization = config.Organization
s.user = config.User
s.password = config.Password
log.Print("Opening Sqlite3 database ", s.database)
uri := fmt.Sprintf("file:./%s.db", s.database)
if len(s.user) > 0 && len(s.password) > 0 {
uri += fmt.Sprintf("?_auth&_auth_user=%s&_auth_pass=%s", s.user, s.password)
}
s.db, err = sql.Open("sqlite3", uri)
if err != nil {
log.Fatal(err)
s.db = nil
return err
}
s.tables = make(map[string]SqliteTable)
return nil
}
func getkeylist(point lp.CCMetric, include_meta bool) []string {
keys := make([]string, 0)
for k := range point.Tags() {
keys = append(keys, k)
}
if include_meta {
for k := range point.Meta() {
keys = append(keys, k)
}
}
for k := range point.Fields() {
keys = append(keys, k)
}
keys = append(keys, SQLITE3_TIMESTAMP_NAME)
sort.Sort(StrList(keys))
return keys
}
func getvaluelist(point lp.CCMetric, keys []string) []string {
values := make([]string, 0)
for _, key := range keys {
if key == SQLITE3_TIMESTAMP_NAME {
values = append(values, point.Time().String())
} else if val, ok := point.GetTag(key); ok {
values = append(values, val)
} else if val, ok := point.GetMeta(key); ok {
values = append(values, val)
} else if ival, ok := point.GetField(key); ok {
values = append(values, fmt.Sprintf("%v", ival))
} else {
values = append(values, "NULL")
}
}
return values
}
func gettypelist(point lp.CCMetric, keys []string) []string {
types := make([]string, 0)
for _, key := range keys {
if key == SQLITE3_TIMESTAMP_NAME {
types = append(types, SQLITE3_TIMESTAMP_TYPE)
continue
}
if point.HasTag(key) {
types = append(types, "TEXT")
continue
}
if point.HasMeta(key) {
types = append(types, "TEXT")
continue
}
ival, ok := point.GetField(key)
if ok {
switch ival.(type) {
case float64:
types = append(types, "DOUBLE")
case float32:
types = append(types, "FLOAT")
case string:
types = append(types, "TEXT")
case int:
types = append(types, "INT")
case int64:
types = append(types, "INT8")
}
}
}
return types
}
func getprimkey(keys []string) []string {
primkeys := make([]string, 0)
primkeys = append(primkeys, SQLITE3_TIMESTAMP_NAME)
for _, key := range keys {
switch key {
case "hostname":
primkeys = append(primkeys, "hostname")
case "type":
primkeys = append(primkeys, "type")
case "type-id":
primkeys = append(primkeys, "type-id")
}
}
return primkeys
}
func newCreateQuery(tablename string, keys []string, types []string, primkeys []string) string {
keytypelist := make([]string, 0)
for i, key := range keys {
keytypelist = append(keytypelist, fmt.Sprintf("%s %s", key, types[i]))
}
keytypelist = append(keytypelist, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(primkeys, ",")))
stmt := fmt.Sprintf("create table if not exists %s (%s);", tablename, keytypelist)
return stmt
}
func newInsertQuery(tablename string, keys []string) string {
v := strings.Repeat("?,", len(keys)-1) + "?"
stmt := fmt.Sprintf("insert into %s (%s) values(%s);", tablename, strings.Join(keys, ","), v)
return stmt
}
func (s *SqliteSink) Write(point lp.CCMetric) error {
if s.db != nil {
measurement := point.Name()
if tab, ok := s.tables[measurement]; !ok {
var tab SqliteTable
tab.columns = getkeylist(point, s.meta_as_tags)
tab.coltypes = gettypelist(point, tab.columns)
tab.primkeys = getprimkey(tab.columns)
tab.createQuery = newCreateQuery(measurement, tab.columns, tab.coltypes, tab.primkeys)
tab.insertQuery = newInsertQuery(measurement, tab.columns)
tx, err := s.db.Begin()
if err != nil {
cclog.ComponentError("SqliteSink", "Init DB session failed:", err.Error())
return err
}
_, err = tx.Exec(tab.createQuery)
if err != nil {
cclog.ComponentError("SqliteSink", "Execute CreateQuery failed:", err.Error())
return err
}
stmt, err := tx.Prepare(tab.insertQuery)
if err != nil {
cclog.ComponentError("SqliteSink", "Prepare InsertQuery failed:", err.Error())
return err
}
defer stmt.Close()
// Convert the string values to a variadic argument list for Exec
args := make([]interface{}, 0, len(tab.columns))
for _, v := range getvaluelist(point, tab.columns) {
args = append(args, v)
}
_, err = stmt.Exec(args...)
if err != nil {
cclog.ComponentError("SqliteSink", "Execute InsertQuery failed:", err.Error())
return err
}
tx.Commit()
s.tables[measurement] = tab
} else {
keys := getkeylist(point, s.meta_as_tags)
if len(keys) > len(tab.columns) {
cclog.ComponentDebug("SqliteSink", "Metric", measurement, "has different keys as creation keys, ignoring addition keys")
} else if len(keys) < len(tab.columns) {
cclog.ComponentDebug("SqliteSink", "Metric", measurement, "has different keys as creation keys, setting missing values with 'NULL'")
}
values := getvaluelist(point, tab.columns)
tx, err := s.db.Begin()
if err != nil {
cclog.ComponentError("SqliteSink", "Init DB session failed:", err.Error())
return err
}
stmt, err := tx.Prepare(tab.insertQuery)
if err != nil {
cclog.ComponentError("SqliteSink", "Prepare InsertQuery failed:", err.Error())
return err
}
defer stmt.Close()
// Convert the string values to a variadic argument list for Exec
args := make([]interface{}, 0, len(values))
for _, v := range values {
args = append(args, v)
}
_, err = stmt.Exec(args...)
if err != nil {
cclog.ComponentError("SqliteSink", "Execute InsertQuery failed:", err.Error())
return err
}
tx.Commit()
}
}
return nil
}
func (s *SqliteSink) Flush() error {
return nil
}
func (s *SqliteSink) Close() {
log.Print("Closing Sqlite3 database ", s.database)
if s.db != nil {
s.db.Close()
}
}
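For illustration, the query builders above would produce roughly the following statements for a hypothetical metric `cpu_load` carrying `hostname` and `type` tags and a float64 `value` field; the columns are sorted case-insensitively and the `timestamp` column is added automatically:
```
create table if not exists cpu_load (hostname TEXT, timestamp TIMESTAMP NOT NULL, type TEXT, value DOUBLE, PRIMARY KEY (timestamp,hostname,type));
insert into cpu_load (hostname,timestamp,type,value) values(?,?,?,?);
```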

View File

@@ -1,9 +1,8 @@
package sinks
import (
"encoding/json"
"fmt"
"os"
"math"
"strings"
// "time"
@@ -11,57 +10,60 @@ import (
)
type StdoutSink struct {
sink // meta_as_tags, name
output *os.File
config struct {
defaultSinkConfig
Output string `json:"output_file,omitempty"`
}
sink
}
func (s *StdoutSink) Init(config json.RawMessage) error {
func (s *StdoutSink) Init(config sinkConfig) error {
s.name = "StdoutSink"
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return err
}
}
s.output = os.Stdout
if len(s.config.Output) > 0 {
switch strings.ToLower(s.config.Output) {
case "stdout":
s.output = os.Stdout
case "stderr":
s.output = os.Stderr
default:
f, err := os.OpenFile(s.config.Output, os.O_CREATE|os.O_WRONLY, os.FileMode(0600))
if err != nil {
return err
}
s.output = f
}
}
s.meta_as_tags = s.config.MetaAsTags
s.meta_as_tags = config.MetaAsTags
return nil
}
func (s *StdoutSink) Write(m lp.CCMetric) error {
fmt.Fprint(
s.output,
m.ToLineProtocol(s.meta_as_tags),
)
func (s *StdoutSink) Write(point lp.CCMetric) error {
var tagsstr []string
var fieldstr []string
for _, t := range point.TagList() {
tagsstr = append(tagsstr, fmt.Sprintf("%s=%s", t.Key, t.Value))
}
if s.meta_as_tags {
for _, m := range point.MetaList() {
tagsstr = append(tagsstr, fmt.Sprintf("%s=%s", m.Key, m.Value))
}
}
for _, f := range point.FieldList() {
switch f.Value.(type) {
case float64:
if !math.IsNaN(f.Value.(float64)) {
fieldstr = append(fieldstr, fmt.Sprintf("%s=%v", f.Key, f.Value.(float64)))
} else {
fieldstr = append(fieldstr, fmt.Sprintf("%s=0.0", f.Key))
}
case float32:
if !math.IsNaN(float64(f.Value.(float32))) {
fieldstr = append(fieldstr, fmt.Sprintf("%s=%v", f.Key, f.Value.(float32)))
} else {
fieldstr = append(fieldstr, fmt.Sprintf("%s=0.0", f.Key))
}
case int:
fieldstr = append(fieldstr, fmt.Sprintf("%s=%d", f.Key, f.Value.(int)))
case int64:
fieldstr = append(fieldstr, fmt.Sprintf("%s=%d", f.Key, f.Value.(int64)))
case string:
fieldstr = append(fieldstr, fmt.Sprintf("%s=%q", f.Key, f.Value.(string)))
default:
fieldstr = append(fieldstr, fmt.Sprintf("%s=%v", f.Key, f.Value))
}
}
if len(tagsstr) > 0 {
fmt.Printf("%s,%s %s %d\n", point.Name(), strings.Join(tagsstr, ","), strings.Join(fieldstr, ","), point.Time().Unix())
} else {
fmt.Printf("%s %s %d\n", point.Name(), strings.Join(fieldstr, ","), point.Time().Unix())
}
return nil
}
func (s *StdoutSink) Flush() error {
s.output.Sync()
return nil
}
func (s *StdoutSink) Close() {
if s.output != os.Stdout && s.output != os.Stderr {
s.output.Close()
}
}
func (s *StdoutSink) Close() {}

View File

@@ -1,22 +0,0 @@
## `stdout` sink
The `stdout` sink is the simplest sink provided by cc-metric-collector. It writes all metrics in InfluxDB line-protocol format to a configurable output file or to the special targets `stdout` and `stderr`.
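For illustration, a metric with a `hostname` tag and a single `value` field is rendered roughly as the following line (all values made up):
```
cpu_load,hostname=node001,type=node value=1.23 1643653461
```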
### Configuration structure
```json
{
"<name>": {
"type": "stdout",
"meta_as_tags" : true,
"output_file" : "mylogfile.log"
}
}
```
- `type`: makes the sink a `stdout` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `output_file`: Write all data to the selected file (optional). There are two 'special' files: `stdout` and `stderr`. If this option is not provided, the default is `stdout`.