Compare commits


7 Commits

Author        SHA1        Message                                   Date
Thomas Roehl  0ca87ea0be  Fix SqliteSink                            2022-01-31 13:24:39 +01:00
Thomas Roehl  4195786242  Add all CCMetric functions to interface  2022-01-31 06:04:30 +01:00
Thomas Roehl  328d26bf3c  Merge branch 'develop' into sqlite3_sink  2022-01-31 05:59:25 +01:00
Thomas Roehl  2b07798af2  Fix Write() arguments                     2021-11-26 19:21:18 +01:00
Thomas Roehl  aa842a8a9c  Add Flush method                          2021-11-26 19:13:48 +01:00
Thomas Roehl  06ab58dc92  Merge branch 'main' into sqlite3_sink     2021-11-25 18:23:04 +01:00
Thomas Roehl  40855b1164  Sqlite3 sink                              2021-05-18 15:53:20 +02:00
103 changed files with 2722 additions and 7882 deletions

View File

@@ -1 +1 @@
-{}
+[]

View File

@@ -1,6 +1,6 @@
-{
-    "testoutput" : {
+[
+    {
         "type" : "stdout",
         "meta_as_tags" : true
     }
-}
+]

View File

@@ -1,184 +0,0 @@
# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions
# Workflow name
name: Release
# Run on tag push
on:
push:
tags:
- '**'
jobs:
#
# Build on AlmaLinux 8.5 using go-toolset
#
AlmaLinux-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:8.5
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
outputs:
rpm : ${{steps.rpmrename.outputs.RPM}}
srpm : ${{steps.rpmrename.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes group install "Development Tools" "RPM Development Tools"
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# Use dnf to install build dependencies
- name: Install build dependencies
run: dnf --assumeyes builddep scripts/cc-metric-collector.spec
- name: RPM build MetricCollector
id: rpmbuild
run: make RPM
# AlmaLinux 8.5 is a derivative of RedHat Enterprise Linux 8 (UBI8),
# so both of the created RPMs contain the substring 'el8' in their file names.
# This step replaces the substring 'el8' with 'alma85'. It uses the move operation
# because it is unclear whether the default AlmaLinux 8.5 container contains the
# 'rename' command. This way we also get the new names for the output.
- name: Rename RPMs (s/el8/alma85/)
id: rpmrename
run: |
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
NEW_RPM="${OLD_RPM/el8/alma85}"
NEW_SRPM=${OLD_SRPM/el8/alma85}
mv "${OLD_RPM}" "${NEW_RPM}"
mv "${OLD_SRPM}" "${NEW_SRPM}"
echo "::set-output name=SRPM::${NEW_SRPM}"
echo "::set-output name=RPM::${NEW_RPM}"
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector RPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.RPM }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector SRPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.SRPM }}
#
# Build on UBI 8 using go-toolset
#
UBI-8-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065
# The job outputs link to the outputs of the 'rpmbuild' step
outputs:
rpm : ${{steps.rpmbuild.outputs.RPM}}
srpm : ${{steps.rpmbuild.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# Use dnf to install build dependencies
- name: Install build dependencies
run: dnf --assumeyes --disableplugin=subscription-manager builddep scripts/cc-metric-collector.spec
- name: RPM build MetricCollector
id: rpmbuild
run: make RPM
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector RPM for UBI 8
path: ${{ steps.rpmbuild.outputs.RPM }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector SRPM for UBI 8
path: ${{ steps.rpmbuild.outputs.SRPM }}
#
# Create release with fresh RPMs
#
Release:
runs-on: ubuntu-latest
# We need the RPMs, so add dependency
needs: [AlmaLinux-RPM-build, UBI-8-RPM-build]
steps:
# See: https://github.com/actions/download-artifact
- name: Download AlmaLinux 8.5 RPM
uses: actions/download-artifact@v2
with:
name: cc-metric-collector RPM for AlmaLinux 8.5
- name: Download AlmaLinux 8.5 SRPM
uses: actions/download-artifact@v2
with:
name: cc-metric-collector SRPM for AlmaLinux 8.5
- name: Download UBI 8 RPM
uses: actions/download-artifact@v2
with:
name: cc-metric-collector RPM for UBI 8
- name: Download UBI 8 SRPM
uses: actions/download-artifact@v2
with:
name: cc-metric-collector SRPM for UBI 8
# The download actions do not publish the names of the downloaded files,
# so we re-use the job outputs of the parent jobs. The files are all
# downloaded to the current folder.
# The gh-release action afterwards does not accept file lists; all
# files have to be listed under 'files'. This step creates one output per
# RPM package (2 per distro).
- name: Set RPM variables
id: files
run: |
ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}")
ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}")
UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
echo "ALMA_85_RPM::${ALMA_85_RPM}"
echo "ALMA_85_SRPM::${ALMA_85_SRPM}"
echo "UBI_8_RPM::${UBI_8_RPM}"
echo "UBI_8_SRPM::${UBI_8_SRPM}"
echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}"
echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}"
echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}"
echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}"
# See: https://github.com/softprops/action-gh-release
- name: Release
uses: softprops/action-gh-release@v1
if: startsWith(github.ref, 'refs/tags/')
with:
name: cc-metric-collector-${{github.ref_name}}
files: |
${{ steps.files.outputs.ALMA_85_RPM }}
${{ steps.files.outputs.ALMA_85_SRPM }}
${{ steps.files.outputs.UBI_8_RPM }}
${{ steps.files.outputs.UBI_8_SRPM }}

.github/workflows/rpmbuild.yml (new file)
View File

@@ -0,0 +1,61 @@
name: Run RPM Build
on: push
jobs:
build-centos8:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: TomTheBear/rpmbuild@master
id: rpm
name: Build RPM package on CentOS8
with:
spec_file: "./scripts/cc-metric-collector.spec"
- name: Save RPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector RPM CentOS8
path: ${{ steps.rpm.outputs.rpm_dir_path }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector SRPM CentOS8
path: ${{ steps.rpm.outputs.source_rpm_path }}
build-centos-latest:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: TomTheBear/rpmbuild@centos_latest
id: rpm
name: Build RPM package on CentOS 'Latest'
with:
spec_file: "./scripts/cc-metric-collector.spec"
- name: Save RPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector RPM CentOS 'Latest'
path: ${{ steps.rpm.outputs.rpm_dir_path }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector SRPM CentOS 'Latest'
path: ${{ steps.rpm.outputs.source_rpm_path }}
build-alma-8_5:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: TomTheBear/rpmbuild@alma8.5
id: rpm
name: Build RPM package on AlmaLinux 8.5
with:
spec_file: "./scripts/cc-metric-collector.spec"
- name: Save RPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector RPM AlmaLinux 8.5
path: ${{ steps.rpm.outputs.rpm_dir_path }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v1.0.0
with:
name: cc-metric-collector SRPM AlmaLinux 8.5
path: ${{ steps.rpm.outputs.source_rpm_path }}

View File

@@ -1,68 +1,20 @@
# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions
# Workflow name
name: Run Test
# Run on event push
on: push
jobs:
#
# Job build-1-17
# Build on latest Ubuntu using golang version 1.17
#
build-1-17:
build:
runs-on: ubuntu-latest
steps:
# See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
- uses: actions/checkout@v2
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v2
uses: actions/setup-go@v2.1.5
with:
go-version: '^1.17.7'
# Install libganglia
- name: Setup Ganglia
run: sudo apt install ganglia-monitor libganglia1
go-version: '^1.17.6'
- name: Build MetricCollector
run: make
- name: Run MetricCollector once
run: ./cc-metric-collector --once --config .github/ci-config.json
#
# Job build-1-16
# Build on latest Ubuntu using golang version 1.16
#
build-1-16:
runs-on: ubuntu-latest
steps:
# See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v2
with:
go-version: '^1.16.7' # The version AlmaLinux 8.5 uses
# Install libganglia
- name: Setup Ganglia
run: sudo apt install ganglia-monitor libganglia1
- name: Build MetricCollector
run: make
- name: Run MetricCollector once
- name: Run MetricCollector
run: ./cc-metric-collector --once --config .github/ci-config.json

.gitmodules
View File

@@ -1,4 +0,0 @@
[submodule ".github/actions/rpmbuild-centos8-golang"]
path = .github/actions/rpmbuild-centos8-golang
url = https://github.com/naveenrajm7/rpmbuild.git
branch = centos8

View File

@@ -1,21 +1,10 @@
APP = cc-metric-collector
GOSRC_APP := cc-metric-collector.go
GOSRC_APP := metric-collector.go
GOSRC_COLLECTORS := $(wildcard collectors/*.go)
GOSRC_SINKS := $(wildcard sinks/*.go)
GOSRC_RECEIVERS := $(wildcard receivers/*.go)
GOSRC_INTERNAL := $(wildcard internal/*/*.go)
GOSRC := $(GOSRC_APP) $(GOSRC_COLLECTORS) $(GOSRC_SINKS) $(GOSRC_RECEIVERS) $(GOSRC_INTERNAL)
COMPONENT_DIRS := collectors \
sinks \
receivers \
internal/metricRouter \
internal/ccMetric \
internal/metricAggregator \
internal/ccLogger \
internal/ccTopology \
internal/multiChanTicker
BINDIR = bin
.PHONY: all
@@ -26,27 +15,9 @@ $(APP): $(GOSRC)
go get
go build -o $(APP) $(GOSRC_APP)
install: $(APP)
@WORKSPACE=$(PREFIX)
@if [ -z "$${WORKSPACE}" ]; then exit 1; fi
@mkdir --parents --verbose $${WORKSPACE}/usr/$(BINDIR)
@install -Dpm 755 $(APP) $${WORKSPACE}/usr/$(BINDIR)/$(APP)
@mkdir --parents --verbose $${WORKSPACE}/etc/cc-metric-collector $${WORKSPACE}/etc/default $${WORKSPACE}/etc/systemd/system $${WORKSPACE}/etc/init.d
@install -Dpm 600 config.json $${WORKSPACE}/etc/cc-metric-collector/cc-metric-collector.json
@sed -i -e s+"\"./"+"\"/etc/cc-metric-collector/"+g $${WORKSPACE}/etc/cc-metric-collector/cc-metric-collector.json
@install -Dpm 600 sinks.json $${WORKSPACE}/etc/cc-metric-collector/sinks.json
@install -Dpm 600 collectors.json $${WORKSPACE}/etc/cc-metric-collector/collectors.json
@install -Dpm 600 router.json $${WORKSPACE}/etc/cc-metric-collector/router.json
@install -Dpm 600 receivers.json $${WORKSPACE}/etc/cc-metric-collector/receivers.json
@install -Dpm 600 scripts/cc-metric-collector.config $${WORKSPACE}/etc/default/cc-metric-collector
@install -Dpm 644 scripts/cc-metric-collector.service $${WORKSPACE}/etc/systemd/system/cc-metric-collector.service
@install -Dpm 644 scripts/cc-metric-collector.init $${WORKSPACE}/etc/init.d/cc-metric-collector
.PHONY: clean
.ONESHELL:
clean:
@for COMP in $(COMPONENT_DIRS); do if [ -e $$COMP/Makefile ]; then make -C $$COMP clean; fi; done
make -C collectors clean
rm -f $(APP)
.PHONY: fmt
@@ -70,58 +41,3 @@ vet:
staticcheck:
go install honnef.co/go/tools/cmd/staticcheck@latest
$$(go env GOPATH)/bin/staticcheck ./...
.ONESHELL:
.PHONY: RPM
RPM: scripts/cc-metric-collector.spec
@WORKSPACE="$${PWD}"
@SPECFILE="$${WORKSPACE}/scripts/cc-metric-collector.spec"
# Setup RPM build tree
@eval $$(rpm --eval "ARCH='%{_arch}' RPMDIR='%{_rpmdir}' SOURCEDIR='%{_sourcedir}' SPECDIR='%{_specdir}' SRPMDIR='%{_srcrpmdir}' BUILDDIR='%{_builddir}'")
@mkdir --parents --verbose "$${RPMDIR}" "$${SOURCEDIR}" "$${SPECDIR}" "$${SRPMDIR}" "$${BUILDDIR}"
# Create source tarball
@COMMITISH="HEAD"
@VERS=$$(git describe --tags $${COMMITISH})
@VERS=$${VERS#v}
@VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
@eval $$(rpmspec --query --queryformat "NAME='%{name}' VERSION='%{version}' RELEASE='%{release}' NVR='%{NVR}' NVRA='%{NVRA}'" --define="VERS $${VERS}" "$${SPECFILE}")
@PREFIX="$${NAME}-$${VERSION}"
@FORMAT="tar.gz"
@SRCFILE="$${SOURCEDIR}/$${PREFIX}.$${FORMAT}"
@git archive --verbose --format "$${FORMAT}" --prefix="$${PREFIX}/" --output="$${SRCFILE}" $${COMMITISH}
# Build RPM and SRPM
@rpmbuild -ba --define="VERS $${VERS}" --rmsource --clean "$${SPECFILE}"
# Report RPMs and SRPMs when in GitHub Workflow
@if [[ "$${GITHUB_ACTIONS}" == true ]]; then
@ RPMFILE="$${RPMDIR}/$${ARCH}/$${NVRA}.rpm"
@ SRPMFILE="$${SRPMDIR}/$${NVR}.src.rpm"
@ echo "RPM: $${RPMFILE}"
@ echo "SRPM: $${SRPMFILE}"
@ echo "::set-output name=SRPM::$${SRPMFILE}"
@ echo "::set-output name=RPM::$${RPMFILE}"
@fi
.PHONY: DEB
DEB: scripts/cc-metric-collector.deb.control $(APP)
@BASEDIR=$${PWD}
@WORKSPACE=$${PWD}/.dpkgbuild
@DEBIANDIR=$${WORKSPACE}/debian
@DEBIANBINDIR=$${WORKSPACE}/DEBIAN
@mkdir --parents --verbose $$WORKSPACE $$DEBIANBINDIR
#@mkdir --parents --verbose $$DEBIANDIR
@CONTROLFILE="$${BASEDIR}/scripts/cc-metric-collector.deb.control"
@COMMITISH="HEAD"
@VERS=$$(git describe --tags --abbrev=0 $${COMMITISH})
@VERS=$${VERS#v}
@VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
@ARCH=$$(uname -m)
@ARCH=$$(echo $$ARCH | sed -e s+'_'+'-'+g)
@PREFIX="$${NAME}-$${VERSION}_$${ARCH}"
@SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$$WORKSPACE"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//')
@SIZE="$$(awk -v size="$$SIZE_BYTES" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')"
#@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANDIR}/control
@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANBINDIR}/control
@make PREFIX=$${WORKSPACE} install
@DEB_FILE="cc-metric-collector_$${VERS}_$${ARCH}.deb"
@dpkg-deb -b $${WORKSPACE} "$$DEB_FILE"
@rm -r "$${WORKSPACE}"

View File

@@ -39,15 +39,14 @@ See the component READMEs for their configuration:
```
$ git clone git@github.com:ClusterCockpit/cc-metric-collector.git
$ make (downloads LIKWID, builds it as static library with 'direct' accessmode and copies all required files for the collector)
$ go get (requires at least golang 1.16)
$ make
$ go get (requires at least golang 1.13)
$ go build metric-collector
```
# Running
```
$ ./cc-metric-collector --help
$ ./metric-collector --help
Usage of metric-collector:
-config string
Path to configuration file (default "./config.json")
@@ -55,42 +54,10 @@ Usage of metric-collector:
Path for logfile (default "stderr")
-once
Run all collectors only once
```
# Scenarios
The metric collector was designed with flexibility in mind, so it can be used in many scenarios. Here are a few:
```mermaid
flowchart TD
subgraph a ["Cluster A"]
nodeA[NodeA with CC collector]
nodeB[NodeB with CC collector]
nodeC[NodeC with CC collector]
end
a --> db[(Database)]
db <--> ccweb("Webfrontend")
-pidfile string
Path for PID file (default "/var/run/cc-metric-collector.pid")
```
``` mermaid
flowchart TD
subgraph a [ClusterA]
direction LR
nodeA[NodeA with CC collector]
nodeB[NodeB with CC collector]
nodeC[NodeC with CC collector]
end
subgraph b [ClusterB]
direction LR
nodeD[NodeD with CC collector]
nodeE[NodeE with CC collector]
nodeF[NodeF with CC collector]
end
a --> ccrecv{"CC collector as receiver"}
b --> ccrecv
ccrecv --> db[("Database1")]
ccrecv -.-> db2[("Database2")]
db <-.-> ccweb("Webfrontend")
```
# Contributing
The ClusterCockpit ecosystem is designed to be used by different HPC computing centers. Since configurations and setups differ between the centers, the centers likely have to put some work into the cc-metric-collector to gather all desired metrics.

View File

@@ -1,37 +1,15 @@
{
"cpufreq": {},
"cpufreq_cpuinfo": {},
"gpfs": {
"exclude_filesystem": [
"test_fs"
]
},
"ibstat": {},
"loadavg": {
"exclude_metrics": [
"proc_total"
]
},
"netstat": {
"include_devices": [
"enp5s0"
],
"send_derived_values": true
},
"numastats": {},
"nvidia": {},
"tempstat": {
"report_max_temperature": true,
"report_critical_temperature": true,
"tag_override": {
"hwmon0": {
"type": "socket",
"type-id": "0"
"hwmon0" : {
"type" : "socket",
"type-id" : "0"
},
"hwmon1": {
"type": "socket",
"type-id": "1"
"hwmon1" : {
"type" : "socket",
"type-id" : "1"
}
}
}
}

View File

@@ -1,25 +1,82 @@
# Use central installation
CENTRAL_INSTALL = false
# How to access hardware performance counters through LIKWID.
# Recommended is 'direct' mode
ACCESSMODE = direct
all: likwid
# LIKWID version
#######################################################################
# if CENTRAL_INSTALL == true
#######################################################################
# Path to central installation (if CENTRAL_INSTALL=true)
LIKWID_BASE=/apps/likwid/5.2.1
# LIKWID version (should be same major version as central installation, 5.2.x)
LIKWID_VERSION = 5.2.1
.ONESHELL:
.PHONY: likwid
likwid:
INSTALL_FOLDER="$${PWD}/likwid"
BUILD_FOLDER="$${PWD}/likwidbuild"
if [ -d $${INSTALL_FOLDER} ]; then rm -r $${INSTALL_FOLDER}; fi
mkdir --parents --verbose $${INSTALL_FOLDER} $${BUILD_FOLDER}
wget -P "$${BUILD_FOLDER}" ftp://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz
tar -C $${BUILD_FOLDER} -xf $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION).tar.gz
install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/likwid*.h $${INSTALL_FOLDER}/
install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/bstrlib.h $${INSTALL_FOLDER}/
rm -r $${BUILD_FOLDER}
#######################################################################
# if CENTRAL_INSTALL == false and ACCESSMODE == accessdaemon
#######################################################################
# Where to install the accessdaemon
DAEMON_INSTALLDIR = /usr/local
# Which user to use for the accessdaemon
DAEMON_USER = root
# Which group to use for the accessdaemon
DAEMON_GROUP = root
clean:
#################################################
# No need to change anything below this line
#################################################
INSTALL_FOLDER = ./likwid
BUILD_FOLDER = ./likwid/build
ifneq ($(strip $(CENTRAL_INSTALL)),true)
LIKWID_BASE := $(shell pwd)/$(INSTALL_FOLDER)
DAEMON_BASE := $(LIKWID_BASE)
GROUPS_BASE := $(LIKWID_BASE)/groups
all: $(INSTALL_FOLDER)/liblikwid.a cleanup
else
DAEMON_BASE= $(LIKWID_BASE)/sbin
all: $(INSTALL_FOLDER)/liblikwid.a cleanup
endif
$(BUILD_FOLDER)/likwid-$(LIKWID_VERSION).tar.gz: $(BUILD_FOLDER)
wget -P $(BUILD_FOLDER) ftp://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz
$(BUILD_FOLDER):
mkdir -p $(BUILD_FOLDER)
$(INSTALL_FOLDER):
mkdir -p $(INSTALL_FOLDER)
$(BUILD_FOLDER)/likwid-$(LIKWID_VERSION): $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION).tar.gz
tar -C $(BUILD_FOLDER) -xf $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION).tar.gz
$(INSTALL_FOLDER)/liblikwid.a: $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION) $(INSTALL_FOLDER)
sed -i -e s+"PREFIX ?= .*"+"PREFIX = $(LIKWID_BASE)"+g \
-e s+"SHARED_LIBRARY = .*"+"SHARED_LIBRARY = false"+g \
-e s+"ACCESSMODE = .*"+"ACCESSMODE = $(ACCESSMODE)"+g \
-e s+"INSTALLED_ACCESSDAEMON = .*"+"INSTALLED_ACCESSDAEMON = $(DAEMON_INSTALLDIR)/likwid-accessD"+g \
$(BUILD_FOLDER)/likwid-$(LIKWID_VERSION)/config.mk
cd $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION) && make
cp $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION)/liblikwid.a $(INSTALL_FOLDER)
cp $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION)/ext/hwloc/liblikwid-hwloc.a $(INSTALL_FOLDER)
cp $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION)/src/includes/likwid*.h $(INSTALL_FOLDER)
cp $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION)/src/includes/bstrlib.h $(INSTALL_FOLDER)
$(DAEMON_INSTALLDIR)/likwid-accessD: $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION)/likwid-accessD
sudo -u $(DAEMON_USER) -g $(DAEMON_GROUP) install -m 4775 $(BUILD_FOLDER)/likwid-$(LIKWID_VERSION)/likwid-accessD $(DAEMON_INSTALLDIR)/likwid-accessD
prepare_collector: likwidMetric.go
cp likwidMetric.go likwidMetric.go.orig
sed -i -e s+"const GROUPPATH =.*"+"const GROUPPATH = \`$(GROUPS_BASE)\`"+g likwidMetric.go
cleanup:
rm -rf $(BUILD_FOLDER)
clean: cleanup
rm -rf likwid
.PHONY: clean

View File

@@ -18,30 +18,21 @@ In contrast to the configuration files for sinks and receivers, the collectors c
* [`cpustat`](./cpustatMetric.md)
* [`memstat`](./memstatMetric.md)
* [`iostat`](./iostatMetric.md)
* [`diskstat`](./diskstatMetric.md)
* [`loadavg`](./loadavgMetric.md)
* [`netstat`](./netstatMetric.md)
* [`ibstat`](./infinibandMetric.md)
* [`ibstat_perfquery`](./infinibandPerfQueryMetric.md)
* [`tempstat`](./tempMetric.md)
* [`lustrestat`](./lustreMetric.md)
* [`lustre`](./lustreMetric.md)
* [`likwid`](./likwidMetric.md)
* [`nvidia`](./nvidiaMetric.md)
* [`customcmd`](./customCmdMetric.md)
* [`ipmistat`](./ipmiMetric.md)
* [`topprocs`](./topprocsMetric.md)
* [`nfs3stat`](./nfs3Metric.md)
* [`nfs4stat`](./nfs4Metric.md)
* [`cpufreq`](./cpufreqMetric.md)
* [`cpufreq_cpuinfo`](./cpufreqCpuinfoMetric.md)
* [`numastat`](./numastatMetric.md)
* [`gpfs`](./gpfsMetric.md)
* [`beegfs_meta`](./beegfsmetaMetric.md)
* [`beegfs_storage`](./beegfsstorageMetric.md)
## Todos
* [ ] Exclude devices for `diskstat` collector
* [ ] Aggregate metrics to higher topology entity (sum hwthread metrics to socket metric, ...). Needs to be configurable
# Contributing own collectors
@@ -80,11 +71,6 @@ type SampleCollector struct {
}
func (m *SampleCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "SampleCollector"
m.setup()
if len(config) > 0 {
@@ -105,15 +91,10 @@ func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric)
}
// tags for the metric, if type != node use proper type and type-id
tags := map[string]string{"type" : "node"}
x, err := GetMetric()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Read(): %v", err))
}
// Each metric has exactly one field: value !
value := map[string]interface{}{"value": int64(x)}
if y, err := lp.New("sample_metric", tags, m.meta, value, time.Now()); err == nil {
value := map[string]interface{}{"value": int(x)}
y, err := lp.New("sample_metric", tags, m.meta, value, time.Now())
if err == nil {
output <- y
}
}

View File

@@ -1,229 +0,0 @@
package collectors
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/user"
"regexp"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const DEFAULT_BEEGFS_CMD = "beegfs-ctl"
// Struct for the collector-specific JSON config
type BeegfsMetaCollectorConfig struct {
Beegfs string `json:"beegfs_path"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem"`
}
type BeegfsMetaCollector struct {
metricCollector
tags map[string]string
matches map[string]string
config BeegfsMetaCollectorConfig
skipFS map[string]struct{}
}
func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
// Metrics
var nodeMdstat_array = [39]string{
"sum", "ack", "close", "entInf",
"fndOwn", "mkdir", "create", "rddir",
"refrEn", "mdsInf", "rmdir", "rmLnk",
"mvDirIns", "mvFiIns", "open", "ren",
"sChDrct", "sAttr", "sDirPat", "stat",
"statfs", "trunc", "symlnk", "unlnk",
"lookLI", "statLI", "revalLI", "openLI",
"createLI", "hardlnk", "flckAp", "flckEn",
"flckRg", "dirparent", "listXA", "getXA",
"rmXA", "setXA", "mirror"}
m.name = "BeegfsMetaCollector"
m.setup()
// Set default beegfs-ctl binary
m.config.Beegfs = DEFAULT_BEEGFS_CMD
// Read JSON configuration
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
//create map with possible variables
m.matches = make(map[string]string)
for _, value := range nodeMdstat_array {
_, skip := stringArrayContains(m.config.ExcludeMetrics, value)
if skip {
m.matches["other"] = "0"
} else {
m.matches["beegfs_cmeta_"+value] = "0"
}
}
m.meta = map[string]string{
"source": m.name,
"group": "BeegfsMeta",
}
m.tags = map[string]string{
"type": "node",
"filesystem": "",
}
m.skipFS = make(map[string]struct{})
for _, fs := range m.config.ExcludeFilesystem {
m.skipFS[fs] = struct{}{}
}
// Beegfs file system statistics can only be queried by user root
user, err := user.Current()
if err != nil {
return fmt.Errorf("BeegfsMetaCollector.Init(): Failed to get current user: %v", err)
}
if user.Uid != "0" {
return fmt.Errorf("BeegfsMetaCollector.Init(): BeeGFS file system statistics can only be queried by user root")
}
// Check if beegfs-ctl is in executable search path
_, err = exec.LookPath(m.config.Beegfs)
if err != nil {
return fmt.Errorf("BeegfsMetaCollector.Init(): Failed to find beegfs-ctl binary '%s': %v", m.config.Beegfs, err)
}
m.init = true
return nil
}
func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
// get mountpoints
buffer, _ := ioutil.ReadFile(string("/proc/mounts"))
mounts := strings.Split(string(buffer), "\n")
var mountpoints []string
for _, line := range mounts {
if len(line) == 0 {
continue
}
f := strings.Fields(line)
if strings.Contains(f[0], "beegfs_ondemand") {
// Skip excluded filesystems
if _, skip := m.skipFS[f[1]]; skip {
continue
}
mountpoints = append(mountpoints, f[1])
}
}
if len(mountpoints) == 0 {
return
}
for _, mountpoint := range mountpoints {
m.tags["filesystem"] = mountpoint
// beegfs-ctl:
// --clientstats: Show client IO statistics.
// --nodetype=meta: The node type to query (meta, storage).
// --interval:
// --mount=/mnt/beeond/: Which mount point
//cmd := exec.Command(m.config.Beegfs, "/root/mc/test.txt")
mountoption := "--mount=" + mountpoint
cmd := exec.Command(m.config.Beegfs, "--clientstats",
"--nodetype=meta", mountoption, "--allstats")
cmd.Stdin = strings.NewReader("\n")
cmdStdout := new(bytes.Buffer)
cmdStderr := new(bytes.Buffer)
cmd.Stdout = cmdStdout
cmd.Stderr = cmdStderr
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error())
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode())
data, _ := ioutil.ReadAll(cmdStderr)
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command stderr: \"%s\"\n", string(data))
data, _ = ioutil.ReadAll(cmdStdout)
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command stdout: \"%s\"\n", string(data))
return
}
// Read I/O statistics
scanner := bufio.NewScanner(cmdStdout)
sumLine := regexp.MustCompile(`^Sum:\s+\d+\s+\[[a-zA-Z]+\]+`)
//Line := regexp.MustCompile(`^(.*)\s+(\d)+\s+\[([a-zA-Z]+)\]+`)
statsLine := regexp.MustCompile(`^(.*?)\s+?(\d.*?)$`)
singleSpacePattern := regexp.MustCompile(`\s+`)
removePattern := regexp.MustCompile(`[\[|\]]`)
for scanner.Scan() {
readLine := scanner.Text()
//fmt.Println(readLine)
// Skip a few lines; we only want the I/O stats from the nodes
if !sumLine.MatchString(readLine) {
continue
}
match := statsLine.FindStringSubmatch(readLine)
// match[1] is the node name ("Sum:" for the aggregated line)
// nodeName := match[1]
// Remove brackets and collapse multiple whitespaces
dummy := removePattern.ReplaceAllString(match[2], " ")
metaStats := strings.TrimSpace(singleSpacePattern.ReplaceAllString(dummy, " "))
split := strings.Split(metaStats, " ")
// fill map with values
// split[i+1] = mdname
// split[i] = amount of md operations
for i := 0; i <= len(split)-1; i += 2 {
if _, ok := m.matches[split[i+1]]; ok {
m.matches["beegfs_cmeta_"+split[i+1]] = split[i]
} else {
f1, err := strconv.ParseFloat(m.matches["other"], 32)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Metric (other): Failed to convert str written '%s' to float: %v", m.matches["other"], err))
continue
}
f2, err := strconv.ParseFloat(split[i], 32)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Metric (other): Failed to convert str written '%s' to float: %v", m.matches["other"], err))
continue
}
//mdStat["other"] = fmt.Sprintf("%f", f1+f2)
m.matches["beegfs_cstorage_other"] = fmt.Sprintf("%f", f1+f2)
}
}
for key, data := range m.matches {
value, _ := strconv.ParseFloat(data, 32)
y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil {
output <- y
}
}
}
}
}
func (m *BeegfsMetaCollector) Close() {
m.init = false
}

View File

@@ -1,75 +0,0 @@
## `BeeGFS on Demand` collector
This collector gathers BeeGFS on Demand (BeeOND) metadata client statistics.
```json
"beegfs_meta": {
"beegfs_path": "/usr/bin/beegfs-ctl",
"exclude_filesystem": [
"/mnt/ignore_me"
],
"exclude_metrics": [
"ack",
"entInf",
"fndOwn"
]
}
```
The `BeeGFS On Demand (BeeOND)` collector uses the `beegfs-ctl` command to read performance metrics for
BeeGFS filesystems.
The reported filesystems can be filtered with the `exclude_filesystem` option
in the configuration.
The path to the `beegfs-ctl` command can be configured with the `beegfs_path` option
in the configuration.
When using the `exclude_metrics` option, the excluded metrics are summed as `other`.
Important: The metric names listed below follow the BeeGFS naming. The collector prefixes them with `beegfs_cmeta_` (BeeGFS client meta).
For example, the BeeGFS metric `open` becomes `beegfs_cmeta_open`.
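For illustration, assuming a BeeOND mount at `/mnt/beeond` and made-up values, the emitted metrics in influx line protocol would look like:
```
beegfs_cmeta_open,filesystem=/mnt/beeond,type=node value=42
beegfs_cmeta_statfs,filesystem=/mnt/beeond,type=node value=3
```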
Available Metrics:
* sum
* ack
* close
* entInf
* fndOwn
* mkdir
* create
* rddir
* refrEnt
* mdsInf
* rmdir
* rmLnk
* mvDirIns
* mvFiIns
* open
* ren
* sChDrct
* sAttr
* sDirPat
* stat
* statfs
* trunc
* symlnk
* unlnk
* lookLI
* statLI
* revalLI
* openLI
* createLI
* hardlnk
* flckAp
* flckEn
* flckRg
* dirparent
* listXA
* getXA
* rmXA
* setXA
* mirror
The collector adds a `filesystem` tag to all metrics

View File

@@ -1,221 +0,0 @@
package collectors
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/user"
"regexp"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// Struct for the collector-specific JSON config
type BeegfsStorageCollectorConfig struct {
Beegfs string `json:"beegfs_path"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem"`
}
type BeegfsStorageCollector struct {
metricCollector
tags map[string]string
matches map[string]string
config BeegfsStorageCollectorConfig
skipFS map[string]struct{}
}
func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
// Metrics
var storageStat_array = [18]string{
"sum", "ack", "sChDrct", "getFSize",
"sAttr", "statfs", "trunc", "close",
"fsync", "ops-rd", "MiB-rd/s", "ops-wr",
"MiB-wr/s", "gendbg", "hrtbeat", "remNode",
"storInf", "unlnk"}
m.name = "BeegfsStorageCollector"
m.setup()
// Set default beegfs-ctl binary
m.config.Beegfs = DEFAULT_BEEGFS_CMD
// Read JSON configuration
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
println(m.config.Beegfs)
//create map with possible variables
m.matches = make(map[string]string)
for _, value := range storageStat_array {
_, skip := stringArrayContains(m.config.ExcludeMetrics, value)
if skip {
m.matches["other"] = "0"
} else {
m.matches["beegfs_cstorage_"+value] = "0"
}
}
m.meta = map[string]string{
"source": m.name,
"group": "BeegfsStorage",
}
m.tags = map[string]string{
"type": "node",
"filesystem": "",
}
m.skipFS = make(map[string]struct{})
for _, fs := range m.config.ExcludeFilesystem {
m.skipFS[fs] = struct{}{}
}
// Beegfs file system statistics can only be queried by user root
user, err := user.Current()
if err != nil {
return fmt.Errorf("BeegfsStorageCollector.Init(): Failed to get current user: %v", err)
}
if user.Uid != "0" {
return fmt.Errorf("BeegfsStorageCollector.Init(): BeeGFS file system statistics can only be queried by user root")
}
// Check if beegfs-ctl is in executable search path
_, err = exec.LookPath(m.config.Beegfs)
if err != nil {
return fmt.Errorf("BeegfsStorageCollector.Init(): Failed to find beegfs-ctl binary '%s': %v", m.config.Beegfs, err)
}
m.init = true
return nil
}
func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
// get mountpoints
buffer, _ := ioutil.ReadFile(string("/proc/mounts"))
mounts := strings.Split(string(buffer), "\n")
var mountpoints []string
for _, line := range mounts {
if len(line) == 0 {
continue
}
f := strings.Fields(line)
if strings.Contains(f[0], "beegfs_ondemand") {
// Skip excluded filesystems
if _, skip := m.skipFS[f[1]]; skip {
continue
}
mountpoints = append(mountpoints, f[1])
}
}
if len(mountpoints) == 0 {
return
}
// collects stats for each BeeGFS on Demand FS
for _, mountpoint := range mountpoints {
m.tags["filesystem"] = mountpoint
// beegfs-ctl:
// --clientstats: Show client IO statistics.
// --nodetype=meta: The node type to query (meta, storage).
// --interval:
// --mount=/mnt/beeond/: Which mount point
//cmd := exec.Command(m.config.Beegfs, "/root/mc/test.txt")
mountoption := "--mount=" + mountpoint
cmd := exec.Command(m.config.Beegfs, "--clientstats",
"--nodetype=storage", mountoption, "--allstats")
cmd.Stdin = strings.NewReader("\n")
cmdStdout := new(bytes.Buffer)
cmdStderr := new(bytes.Buffer)
cmd.Stdout = cmdStdout
cmd.Stderr = cmdStderr
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error())
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode())
data, _ := ioutil.ReadAll(cmdStderr)
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command stderr: \"%s\"\n", string(data))
data, _ = ioutil.ReadAll(cmdStdout)
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command stdout: \"%s\"\n", string(data))
return
}
// Read I/O statistics
scanner := bufio.NewScanner(cmdStdout)
sumLine := regexp.MustCompile(`^Sum:\s+\d+\s+\[[a-zA-Z]+\]+`)
//Line := regexp.MustCompile(`^(.*)\s+(\d)+\s+\[([a-zA-Z]+)\]+`)
statsLine := regexp.MustCompile(`^(.*?)\s+?(\d.*?)$`)
singleSpacePattern := regexp.MustCompile(`\s+`)
removePattern := regexp.MustCompile(`[\[|\]]`)
for scanner.Scan() {
readLine := scanner.Text()
//fmt.Println(readLine)
// Skip a few lines; we only want the I/O stats from the nodes
if !sumLine.MatchString(readLine) {
continue
}
match := statsLine.FindStringSubmatch(readLine)
// match[1] is the node name ("Sum:" for the aggregated line)
// nodeName := match[1]
// Remove brackets and collapse multiple whitespaces
dummy := removePattern.ReplaceAllString(match[2], " ")
metaStats := strings.TrimSpace(singleSpacePattern.ReplaceAllString(dummy, " "))
split := strings.Split(metaStats, " ")
// fill map with values
// split[i+1] = mdname
// split[i] = amount of operations
for i := 0; i <= len(split)-1; i += 2 {
if _, ok := m.matches[split[i+1]]; ok {
m.matches["beegfs_cstorage_"+split[i+1]] = split[i]
//m.matches[split[i+1]] = split[i]
} else {
f1, err := strconv.ParseFloat(m.matches["other"], 32)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Metric (other): Failed to convert str written '%s' to float: %v", m.matches["other"], err))
continue
}
f2, err := strconv.ParseFloat(split[i], 32)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Metric (other): Failed to convert str written '%s' to float: %v", m.matches["other"], err))
continue
}
m.matches["beegfs_cstorage_other"] = fmt.Sprintf("%f", f1+f2)
}
}
for key, data := range m.matches {
value, _ := strconv.ParseFloat(data, 32)
y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil {
output <- y
}
}
}
}
}
func (m *BeegfsStorageCollector) Close() {
m.init = false
}

View File

@@ -1,55 +0,0 @@
## `BeeGFS on Demand` collector
This collector gathers BeeGFS on Demand (BeeOND) storage statistics.
```json
"beegfs_storage": {
"beegfs_path": "/usr/bin/beegfs-ctl",
"exclude_filesystem": [
"/mnt/ignore_me"
],
"exclude_metrics": [
"ack",
"storInf",
"unlnk"
]
}
```
The `BeeGFS On Demand (BeeOND)` collector uses the `beegfs-ctl` command to read performance metrics for BeeGFS filesystems.
The reported filesystems can be filtered with the `exclude_filesystem` option
in the configuration.
The path to the `beegfs-ctl` command can be configured with the `beegfs_path` option
in the configuration.
When using the `exclude_metrics` option, the excluded metrics are summed as `other`.
Important: The metric names listed below follow the BeeGFS naming. The collector prefixes them with `beegfs_cstorage_` (BeeGFS client storage).
For example, the BeeGFS metric `open` becomes `beegfs_cstorage_open`.
Note: BeeGFS offers a lot of metadata information, so it probably makes sense to exclude most of these metrics. The excluded metrics are still summed up as `beegfs_cstorage_other`.
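For illustration, assuming a BeeOND mount at `/mnt/beeond` and made-up values, the emitted metrics would look like this (with the excluded counts summed into `beegfs_cstorage_other`):
```
beegfs_cstorage_ops-rd,filesystem=/mnt/beeond,type=node value=1500
beegfs_cstorage_other,filesystem=/mnt/beeond,type=node value=7
```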
Available Metrics:
* "sum"
* "ack"
* "sChDrct"
* "getFSize"
* "sAttr"
* "statfs"
* "trunc"
* "close"
* "fsync"
* "ops-rd"
* "MiB-rd/s"
* "ops-wr"
* "MiB-wr/s"
* "endbg"
* "hrtbeat"
* "remNode"
* "storInf"
* "unlnk"
The collector adds a `filesystem` tag to all metrics

View File

@@ -19,23 +19,19 @@ var AvailableCollectors = map[string]MetricCollector{
"memstat": new(MemstatCollector),
"netstat": new(NetstatCollector),
"ibstat": new(InfinibandCollector),
"ibstat_perfquery": new(InfinibandPerfQueryCollector),
"lustrestat": new(LustreCollector),
"cpustat": new(CpustatCollector),
"topprocs": new(TopProcsCollector),
"nvidia": new(NvidiaCollector),
"customcmd": new(CustomCmdCollector),
"iostat": new(IOstatCollector),
"diskstat": new(DiskstatCollector),
"tempstat": new(TempCollector),
"ipmistat": new(IpmiCollector),
"gpfs": new(GpfsCollector),
"cpufreq": new(CPUFreqCollector),
"cpufreq_cpuinfo": new(CPUFreqCpuInfoCollector),
"nfs3stat": new(Nfs3Collector),
"nfs4stat": new(Nfs4Collector),
"numastats": new(NUMAStatsCollector),
"beegfs_meta": new(BeegfsMetaCollector),
"beegfs_storage": new(BeegfsStorageCollector),
"nfsstat": new(NfsCollector),
}
// Metric collector manager data structure

View File

@@ -5,13 +5,13 @@ import (
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
//
@@ -23,55 +23,45 @@ import (
type CPUFreqCpuInfoCollectorTopology struct {
processor string // logical processor number (continuous, starting at 0)
coreID string // socket local core ID
coreID_int int64
coreID_int int
physicalPackageID string // socket / package ID
physicalPackageID_int int64
physicalPackageID_int int
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
numPhysicalPackages_int int
isHT bool
numNonHT string // number of non hyperthreading processors
numNonHT_int int64
numNonHT_int int
tagSet map[string]string
}
type CPUFreqCpuInfoCollector struct {
metricCollector
topology []*CPUFreqCpuInfoCollectorTopology
topology []CPUFreqCpuInfoCollectorTopology
}
func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.setup()
m.name = "CPUFreqCpuInfoCollector"
m.meta = map[string]string{
"source": m.name,
"group": "CPU",
"unit": "MHz",
"group": "cpufreq",
}
const cpuInfoFile = "/proc/cpuinfo"
file, err := os.Open(cpuInfoFile)
if err != nil {
return fmt.Errorf("failed to open file '%s': %v", cpuInfoFile, err)
return fmt.Errorf("Failed to open '%s': %v", cpuInfoFile, err)
}
defer file.Close()
// Collect topology information from file cpuinfo
foundFreq := false
processor := ""
var numNonHT_int int64 = 0
numNonHT_int := 0
coreID := ""
physicalPackageID := ""
var maxPhysicalPackageID int64 = 0
m.topology = make([]*CPUFreqCpuInfoCollectorTopology, 0)
maxPhysicalPackageID := 0
m.topology = make([]CPUFreqCpuInfoCollectorTopology, 0)
coreSeenBefore := make(map[string]bool)
// Read cpuinfo file, line by line
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lineSplit := strings.Split(scanner.Text(), ":")
@@ -97,41 +87,39 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
len(coreID) > 0 &&
len(physicalPackageID) > 0 {
topology := new(CPUFreqCpuInfoCollectorTopology)
// Processor
topology.processor = processor
// Core ID
topology.coreID = coreID
topology.coreID_int, err = strconv.ParseInt(coreID, 10, 64)
coreID_int, err := strconv.Atoi(coreID)
if err != nil {
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
return fmt.Errorf("Unable to convert coreID to int: %v", err)
}
// Physical package ID
topology.physicalPackageID = physicalPackageID
topology.physicalPackageID_int, err = strconv.ParseInt(physicalPackageID, 10, 64)
physicalPackageID_int, err := strconv.Atoi(physicalPackageID)
if err != nil {
return fmt.Errorf("unable to convert physicalPackageID '%s' to int64: %v", physicalPackageID, err)
return fmt.Errorf("Unable to convert physicalPackageID to int: %v", err)
}
// increase maximum socket / package ID, when required
if topology.physicalPackageID_int > maxPhysicalPackageID {
maxPhysicalPackageID = topology.physicalPackageID_int
if physicalPackageID_int > maxPhysicalPackageID {
maxPhysicalPackageID = physicalPackageID_int
}
// is hyperthread?
globalID := physicalPackageID + ":" + coreID
topology.isHT = coreSeenBefore[globalID]
isHT := coreSeenBefore[globalID]
coreSeenBefore[globalID] = true
if !topology.isHT {
if !isHT {
// increase number of non-hyperthreading cores
numNonHT_int++
}
// store collected topology information
m.topology = append(m.topology, topology)
m.topology = append(
m.topology,
CPUFreqCpuInfoCollectorTopology{
processor: processor,
coreID: coreID,
coreID_int: coreID_int,
physicalPackageID: physicalPackageID,
physicalPackageID_int: physicalPackageID_int,
isHT: isHT,
})
// reset topology information
foundFreq = false
@@ -144,7 +132,8 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
numPhysicalPackageID_int := maxPhysicalPackageID + 1
numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
numNonHT := fmt.Sprint(numNonHT_int)
for _, t := range m.topology {
for i := range m.topology {
t := &m.topology[i]
t.numPhysicalPackages = numPhysicalPackageID
t.numPhysicalPackages_int = numPhysicalPackageID_int
t.numNonHT = numNonHT
@@ -152,7 +141,9 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
t.tagSet = map[string]string{
"type": "cpu",
"type-id": t.processor,
"num_core": t.numNonHT,
"package_id": t.physicalPackageID,
"num_package": t.numPhysicalPackages,
}
}
@@ -160,18 +151,15 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
return nil
}
func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
}
const cpuInfoFile = "/proc/cpuinfo"
file, err := os.Open(cpuInfoFile)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to open file '%s': %v", cpuInfoFile, err))
log.Printf("Failed to open '%s': %v", cpuInfoFile, err)
return
}
defer file.Close()
@@ -186,16 +174,15 @@ func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CC
// frequency
if key == "cpu MHz" {
t := m.topology[processorCounter]
t := &m.topology[processorCounter]
if !t.isHT {
value, err := strconv.ParseFloat(strings.TrimSpace(lineSplit[1]), 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err))
log.Printf("Failed to convert cpu MHz to float: %v", err)
return
}
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now)
if err == nil {
output <- y
}
}

View File

@@ -1,10 +0,0 @@
## `cpufreq_cpuinfo` collector
```json
"cpufreq_cpuinfo": {}
```
The `cpufreq_cpuinfo` collector reads the clock frequency from `/proc/cpuinfo` and outputs a handful of **cpu** metrics.
Metrics:
* `cpufreq`
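A minimal standalone sketch of the same idea (assuming nothing beyond `/proc/cpuinfo`; it omits the hyperthread filtering and topology tags the real collector adds):
```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// Print the "cpu MHz" value for each logical processor in /proc/cpuinfo.
func main() {
	file, err := os.Open("/proc/cpuinfo")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer file.Close()

	processor := ""
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		fields := strings.SplitN(scanner.Text(), ":", 2)
		if len(fields) != 2 {
			continue
		}
		key := strings.TrimSpace(fields[0])
		value := strings.TrimSpace(fields[1])
		switch key {
		case "processor":
			processor = value
		case "cpu MHz":
			if freq, err := strconv.ParseFloat(value, 64); err == nil {
				fmt.Printf("cpufreq,type=cpu,type-id=%s value=%f\n", processor, freq)
			}
		}
	}
}
```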

View File

@@ -1,30 +1,48 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"golang.org/x/sys/unix"
)
//
// readOneLine reads one line from a file.
// It returns ok when the file was successfully read.
// In this case text contains the first line of the file's contents.
//
func readOneLine(filename string) (text string, ok bool) {
file, err := os.Open(filename)
if err != nil {
return
}
defer file.Close()
scanner := bufio.NewScanner(file)
ok = scanner.Scan()
text = scanner.Text()
return
}
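// Usage sketch for readOneLine (hypothetical path):
//
//	if hostname, ok := readOneLine("/proc/sys/kernel/hostname"); ok {
//		fmt.Println(hostname)
//	}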
type CPUFreqCollectorTopology struct {
processor string // logical processor number (continuous, starting at 0)
coreID string // socket local core ID
coreID_int int64
coreID_int int
physicalPackageID string // socket / package ID
physicalPackageID_int int64
physicalPackageID_int int
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
numPhysicalPackages_int int
isHT bool
numNonHT string // number of non hyperthreading processors
numNonHT_int int64
numNonHT_int int
scalingCurFreqFile string
tagSet map[string]string
}
@@ -46,11 +64,6 @@ type CPUFreqCollector struct {
}
func (m *CPUFreqCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "CPUFreqCollector"
m.setup()
if len(config) > 0 {
@@ -61,8 +74,7 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
}
m.meta = map[string]string{
"source": m.name,
"group": "CPU",
"unit": "MHz",
"group": "CPU Frequency",
}
// Loop for all CPU directories
@@ -70,50 +82,48 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
globPattern := filepath.Join(baseDir, "cpu[0-9]*")
cpuDirs, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("unable to glob files with pattern '%s': %v", globPattern, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to glob files with pattern %s: %v", globPattern, err)
}
if cpuDirs == nil {
return fmt.Errorf("unable to find any files with pattern '%s'", globPattern)
return fmt.Errorf("CPUFreqCollector.Init() unable to find any files with pattern %s", globPattern)
}
// Initialize CPU topology
m.topology = make([]CPUFreqCollectorTopology, len(cpuDirs))
for _, cpuDir := range cpuDirs {
processor := strings.TrimPrefix(cpuDir, "/sys/devices/system/cpu/cpu")
processor_int, err := strconv.ParseInt(processor, 10, 64)
processor_int, err := strconv.Atoi(processor)
if err != nil {
return fmt.Errorf("unable to convert cpuID '%s' to int64: %v", processor, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to convert cpuID to int: %v", err)
}
// Read package ID
physicalPackageIDFile := filepath.Join(cpuDir, "topology", "physical_package_id")
line, err := ioutil.ReadFile(physicalPackageIDFile)
if err != nil {
return fmt.Errorf("unable to read physical package ID from file '%s': %v", physicalPackageIDFile, err)
physicalPackageID, ok := readOneLine(physicalPackageIDFile)
if !ok {
return fmt.Errorf("CPUFreqCollector.Init() unable to read physical package ID from %s", physicalPackageIDFile)
}
physicalPackageID := strings.TrimSpace(string(line))
physicalPackageID_int, err := strconv.ParseInt(physicalPackageID, 10, 64)
physicalPackageID_int, err := strconv.Atoi(physicalPackageID)
if err != nil {
return fmt.Errorf("unable to convert packageID '%s' to int64: %v", physicalPackageID, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to convert packageID to int: %v", err)
}
// Read core ID
coreIDFile := filepath.Join(cpuDir, "topology", "core_id")
line, err = ioutil.ReadFile(coreIDFile)
if err != nil {
return fmt.Errorf("unable to read core ID from file '%s': %v", coreIDFile, err)
coreID, ok := readOneLine(coreIDFile)
if !ok {
return fmt.Errorf("CPUFreqCollector.Init() unable to read core ID from %s", coreIDFile)
}
coreID := strings.TrimSpace(string(line))
coreID_int, err := strconv.ParseInt(coreID, 10, 64)
coreID_int, err := strconv.Atoi(coreID)
if err != nil {
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to convert coreID to int: %v", err)
}
// Check access to current frequency file
scalingCurFreqFile := filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq")
err = unix.Access(scalingCurFreqFile, unix.R_OK)
if err != nil {
return fmt.Errorf("unable to access file '%s': %v", scalingCurFreqFile, err)
return fmt.Errorf("CPUFreqCollector.Init() unable to access %s: %v", scalingCurFreqFile, err)
}
t := &m.topology[processor_int]
@@ -136,8 +146,8 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
}
// number of non-hyperthreading cores and packages / sockets
var numNonHT_int int64 = 0
var maxPhysicalPackageID int64 = 0
numNonHT_int := 0
maxPhysicalPackageID := 0
for i := range m.topology {
t := &m.topology[i]
@@ -163,7 +173,9 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
t.tagSet = map[string]string{
"type": "cpu",
"type-id": t.processor,
"num_core": t.numNonHT,
"package_id": t.physicalPackageID,
"num_package": t.numPhysicalPackages,
}
}
@@ -172,7 +184,6 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
}
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
}
@@ -187,22 +198,19 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric)
}
// Read current frequency
line, err := ioutil.ReadFile(t.scalingCurFreqFile)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", t.scalingCurFreqFile, err))
line, ok := readOneLine(t.scalingCurFreqFile)
if !ok {
log.Printf("CPUFreqCollector.Read(): Failed to read one line from file '%s'", t.scalingCurFreqFile)
continue
}
cpuFreq, err := strconv.ParseInt(strings.TrimSpace(string(line)), 10, 64)
cpuFreq, err := strconv.Atoi(line)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert CPU frequency '%s' to int64: %v", line, err))
log.Printf("CPUFreqCollector.Read(): Failed to convert CPU frequency '%s': %v", line, err)
continue
}
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now)
if err == nil {
output <- y
}
}

View File

@@ -1,11 +0,0 @@
## `cpufreq` collector
```json
"cpufreq": {
"exclude_metrics": []
}
```
The `cpufreq` collector reads the clock frequency from `/sys/devices/system/cpu/cpu*/cpufreq` and outputs a handful of **cpu** metrics.
Metrics:
* `cpufreq`
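A minimal standalone sketch of the same idea (it omits the access checks and topology tags the real collector adds):
```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)

// Read the current frequency (in kHz) of every CPU from the same
// sysfs files the collector polls.
func main() {
	cpuDirs, _ := filepath.Glob("/sys/devices/system/cpu/cpu[0-9]*")
	for _, cpuDir := range cpuDirs {
		raw, err := ioutil.ReadFile(filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq"))
		if err != nil {
			continue // not every kernel/CPU exposes cpufreq
		}
		freqKHz, err := strconv.Atoi(strings.TrimSpace(string(raw)))
		if err != nil {
			continue
		}
		processor := strings.TrimPrefix(cpuDir, "/sys/devices/system/cpu/cpu")
		fmt.Printf("cpufreq,type=cpu,type-id=%s value=%d\n", processor, freqKHz)
	}
}
```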

View File

@@ -1,15 +1,12 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"os"
"io/ioutil"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
@@ -22,126 +19,71 @@ type CpustatCollectorConfig struct {
type CpustatCollector struct {
metricCollector
config CpustatCollectorConfig
matches map[string]int
cputags map[string]map[string]string
nodetags map[string]string
}
func (m *CpustatCollector) Init(config json.RawMessage) error {
m.name = "CpustatCollector"
m.setup()
m.meta = map[string]string{"source": m.name, "group": "CPU", "unit": "Percent"}
m.nodetags = map[string]string{"type": "node"}
m.meta = map[string]string{"source": m.name, "group": "CPU"}
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
matches := map[string]int{
"cpu_user": 1,
"cpu_nice": 2,
"cpu_system": 3,
"cpu_idle": 4,
"cpu_iowait": 5,
"cpu_irq": 6,
"cpu_softirq": 7,
"cpu_steal": 8,
"cpu_guest": 9,
"cpu_guest_nice": 10,
}
m.matches = make(map[string]int)
for match, index := range matches {
doExclude := false
for _, exclude := range m.config.ExcludeMetrics {
if match == exclude {
doExclude = true
break
}
}
if !doExclude {
m.matches[match] = index
}
}
// Check input file
file, err := os.Open(string(CPUSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
}
defer file.Close()
// Pre-generate tags for all CPUs
num_cpus := 0
m.cputags = make(map[string]map[string]string)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if strings.HasPrefix(linefields[0], "cpu") && strings.Compare(linefields[0], "cpu") != 0 {
cpustr := strings.TrimLeft(linefields[0], "cpu")
cpu, _ := strconv.Atoi(cpustr)
m.cputags[linefields[0]] = map[string]string{"type": "cpu", "type-id": fmt.Sprintf("%d", cpu)}
num_cpus++
}
}
m.init = true
return nil
}
func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMetric) {
values := make(map[string]float64)
total := 0.0
for match, index := range m.matches {
if len(match) > 0 {
x, err := strconv.ParseInt(linefields[index], 0, 64)
func (c *CpustatCollector) parseStatLine(line string, cpu int, exclude []string, output chan lp.CCMetric) {
ls := strings.Fields(line)
matches := []string{"", "cpu_user", "cpu_nice", "cpu_system", "cpu_idle", "cpu_iowait", "cpu_irq", "cpu_softirq", "cpu_steal", "cpu_guest", "cpu_guest_nice"}
for _, ex := range exclude {
matches, _ = RemoveFromStringList(matches, ex)
}
var tags map[string]string
if cpu < 0 {
tags = map[string]string{"type": "node"}
} else {
tags = map[string]string{"type": "cpu", "type-id": fmt.Sprintf("%d", cpu)}
}
for i, m := range matches {
if len(m) > 0 {
x, err := strconv.ParseInt(ls[i], 0, 64)
if err == nil {
values[match] = float64(x)
total += values[match]
}
}
}
t := time.Now()
for name, value := range values {
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": (value * 100.0) / total}, t)
y, err := lp.New(m, tags, c.meta, map[string]interface{}{"value": int(x)}, time.Now())
if err == nil {
output <- y
}
}
}
}
}
func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
num_cpus := 0
file, err := os.Open(string(CPUSTATFILE))
buffer, err := ioutil.ReadFile(string(CPUSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if strings.Compare(linefields[0], "cpu") == 0 {
m.parseStatLine(linefields, m.nodetags, output)
} else if strings.HasPrefix(linefields[0], "cpu") {
m.parseStatLine(linefields, m.cputags[linefields[0]], output)
num_cpus++
}
return
}
num_cpus_metric, err := lp.New("num_cpus",
m.nodetags,
m.meta,
map[string]interface{}{"value": int(num_cpus)},
time.Now(),
)
if err == nil {
output <- num_cpus_metric
ll := strings.Split(string(buffer), "\n")
for _, line := range ll {
if len(line) == 0 {
continue
}
ls := strings.Fields(line)
if strings.Compare(ls[0], "cpu") == 0 {
m.parseStatLine(line, -1, m.config.ExcludeMetrics, output)
} else if strings.HasPrefix(ls[0], "cpu") {
cpustr := strings.TrimLeft(ls[0], "cpu")
cpu, _ := strconv.Atoi(cpustr)
m.parseStatLine(line, cpu, m.config.ExcludeMetrics, output)
}
}
}

View File

@@ -61,7 +61,7 @@ func (m *CustomCmdCollector) Init(config json.RawMessage) error {
}
}
if len(m.files) == 0 && len(m.commands) == 0 {
return errors.New("no metrics to collect")
return errors.New("No metrics to collect")
}
m.handler = influx.NewMetricHandler()
m.parser = influx.NewParser(m.handler)
@@ -97,8 +97,7 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetri
if skip {
continue
}
y := lp.FromInfluxMetric(c)
y, err := lp.New(c.Name(), Tags2Map(c), m.meta, Fields2Map(c), c.Time())
if err == nil {
output <- y
}
@@ -120,7 +119,7 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetri
if skip {
continue
}
y := lp.FromInfluxMetric(f)
y, err := lp.New(f.Name(), Tags2Map(f), m.meta, Fields2Map(f), f.Time())
if err == nil {
output <- y
}

View File

@@ -1,21 +1,18 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"os"
"strings"
"syscall"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
"io/ioutil"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
// "log"
"encoding/json"
"errors"
"strconv"
"strings"
"time"
)
// "log"
const MOUNTFILE = `/proc/self/mounts`
const DISKSTATFILE = `/proc/diskstats`
const DISKSTAT_SYSFSPATH = `/sys/block`
type DiskstatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
@@ -23,89 +20,93 @@ type DiskstatCollectorConfig struct {
type DiskstatCollector struct {
metricCollector
//matches map[string]int
config IOstatCollectorConfig
//devices map[string]IOstatCollectorEntry
matches map[int]string
config DiskstatCollectorConfig
}
func (m *DiskstatCollector) Init(config json.RawMessage) error {
var err error
m.name = "DiskstatCollector"
m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup()
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
file, err := os.Open(string(MOUNTFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return err
// https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
matches := map[int]string{
3: "reads",
4: "reads_merged",
5: "read_sectors",
6: "read_ms",
7: "writes",
8: "writes_merged",
9: "writes_sectors",
10: "writes_ms",
11: "ioops",
12: "ioops_ms",
13: "ioops_weighted_ms",
14: "discards",
15: "discards_merged",
16: "discards_sectors",
17: "discards_ms",
18: "flushes",
19: "flushes_ms",
}
defer file.Close()
m.matches = make(map[int]string)
for k, v := range matches {
_, skip := stringArrayContains(m.config.ExcludeMetrics, v)
if !skip {
m.matches[k] = v
}
}
if len(m.matches) == 0 {
return errors.New("No metrics to collect")
}
_, err = ioutil.ReadFile(string(DISKSTATFILE))
if err == nil {
m.init = true
return nil
}
return err
}
func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var lines []string
if !m.init {
return
}
file, err := os.Open(string(MOUNTFILE))
buffer, err := ioutil.ReadFile(string(DISKSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
}
defer file.Close()
lines = strings.Split(string(buffer), "\n")
part_max_used := uint64(0)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
for _, line := range lines {
if len(line) == 0 {
continue
}
if !strings.HasPrefix(line, "/dev") {
f := strings.Fields(line)
if strings.Contains(f[2], "loop") {
continue
}
linefields := strings.Fields(line)
if strings.Contains(linefields[0], "loop") {
continue
tags := map[string]string{
"device": f[2],
"type": "node",
}
if strings.Contains(linefields[1], "boot") {
continue
}
path := strings.Replace(linefields[1], `\040`, " ", -1)
stat := syscall.Statfs_t{}
err := syscall.Statfs(path, &stat)
if err != nil {
fmt.Println(err.Error())
return
}
tags := map[string]string{"type": "node", "device": linefields[0]}
total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000)
y, err := lp.New("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
for idx, name := range m.matches {
if idx < len(f) {
x, err := strconv.ParseInt(f[idx], 0, 64)
if err == nil {
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": int(x)}, time.Now())
if err == nil {
y.AddMeta("unit", "GBytes")
output <- y
}
free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000)
y, err = lp.New("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
if err == nil {
y.AddMeta("unit", "GBytes")
output <- y
}
perc := (100 * (total - free)) / total
if perc > part_max_used {
part_max_used = perc
}
}
y, err := lp.New("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
if err == nil {
y.AddMeta("unit", "percent")
output <- y
}
}
}

View File

@@ -4,18 +4,31 @@
```json
"diskstat": {
"exclude_metrics": [
"disk_total"
"read_ms"
]
}
```
The `diskstat` collector reads data from `/proc/self/mounts` and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink.
The `diskstat` collector reads data from `/proc/diskstats` and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink.
Metrics per device (with `device` tag):
* `disk_total` (unit `GBytes`)
* `disk_free` (unit `GBytes`)
Global metrics:
* `part_max_used` (unit `percent`)
Metrics:
* `reads`
* `reads_merged`
* `read_sectors`
* `read_ms`
* `writes`
* `writes_merged`
* `writes_sectors`
* `writes_ms`
* `ioops`
* `ioops_ms`
* `ioops_weighted_ms`
* `discards`
* `discards_merged`
* `discards_sectors`
* `discards_ms`
* `flushes`
* `flushes_ms`
The device name is added as tag `device`.
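For illustration, here is a minimal standalone sketch of how one `/proc/diskstats` line decomposes into the per-device counters listed above. The field-to-name mapping follows the kernel iostats documentation; the sample line and its values are made up:
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Field positions in /proc/diskstats, see
// https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
var matches = map[int]string{
	3: "reads", 4: "reads_merged", 5: "read_sectors", 6: "read_ms",
	7: "writes", 8: "writes_merged", 9: "writes_sectors", 10: "writes_ms",
	11: "ioops", 12: "ioops_ms", 13: "ioops_weighted_ms",
}

func main() {
	// Hypothetical line: major, minor, device name, then the counters
	line := "8 0 sda 41294 9312 4162956 5385 27056 22912 935712 19548 0 15460 24934"
	f := strings.Fields(line)
	device := f[2] // becomes the "device" tag
	for idx, name := range matches {
		if idx < len(f) {
			if x, err := strconv.ParseInt(f[idx], 10, 64); err == nil {
				fmt.Printf("%s{device=%s} = %d\n", name, device, x)
			}
		}
	}
}
```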

View File

@@ -7,48 +7,31 @@ import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const DEFAULT_GPFS_CMD = "mmpmon"
type GpfsCollectorLastState struct {
bytesRead int64
bytesWritten int64
}
type GpfsCollector struct {
metricCollector
tags map[string]string
config struct {
Mmpmon string `json:"mmpmon_path,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
SendBandwidths bool `json:"send_bandwidths"`
Mmpmon string `json:"mmpmon"`
}
skipFS map[string]struct{}
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
lastState map[string]GpfsCollectorLastState
}
func (m *GpfsCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
var err error
m.name = "GpfsCollector"
m.setup()
// Set default mmpmon binary
m.config.Mmpmon = DEFAULT_GPFS_CMD
m.config.Mmpmon = "/usr/lpp/mmfs/bin/mmpmon"
// Read JSON configuration
if len(config) > 0 {
@@ -66,44 +49,31 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
"type": "node",
"filesystem": "",
}
m.skipFS = make(map[string]struct{})
for _, fs := range m.config.ExcludeFilesystem {
m.skipFS[fs] = struct{}{}
}
// GPFS / IBM Spectrum Scale file system statistics can only be queried by user root
user, err := user.Current()
if err != nil {
return fmt.Errorf("failed to get current user: %v", err)
return fmt.Errorf("GpfsCollector.Init(): Failed to get current user: %v", err)
}
if user.Uid != "0" {
return fmt.Errorf("GPFS file system statistics can only be queried by user root")
return fmt.Errorf("GpfsCollector.Init(): GPFS file system statistics can only be queried by user root")
}
// Check if mmpmon is in executable search path
p, err := exec.LookPath(m.config.Mmpmon)
_, err = exec.LookPath(m.config.Mmpmon)
if err != nil {
return fmt.Errorf("failed to find mmpmon binary '%s': %v", m.config.Mmpmon, err)
return fmt.Errorf("GpfsCollector.Init(): Failed to find mmpmon binary '%s': %v", m.config.Mmpmon, err)
}
m.config.Mmpmon = p
m.init = true
return nil
}
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
}
// Current time stamp
now := time.Now()
// time difference to last time stamp
timeDiff := now.Sub(m.lastTimestamp).Seconds()
// Save current timestamp
m.lastTimestamp = now
// mmpmon:
// -p: generate output that can be parsed
// -s: suppress the prompt on input
@@ -116,15 +86,12 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
cmd.Stderr = cmdStderr
err := cmd.Run()
if err != nil {
dataStdErr, _ := ioutil.ReadAll(cmdStderr)
dataStdOut, _ := ioutil.ReadAll(cmdStdout)
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to execute command \"%s\": %v\n", cmd.String(), err),
fmt.Sprintf("Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode()),
fmt.Sprintf("Read(): command stderr: \"%s\"\n", string(dataStdErr)),
fmt.Sprintf("Read(): command stdout: \"%s\"\n", string(dataStdOut)),
)
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error())
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode())
data, _ := ioutil.ReadAll(cmdStderr)
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): command stderr: \"%s\"\n", string(data))
data, _ = ioutil.ReadAll(cmdStdout)
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): command stdout: \"%s\"\n", string(data))
return
}
@@ -132,12 +99,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
scanner := bufio.NewScanner(cmdStdout)
for scanner.Scan() {
lineSplit := strings.Fields(scanner.Text())
// Only process lines starting with _fs_io_s_
if lineSplit[0] != "_fs_io_s_" {
continue
}
if lineSplit[0] == "_fs_io_s_" {
key_value := make(map[string]string)
for i := 1; i < len(lineSplit); i += 2 {
key_value[lineSplit[i]] = lineSplit[i+1]
@@ -151,52 +113,35 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
filesystem, ok := key_value["_fs_"]
if !ok {
cclog.ComponentError(
m.name,
"Read(): Failed to get filesystem name.")
continue
}
// Skip excluded filesystems
if _, skip := m.skipFS[filesystem]; skip {
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to get filesystem name.\n")
continue
}
m.tags["filesystem"] = filesystem
if _, ok := m.lastState[filesystem]; !ok {
m.lastState[filesystem] = GpfsCollectorLastState{
bytesRead: -1,
bytesWritten: -1,
}
}
// return code
rc, err := strconv.Atoi(key_value["_rc_"])
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert return code '%s' to int: %v", key_value["_rc_"], err))
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert return code: %s\n", err.Error())
continue
}
if rc != 0 {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Filesystem '%s' is not ok.", filesystem))
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Filesystem %s not ok.", filesystem)
continue
}
sec, err := strconv.ParseInt(key_value["_t_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert seconds '%s' to int64: %v", key_value["_t_"], err))
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert seconds to int '%s': %v\n",
key_value["_t_"], err)
continue
}
msec, err := strconv.ParseInt(key_value["_tu_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert micro seconds '%s' to int64: %v", key_value["_tu_"], err))
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert micro seconds to int '%s': %v\n",
key_value["_tu_"], err)
continue
}
timestamp := time.Unix(sec, msec*1000)
@@ -204,122 +149,100 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// bytes read
bytesRead, err := strconv.ParseInt(key_value["_br_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert bytes read '%s' to int64: %v", key_value["_br_"], err))
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert bytes read '%s': %s\n",
key_value["_br_"], err.Error())
continue
}
if y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp); err == nil {
y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp)
if err == nil {
output <- y
}
if m.config.SendBandwidths {
if lastBytesRead := m.lastState[filesystem].bytesRead; lastBytesRead >= 0 {
bwRead := float64(bytesRead-lastBytesRead) / timeDiff
if y, err := lp.New("gpfs_bw_read", m.tags, m.meta, map[string]interface{}{"value": bwRead}, timestamp); err == nil {
output <- y
}
}
}
// bytes written
bytesWritten, err := strconv.ParseInt(key_value["_bw_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert bytes written '%s' to int64: %v", key_value["_bw_"], err))
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert bytes written '%s': %s\n",
key_value["_bw_"], err.Error())
continue
}
if y, err := lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp); err == nil {
output <- y
}
if m.config.SendBandwidths {
if lastBytesWritten := m.lastState[filesystem].bytesRead; lastBytesWritten >= 0 {
bwWrite := float64(bytesWritten-lastBytesWritten) / timeDiff
if y, err := lp.New("gpfs_bw_write", m.tags, m.meta, map[string]interface{}{"value": bwWrite}, timestamp); err == nil {
output <- y
}
}
}
if m.config.SendBandwidths {
m.lastState[filesystem] = GpfsCollectorLastState{
bytesRead: bytesRead,
bytesWritten: bytesWritten,
}
y, err = lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp)
if err == nil {
output <- y
}
// number of opens
numOpens, err := strconv.ParseInt(key_value["_oc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err))
fmt.Fprintf(os.Stderr,
"GpfsCollector.Read(): Failed to convert number of opens '%s': %s\n",
key_value["_oc_"], err.Error())
continue
}
if y, err := lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
y, err = lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp)
if err == nil {
output <- y
}
// number of closes
numCloses, err := strconv.ParseInt(key_value["_cc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err))
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of closes: %s\n", err.Error())
continue
}
if y, err := lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
y, err = lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp)
if err == nil {
output <- y
}
// number of reads
numReads, err := strconv.ParseInt(key_value["_rdc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err))
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of reads: %s\n", err.Error())
continue
}
if y, err := lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
y, err = lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp)
if err == nil {
output <- y
}
// number of writes
numWrites, err := strconv.ParseInt(key_value["_wc_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err))
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of writes: %s\n", err.Error())
continue
}
if y, err := lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
y, err = lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp)
if err == nil {
output <- y
}
// number of read directories
numReaddirs, err := strconv.ParseInt(key_value["_dir_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err))
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of read directories: %s\n", err.Error())
continue
}
if y, err := lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
y, err = lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp)
if err == nil {
output <- y
}
// Number of inode updates
numInodeUpdates, err := strconv.ParseInt(key_value["_iu_"], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err))
fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert Number of inode updates: %s\n", err.Error())
continue
}
if y, err := lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
y, err = lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp)
if err == nil {
output <- y
}
}
}
}
func (m *GpfsCollector) Close() {

View File

@@ -1,34 +0,0 @@
## `gpfs` collector
```json
"ibstat": {
"mmpmon_path": "/path/to/mmpmon",
"exclude_filesystem": [
"fs1"
],
"send_bandwidths" : true
}
```
The `gpfs` collector uses the `mmpmon` command to read performance metrics for
GPFS / IBM Spectrum Scale filesystems.
The reported filesystems can be filtered with the `exclude_filesystem` option
in the configuration.
The path to the `mmpmon` command can be configured with the `mmpmon_path` option
in the configuration. If nothing is set, the collector searches in `$PATH` for `mmpmon`.
Metrics:
* `gpfs_bytes_read`
* `gpfs_bytes_written`
* `gpfs_num_opens`
* `gpfs_num_closes`
* `gpfs_num_reads`
* `gpfs_num_writes`
* `gpfs_num_readdirs`
* `gpfs_num_inode_updates`
* `gpfs_bw_read` (if `send_bandwidths == true`)
* `gpfs_bw_write` (if `send_bandwidths == true`)
The collector adds a `filesystem` tag to all metrics.
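To illustrate the data source, here is a small sketch of how one `_fs_io_s_` response line from `mmpmon -p` decomposes into key/value pairs; the sample line is hypothetical, and real output carries more keys (`_oc_`, `_cc_`, `_rdc_`, `_wc_`, `_dir_`, `_iu_`, ...):
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical mmpmon -p "fs_io_s" response line
	line := "_fs_io_s_ _n_ 10.0.0.1 _nn_ node1 _rc_ 0 _t_ 1638551049 _tu_ 530600 _fs_ fs1 _br_ 1048576 _bw_ 2097152"
	lineSplit := strings.Fields(line)
	if lineSplit[0] != "_fs_io_s_" {
		return // only fs_io_s records carry the per-filesystem counters
	}
	// After the record tag, tokens alternate between key and value
	keyValue := make(map[string]string)
	for i := 1; i+1 < len(lineSplit); i += 2 {
		keyValue[lineSplit[i]] = lineSplit[i+1]
	}
	fmt.Printf("filesystem=%s bytes_read=%s bytes_written=%s\n",
		keyValue["_fs_"], keyValue["_br_"], keyValue["_bw_"])
}
```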

View File

@@ -2,10 +2,8 @@ package collectors
import (
"fmt"
"io/ioutil"
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"golang.org/x/sys/unix"
@@ -16,36 +14,44 @@ import (
"time"
)
const IB_BASEPATH = "/sys/class/infiniband/"
const IB_BASEPATH = `/sys/class/infiniband/`
type InfinibandCollectorInfo struct {
LID string // IB local Identifier (LID)
device string // IB device
port string // IB device port
portCounterFiles map[string]string // mapping counter name -> sysfs file
portCounterFiles map[string]string // mapping counter name -> file
tagSet map[string]string // corresponding tag list
lastState map[string]int64 // State from last measurement
}
type InfinibandCollector struct {
metricCollector
config struct {
ExcludeDevices []string `json:"exclude_devices,omitempty"` // IB device to exclude e.g. mlx5_0
SendAbsoluteValues bool `json:"send_abs_values"` // Send absolut values as read from sys filesystem
SendDerivedValues bool `json:"send_derived_values"` // Send derived values e.g. rates
}
info []*InfinibandCollectorInfo
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
info []InfinibandCollectorInfo
}
func (m *InfinibandCollector) Help() {
fmt.Println("This collector includes all devices that can be found below ", IB_BASEPATH)
fmt.Println("and where any of the ports provides a 'lid' file (glob ", IB_BASEPATH, "/<dev>/ports/<port>/lid).")
fmt.Println("The devices can be filtered with the 'exclude_devices' option in the configuration.")
fmt.Println("For each found LIDs the collector calls the 'perfquery' command")
fmt.Println("")
fmt.Println("Full configuration object:")
fmt.Println("\"ibstat\" : {")
fmt.Println(" \"exclude_devices\" : [\"dev1\"]")
fmt.Println("}")
fmt.Println("")
fmt.Println("Metrics:")
fmt.Println("- ib_recv")
fmt.Println("- ib_xmit")
fmt.Println("- ib_recv_pkts")
fmt.Println("- ib_xmit_pkts")
}
// Init initializes the Infiniband collector by walking through files below IB_BASEPATH
func (m *InfinibandCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
var err error
m.name = "InfinibandCollector"
m.setup()
@@ -53,11 +59,6 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
"source": m.name,
"group": "Network",
}
// Set default configuration,
m.config.SendAbsoluteValues = true
m.config.SendDerivedValues = false
// Read configuration file, allow overwriting default config
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
@@ -69,21 +70,17 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
globPattern := filepath.Join(IB_BASEPATH, "*", "ports", "*")
ibDirs, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("unable to glob files with pattern %s: %v", globPattern, err)
return fmt.Errorf("Unable to glob files with pattern %s: %v", globPattern, err)
}
if ibDirs == nil {
return fmt.Errorf("unable to find any directories with pattern %s", globPattern)
return fmt.Errorf("Unable to find any directories with pattern %s", globPattern)
}
for _, path := range ibDirs {
// Skip, when no LID is assigned
line, err := ioutil.ReadFile(filepath.Join(path, "lid"))
if err != nil {
continue
}
LID := strings.TrimSpace(string(line))
if LID == "0x0" {
LID, ok := readOneLine(path + "/lid")
if !ok || LID == "0x0" {
continue
}
@@ -115,18 +112,12 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
for _, counterFile := range portCounterFiles {
err := unix.Access(counterFile, unix.R_OK)
if err != nil {
return fmt.Errorf("unable to access %s: %v", counterFile, err)
return fmt.Errorf("Unable to access %s: %v", counterFile, err)
}
}
// Initialize last state
lastState := make(map[string]int64)
for counter := range portCounterFiles {
lastState[counter] = -1
}
m.info = append(m.info,
&InfinibandCollectorInfo{
InfinibandCollectorInfo{
LID: LID,
device: device,
port: port,
@@ -137,12 +128,11 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
"port": port,
"lid": LID,
},
lastState: lastState,
})
}
if len(m.info) == 0 {
return fmt.Errorf("found no IB devices")
return fmt.Errorf("Found no IB devices")
}
m.init = true
@@ -157,55 +147,20 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMetr
return
}
// Current time stamp
now := time.Now()
// time difference to last time stamp
timeDiff := now.Sub(m.lastTimestamp).Seconds()
// Save current timestamp
m.lastTimestamp = now
for i := range m.info {
for _, info := range m.info {
// device info
info := &m.info[i]
for counterName, counterFile := range info.portCounterFiles {
// Read counter file
line, err := ioutil.ReadFile(counterFile)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read from file '%s': %v", counterFile, err))
continue
}
data := strings.TrimSpace(string(line))
// convert counter to int64
v, err := strconv.ParseInt(data, 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterName, data, err))
continue
}
// Send absolut values
if m.config.SendAbsoluteValues {
if data, ok := readOneLine(counterFile); ok {
if v, err := strconv.ParseInt(data, 10, 64); err == nil {
if y, err := lp.New(counterName, info.tagSet, m.meta, map[string]interface{}{"value": v}, now); err == nil {
output <- y
}
}
// Send derived values
if m.config.SendDerivedValues {
if info.lastState[counterName] >= 0 {
rate := float64((v - info.lastState[counterName])) / timeDiff
if y, err := lp.New(counterName+"_bw", info.tagSet, m.meta, map[string]interface{}{"value": rate}, now); err == nil {
output <- y
}
}
// Save current state
info.lastState[counterName] = v
}
}
}
}

View File

@@ -3,30 +3,17 @@
```json
"ibstat": {
"perfquery_path" : "<path to perfquery command>",
"exclude_devices": [
"mlx4"
],
"send_abs_values": true,
"send_derived_values": true
]
}
```
The `ibstat` collector includes all Infiniband devices that can be
found below `/sys/class/infiniband/` and where any of the ports provides a
LID file (`/sys/class/infiniband/<dev>/ports/<port>/lid`)
The devices can be filtered with the `exclude_devices` option in the configuration.
For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`.
The `ibstat` collector reads data either through the `perfquery` command or from the sysfs files below `/sys/class/infiniband/<device>`.
Metrics:
* `ib_recv`
* `ib_xmit`
* `ib_recv_pkts`
* `ib_xmit_pkts`
* `ib_recv_bw` (if `send_derived_values == true`)
* `ib_xmit_bw` (if `send_derived_values == true`)
* `ib_recv_pkts_bw` (if `send_derived_values == true`)
* `ib_xmit_pkts_bw` (if `send_derived_values == true`)
The collector adds a `device` tag to all metrics.
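For illustration, a minimal sketch of the sysfs read path and of the derived rate that `send_derived_values` enables. The device, port and interval are hypothetical; the counters are reported in hardware-specific units, so the rate is in counter units per second:
```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
	"time"
)

// readCounter reads one sysfs counter file and parses it as int64.
func readCounter(path string) (int64, error) {
	line, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(strings.TrimSpace(string(line)), 10, 64)
}

func main() {
	// Hypothetical device and port; adjust to an existing one on your system
	const counterFile = "/sys/class/infiniband/mlx5_0/ports/1/counters/port_rcv_data"
	const interval = 10 * time.Second

	last, err := readCounter(counterFile)
	if err != nil {
		fmt.Println(err)
		return
	}
	time.Sleep(interval)
	v, err := readCounter(counterFile)
	if err != nil {
		fmt.Println(err)
		return
	}
	// ib_recv would report v itself; ib_recv_bw reports the rate over the interval
	fmt.Printf("ib_recv=%d ib_recv_bw=%f\n", v, float64(v-last)/interval.Seconds())
}
```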

View File

@@ -0,0 +1,250 @@
package collectors
import (
"fmt"
"io/ioutil"
"log"
"os/exec"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
// "os"
"encoding/json"
"errors"
"path/filepath"
"strconv"
"strings"
"time"
)
const PERFQUERY = `/usr/sbin/perfquery`
type InfinibandPerfQueryCollector struct {
metricCollector
tags map[string]string
lids map[string]map[string]string
config struct {
ExcludeDevices []string `json:"exclude_devices,omitempty"`
PerfQueryPath string `json:"perfquery_path"`
}
}
func (m *InfinibandPerfQueryCollector) Help() {
fmt.Println("This collector includes all devices that can be found below ", IB_BASEPATH)
fmt.Println("and where any of the ports provides a 'lid' file (glob ", IB_BASEPATH, "/<dev>/ports/<port>/lid).")
fmt.Println("The devices can be filtered with the 'exclude_devices' option in the configuration.")
fmt.Println("For each found LIDs the collector calls the 'perfquery' command")
fmt.Println("The path to the 'perfquery' command can be configured with the 'perfquery_path' option")
fmt.Println("in the configuration")
fmt.Println("")
fmt.Println("Full configuration object:")
fmt.Println("\"ibstat\" : {")
fmt.Println(" \"perfquery_path\" : \"path/to/perfquery\" # if omitted, it searches in $PATH")
fmt.Println(" \"exclude_devices\" : [\"dev1\"]")
fmt.Println("}")
fmt.Println("")
fmt.Println("Metrics:")
fmt.Println("- ib_recv")
fmt.Println("- ib_xmit")
fmt.Println("- ib_recv_pkts")
fmt.Println("- ib_xmit_pkts")
}
func (m *InfinibandPerfQueryCollector) Init(config json.RawMessage) error {
var err error
m.name = "InfinibandCollectorPerfQuery"
m.setup()
m.meta = map[string]string{"source": m.name, "group": "Network"}
m.tags = map[string]string{"type": "node"}
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
if len(m.config.PerfQueryPath) == 0 {
path, err := exec.LookPath("perfquery")
if err == nil {
m.config.PerfQueryPath = path
}
}
m.lids = make(map[string]map[string]string)
p := fmt.Sprintf("%s/*/ports/*/lid", string(IB_BASEPATH))
files, err := filepath.Glob(p)
for _, f := range files {
lid, err := ioutil.ReadFile(f)
if err == nil {
plist := strings.Split(strings.Replace(f, string(IB_BASEPATH), "", -1), "/")
skip := false
for _, d := range m.config.ExcludeDevices {
if d == plist[0] {
skip = true
}
}
if !skip {
m.lids[plist[0]] = make(map[string]string)
m.lids[plist[0]][plist[2]] = string(lid)
}
}
}
for _, ports := range m.lids {
for port, lid := range ports {
// pass each argument separately; Output() runs the command, so no separate Wait() is needed
command := exec.Command(m.config.PerfQueryPath, "-r", lid, port, "0xf000")
_, err := command.Output()
if err != nil {
return fmt.Errorf("Failed to execute %s: %v", m.config.PerfQueryPath, err)
}
}
}
if len(m.lids) == 0 {
return errors.New("No usable IB devices")
}
m.init = true
return nil
}
func (m *InfinibandPerfQueryCollector) doPerfQuery(cmd string, dev string, lid string, port string, tags map[string]string, output chan lp.CCMetric) error {
// pass each argument separately; Output() below starts the command and waits for it
command := exec.Command(cmd, "-r", lid, port, "0xf000")
stdout, err := command.Output()
if err != nil {
log.Print(err)
return err
}
ll := strings.Split(string(stdout), "\n")
for _, line := range ll {
if strings.HasPrefix(line, "PortRcvData") || strings.HasPrefix(line, "RcvData") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
y, err := lp.New("ib_recv", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
if strings.HasPrefix(line, "PortXmitData") || strings.HasPrefix(line, "XmtData") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
y, err := lp.New("ib_xmit", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
if strings.HasPrefix(line, "PortRcvPkts") || strings.HasPrefix(line, "RcvPkts") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
y, err := lp.New("ib_recv_pkts", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
if strings.HasPrefix(line, "PortXmitPkts") || strings.HasPrefix(line, "XmtPkts") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
y, err := lp.New("ib_xmit_pkts", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
if strings.HasPrefix(line, "PortRcvPkts") || strings.HasPrefix(line, "RcvPkts") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
y, err := lp.New("ib_recv_pkts", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
if strings.HasPrefix(line, "PortXmitPkts") || strings.HasPrefix(line, "XmtPkts") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
y, err := lp.New("ib_xmit_pkts", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
}
return nil
}
func (m *InfinibandPerfQueryCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if m.init {
for dev, ports := range m.lids {
for port, lid := range ports {
tags := map[string]string{
"type": "node",
"device": dev,
"port": port,
"lid": lid}
path := fmt.Sprintf("%s/%s/ports/%s/counters/", string(IB_BASEPATH), dev, port)
buffer, err := ioutil.ReadFile(fmt.Sprintf("%s/port_rcv_data", path))
if err == nil {
data := strings.Replace(string(buffer), "\n", "", -1)
v, err := strconv.ParseFloat(data, 64)
if err == nil {
y, err := lp.New("ib_recv", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
buffer, err = ioutil.ReadFile(fmt.Sprintf("%s/port_xmit_data", path))
if err == nil {
data := strings.Replace(string(buffer), "\n", "", -1)
v, err := strconv.ParseFloat(data, 64)
if err == nil {
y, err := lp.New("ib_xmit", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
buffer, err = ioutil.ReadFile(fmt.Sprintf("%s/port_rcv_packets", path))
if err == nil {
data := strings.Replace(string(buffer), "\n", "", -1)
v, err := strconv.ParseFloat(data, 64)
if err == nil {
y, err := lp.New("ib_recv_pkts", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
buffer, err = ioutil.ReadFile(fmt.Sprintf("%s/port_xmit_packets", path))
if err == nil {
data := strings.Replace(string(buffer), "\n", "", -1)
v, err := strconv.ParseFloat(data, 64)
if err == nil {
y, err := lp.New("ib_xmit_pkts", tags, m.meta, map[string]interface{}{"value": float64(v)}, time.Now())
if err == nil {
output <- y
}
}
}
}
}
}
}
func (m *InfinibandPerfQueryCollector) Close() {
m.init = false
}

View File

@@ -1,155 +0,0 @@
package collectors
import (
"bufio"
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
// "log"
"encoding/json"
"errors"
"strconv"
"strings"
"time"
)
const IOSTATFILE = `/proc/diskstats`
const IOSTAT_SYSFSPATH = `/sys/block`
type IOstatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
type IOstatCollectorEntry struct {
lastValues map[string]int64
tags map[string]string
}
type IOstatCollector struct {
metricCollector
matches map[string]int
config IOstatCollectorConfig
devices map[string]IOstatCollectorEntry
}
func (m *IOstatCollector) Init(config json.RawMessage) error {
var err error
m.name = "IOstatCollector"
m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup()
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
// https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
matches := map[string]int{
"io_reads": 3,
"io_reads_merged": 4,
"io_read_sectors": 5,
"io_read_ms": 6,
"io_writes": 7,
"io_writes_merged": 8,
"io_writes_sectors": 9,
"io_writes_ms": 10,
"io_ioops": 11,
"io_ioops_ms": 12,
"io_ioops_weighted_ms": 13,
"io_discards": 14,
"io_discards_merged": 15,
"io_discards_sectors": 16,
"io_discards_ms": 17,
"io_flushes": 18,
"io_flushes_ms": 19,
}
m.devices = make(map[string]IOstatCollectorEntry)
m.matches = make(map[string]int)
for k, v := range matches {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, k); !skip {
m.matches[k] = v
}
}
if len(m.matches) == 0 {
return errors.New("no metrics to collect")
}
file, err := os.Open(string(IOSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
device := linefields[2]
if strings.Contains(device, "loop") {
continue
}
values := make(map[string]int64)
for m := range m.matches {
values[m] = 0
}
m.devices[device] = IOstatCollectorEntry{
tags: map[string]string{
"device": linefields[2],
"type": "node",
},
lastValues: values,
}
}
m.init = true
return err
}
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
file, err := os.Open(string(IOSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 {
continue
}
linefields := strings.Fields(line)
device := linefields[2]
if strings.Contains(device, "loop") {
continue
}
if _, ok := m.devices[device]; !ok {
continue
}
entry := m.devices[device]
for name, idx := range m.matches {
if idx < len(linefields) {
x, err := strconv.ParseInt(linefields[idx], 0, 64)
if err == nil {
diff := x - entry.lastValues[name]
y, err := lp.New(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
if err == nil {
output <- y
}
}
entry.lastValues[name] = x
}
}
m.devices[device] = entry
}
}
func (m *IOstatCollector) Close() {
m.init = false
}

View File

@@ -1,34 +0,0 @@
## `iostat` collector
```json
"iostat": {
"exclude_metrics": [
"read_ms"
],
}
```
The `iostat` collector reads data from `/proc/diskstats` and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink.
Metrics:
* `io_reads`
* `io_reads_merged`
* `io_read_sectors`
* `io_read_ms`
* `io_writes`
* `io_writes_merged`
* `io_writes_sectors`
* `io_writes_ms`
* `io_ioops`
* `io_ioops_ms`
* `io_ioops_weighted_ms`
* `io_discards`
* `io_discards_merged`
* `io_discards_sectors`
* `io_discards_ms`
* `io_flushes`
* `io_flushes_ms`
The device name is added as tag `device`. For more details, see https://www.kernel.org/doc/html/latest/admin-guide/iostats.html
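The collector reports each counter as the difference since the previous read (see the `lastValues` handling in the code above), so every interval carries only the events of that interval. A minimal sketch of that bookkeeping with made-up numbers:
```go
package main

import "fmt"

// lastValues keeps the previous reading per metric so each read can
// emit the difference, i.e. the events that happened in the interval.
var lastValues = map[string]int64{}

func emitDeltas(current map[string]int64) {
	for name, x := range current {
		fmt.Printf("%s = %d\n", name, x-lastValues[name])
		lastValues[name] = x
	}
}

func main() {
	emitDeltas(map[string]int64{"io_reads": 1000, "io_writes": 400}) // first read counts from zero
	emitDeltas(map[string]int64{"io_reads": 1250, "io_writes": 450}) // deltas: 250 and 50
}
```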

View File

@@ -9,12 +9,11 @@ import (
"strconv"
"strings"
"time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const IPMITOOL_PATH = `ipmitool`
const IPMISENSORS_PATH = `ipmi-sensors`
const IPMITOOL_PATH = `/usr/bin/ipmitool`
const IPMISENSORS_PATH = `/usr/sbin/ipmi-sensors`
type IpmiCollectorConfig struct {
ExcludeDevices []string `json:"exclude_devices"`
@@ -24,37 +23,31 @@ type IpmiCollectorConfig struct {
type IpmiCollector struct {
metricCollector
//tags map[string]string
//matches map[string]string
tags map[string]string
matches map[string]string
config IpmiCollectorConfig
ipmitool string
ipmisensors string
}
func (m *IpmiCollector) Init(config json.RawMessage) error {
m.name = "IpmiCollector"
m.setup()
m.meta = map[string]string{"source": m.name, "group": "IPMI"}
m.config.IpmitoolPath = string(IPMITOOL_PATH)
m.config.IpmisensorsPath = string(IPMISENSORS_PATH)
m.ipmitool = ""
m.ipmisensors = ""
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
p, err := exec.LookPath(m.config.IpmitoolPath)
if err == nil {
m.ipmitool = p
_, err1 := os.Stat(m.config.IpmitoolPath)
_, err2 := os.Stat(m.config.IpmisensorsPath)
if err1 != nil {
m.config.IpmitoolPath = ""
}
p, err = exec.LookPath(m.config.IpmisensorsPath)
if err == nil {
m.ipmisensors = p
if err2 != nil {
m.config.IpmisensorsPath = ""
}
if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 {
return errors.New("no IPMI reader found")
if err1 != nil && err2 != nil {
return errors.New("No IPMI reader found")
}
m.init = true
return nil

View File

@@ -2,7 +2,7 @@ package collectors
/*
#cgo CFLAGS: -I./likwid
#cgo LDFLAGS: -Wl,--unresolved-symbols=ignore-in-object-files
#cgo LDFLAGS: -L./likwid -llikwid -llikwid-hwloc -lm
#include <stdlib.h>
#include <likwid.h>
*/
@@ -13,35 +13,35 @@ import (
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unsafe"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
topo "github.com/ClusterCockpit/cc-metric-collector/internal/ccTopology"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
"github.com/NVIDIA/go-nvml/pkg/dl"
"gopkg.in/Knetic/govaluate.v2"
)
type MetricScope int
const (
LIKWID_LIB_NAME = "liblikwid.so"
LIKWID_LIB_DL_FLAGS = dl.RTLD_LAZY | dl.RTLD_GLOBAL
LIKWID_DEF_ACCESSMODE = "direct"
METRIC_SCOPE_HWTHREAD = iota
METRIC_SCOPE_SOCKET
METRIC_SCOPE_NUMA
METRIC_SCOPE_NODE
)
func (ms MetricScope) String() string {
return []string{"Head", "Shoulder", "Knee", "Toe"}[ms]
}
type LikwidCollectorMetricConfig struct {
Name string `json:"name"` // Name of the metric
Calc string `json:"calc"` // Calculation for the metric using
Type string `json:"type"` // Metric type (aka node, socket, cpu, ...)
Name string `json:"name"`
Calc string `json:"calc"`
Scope MetricScope `json:"socket_scope"`
Publish bool `json:"publish"`
Unit string `json:"unit"` // Unit of metric if any
}
type LikwidCollectorEventsetConfig struct {
@@ -49,45 +49,30 @@ type LikwidCollectorEventsetConfig struct {
Metrics []LikwidCollectorMetricConfig `json:"metrics"`
}
type LikwidEventsetConfig struct {
internal int
gid C.int
eorder []*C.char
estr *C.char
results map[int]map[string]interface{}
metrics map[int]map[string]float64
}
type LikwidCollectorConfig struct {
Eventsets []LikwidCollectorEventsetConfig `json:"eventsets"`
Metrics []LikwidCollectorMetricConfig `json:"globalmetrics,omitempty"`
ForceOverwrite bool `json:"force_overwrite,omitempty"`
InvalidToZero bool `json:"invalid_to_zero,omitempty"`
AccessMode string `json:"access_mode,omitempty"`
DaemonPath string `json:"accessdaemon_path,omitempty"`
LibraryPath string `json:"liblikwid_path,omitempty"`
Metrics []LikwidCollectorMetricConfig `json:"globalmetrics"`
ExcludeMetrics []string `json:"exclude_metrics"`
ForceOverwrite bool `json:"force_overwrite"`
}
type LikwidCollector struct {
metricCollector
cpulist []C.int
cpu2tid map[int]int
sock2tid map[int]int
metrics map[C.int]map[string]int
groups []C.int
config LikwidCollectorConfig
results map[int]map[int]map[string]interface{}
mresults map[int]map[int]map[string]float64
gmresults map[int]map[string]float64
basefreq float64
running bool
initialized bool
likwidGroups map[C.int]LikwidEventsetConfig
lock sync.Mutex
}
type LikwidMetric struct {
name string
search string
scope string
socket_scope bool
group_idx int
}
@@ -99,451 +84,238 @@ func eventsToEventStr(events map[string]string) string {
return strings.Join(elist, ",")
}
func genLikwidEventSet(input LikwidCollectorEventsetConfig) LikwidEventsetConfig {
tmplist := make([]string, 0)
elist := make([]*C.char, 0)
for k, v := range input.Events {
tmplist = append(tmplist, fmt.Sprintf("%s:%s", v, k))
c_counter := C.CString(k)
elist = append(elist, c_counter)
}
estr := strings.Join(tmplist, ",")
res := make(map[int]map[string]interface{})
met := make(map[int]map[string]float64)
for _, i := range topo.CpuList() {
res[i] = make(map[string]interface{})
for k := range input.Events {
res[i][k] = 0.0
}
met[i] = make(map[string]float64)
for _, v := range input.Metrics {
res[i][v.Name] = 0.0
}
}
return LikwidEventsetConfig{
gid: -1,
eorder: elist,
estr: C.CString(estr),
results: res,
metrics: met,
}
}
func testLikwidMetricFormula(formula string, params []string) bool {
myparams := make(map[string]interface{})
for _, p := range params {
myparams[p] = float64(1.0)
}
_, err := agg.EvalFloat64Condition(formula, myparams)
return err == nil
}
func getBaseFreq() float64 {
files := []string{
"/sys/devices/system/cpu/cpu0/cpufreq/bios_limit",
"/sys/devices/system/cpu/cpu0/cpufreq/base_frequency",
}
var freq float64 = math.NaN()
for _, f := range files {
buffer, err := ioutil.ReadFile(f)
C.power_init(0)
info := C.get_powerInfo()
if float64(info.baseFrequency) != 0 {
freq = float64(info.baseFrequency)
} else {
buffer, err := ioutil.ReadFile("/sys/devices/system/cpu/cpu0/cpufreq/bios_limit")
if err == nil {
data := strings.Replace(string(buffer), "\n", "", -1)
x, err := strconv.ParseInt(data, 0, 64)
if err == nil {
freq = float64(x) * 1e6
freq = float64(x) * 1e3
}
}
}
if math.IsNaN(freq) {
C.power_init(0)
info := C.get_powerInfo()
if float64(info.baseFrequency) != 0 {
freq = float64(info.baseFrequency) * 1e6
}
C.power_finalize()
}
return freq
}
func getSocketCpus() map[C.int]int {
slist := SocketList()
var cpu C.int
outmap := make(map[C.int]int)
for _, s := range slist {
t := C.CString(fmt.Sprintf("S%d", s))
clen := C.cpustr_to_cpulist(t, &cpu, 1)
if int(clen) == 1 {
outmap[cpu] = s
}
}
return outmap
}
func (m *LikwidCollector) Init(config json.RawMessage) error {
var ret C.int
m.name = "LikwidCollector"
m.initialized = false
m.running = false
m.config.AccessMode = LIKWID_DEF_ACCESSMODE
m.config.LibraryPath = LIKWID_LIB_NAME
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
lib := dl.New(m.config.LibraryPath, LIKWID_LIB_DL_FLAGS)
if lib == nil {
return fmt.Errorf("error instantiating DynamicLibrary for %s", m.config.LibraryPath)
}
err := lib.Open()
if err != nil {
return fmt.Errorf("error opening %s: %v", m.config.LibraryPath, err)
}
if m.config.ForceOverwrite {
cclog.ComponentDebug(m.name, "Set LIKWID_FORCE=1")
os.Setenv("LIKWID_FORCE", "1")
}
m.setup()
m.meta = map[string]string{"source": m.name, "group": "PerfCounter"}
cclog.ComponentDebug(m.name, "Get cpulist and init maps and lists")
cpulist := topo.CpuList()
cpulist := CpuList()
m.cpulist = make([]C.int, len(cpulist))
m.cpu2tid = make(map[int]int)
slist := getSocketCpus()
m.sock2tid = make(map[int]int)
// m.numa2tid = make(map[int]int)
for i, c := range cpulist {
m.cpulist[i] = C.int(c)
m.cpu2tid[c] = i
if sid, found := slist[m.cpulist[i]]; found {
m.sock2tid[sid] = i
}
}
m.results = make(map[int]map[int]map[string]interface{})
m.mresults = make(map[int]map[int]map[string]float64)
m.gmresults = make(map[int]map[string]float64)
ret = C.topology_init()
if ret != 0 {
return errors.New("Failed to initialize LIKWID topology")
}
if m.config.ForceOverwrite {
os.Setenv("LIKWID_FORCE", "1")
}
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
if ret != 0 {
C.topology_finalize()
return errors.New("Failed to initialize LIKWID topology")
}
m.likwidGroups = make(map[C.int]LikwidEventsetConfig)
// m.results = make(map[int]map[int]map[string]interface{})
// m.mresults = make(map[int]map[int]map[string]float64)
m.gmresults = make(map[int]map[string]float64)
for _, tid := range m.cpu2tid {
for i, evset := range m.config.Eventsets {
estr := eventsToEventStr(evset.Events)
cstr := C.CString(estr)
gid := C.perfmon_addEventSet(cstr)
if gid >= 0 {
m.groups = append(m.groups, gid)
}
C.free(unsafe.Pointer(cstr))
m.results[i] = make(map[int]map[string]interface{})
m.mresults[i] = make(map[int]map[string]float64)
for tid := range m.cpulist {
m.results[i][tid] = make(map[string]interface{})
m.mresults[i][tid] = make(map[string]float64)
m.gmresults[tid] = make(map[string]float64)
}
// This is for the global metrics computation test
totalMetrics := 0
// Generate parameter list for the metric computing test
params := make([]string, 0)
params = append(params, "time", "inverseClock")
// Generate parameter list for the global metric computing test
globalParams := make([]string, 0)
globalParams = append(globalParams, "time", "inverseClock")
// We test the eventset metrics whether they can be computed at all
for _, evset := range m.config.Eventsets {
if len(evset.Events) > 0 {
params = params[:2]
for counter := range evset.Events {
params = append(params, counter)
}
for _, metric := range evset.Metrics {
// Try to evaluate the metric
if testLikwidMetricFormula(metric.Calc, params) {
// Add the computable metric to the parameter list for the global metrics
globalParams = append(globalParams, metric.Name)
totalMetrics++
} else {
metric.Calc = ""
}
}
} else {
cclog.ComponentError(m.name, "Invalid Likwid eventset config, no events given")
continue
}
}
for _, metric := range m.config.Metrics {
// Try to evaluate the global metric
if !testLikwidMetricFormula(metric.Calc, globalParams) {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed")
metric.Calc = ""
} else {
totalMetrics++
}
}
// If no event set could be added, shut down LikwidCollector
if totalMetrics == 0 {
err := errors.New("no LIKWID eventset or metric usable")
cclog.ComponentError(m.name, err.Error())
return err
if len(m.groups) == 0 {
C.perfmon_finalize()
C.topology_finalize()
return errors.New("No LIKWID performance group initialized")
}
m.basefreq = getBaseFreq()
m.init = true
return nil
}
// take a measurement for 'interval' seconds of event set index 'group'
func (m *LikwidCollector) takeMeasurement(evset LikwidEventsetConfig, interval time.Duration) (bool, error) {
var ret C.int
m.lock.Lock()
if m.initialized {
ret = C.perfmon_setupCounters(evset.gid)
if ret != 0 {
var err error = nil
var skip bool = false
if ret == -37 {
skip = true
} else {
err = fmt.Errorf("failed to setup performance group %d", evset.gid)
}
m.lock.Unlock()
return skip, err
}
ret = C.perfmon_startCounters()
if ret != 0 {
var err error = nil
var skip bool = false
if ret == -37 {
skip = true
} else {
err = fmt.Errorf("failed to start performance group %d", evset.gid)
}
m.lock.Unlock()
return skip, err
}
m.running = true
time.Sleep(interval)
m.running = false
ret = C.perfmon_stopCounters()
if ret != 0 {
var err error = nil
var skip bool = false
if ret == -37 {
skip = true
} else {
err = fmt.Errorf("failed to stop performance group %d", evset.gid)
}
m.lock.Unlock()
return skip, err
}
}
m.lock.Unlock()
return false, nil
}
// Get all measurement results for an event set, derive the metric values out of the measurement results and send it
func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMetric) error {
invClock := float64(1.0 / m.basefreq)
// Go over events and get the results
for eidx, counter := range evset.eorder {
gctr := C.GoString(counter)
for _, tid := range m.cpu2tid {
res := C.perfmon_getLastResult(evset.gid, C.int(eidx), C.int(tid))
evset.results[tid][gctr] = float64(res)
evset.results[tid]["time"] = interval.Seconds()
evset.results[tid]["inverseClock"] = invClock
}
}
// Go over the event set metrics, derive the value out of the event:counter values and send it
for _, metric := range m.config.Eventsets[evset.internal].Metrics {
// The metric scope is determined in the Init() function
// Get the map scope-id -> tids
scopemap := m.cpu2tid
if metric.Type == "socket" {
scopemap = m.sock2tid
}
for domain, tid := range scopemap {
if tid >= 0 && len(metric.Calc) > 0 {
value, err := agg.EvalFloat64Condition(metric.Calc, evset.results[tid])
if err != nil {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed:", err.Error())
continue
}
evset.metrics[tid][metric.Name] = value
if m.config.InvalidToZero && math.IsNaN(value) {
value = 0.0
}
if m.config.InvalidToZero && math.IsInf(value, 0) {
value = 0.0
}
// Now we have the result, send it with the proper tags
if !math.IsNaN(value) {
if metric.Publish {
fields := map[string]interface{}{"value": value}
y, err := lp.New(metric.Name, map[string]string{"type": metric.Type}, m.meta, fields, time.Now())
if err == nil {
if metric.Type != "node" {
y.AddTag("type-id", fmt.Sprintf("%d", domain))
}
if len(metric.Unit) > 0 {
y.AddMeta("unit", metric.Unit)
}
output <- y
}
}
}
}
}
}
return nil
}
// Go over the global metrics, derive the value out of the event sets' metric values and send it
func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
for _, metric := range m.config.Metrics {
scopemap := m.cpu2tid
if metric.Type == "socket" {
scopemap = m.sock2tid
}
for domain, tid := range scopemap {
if tid >= 0 {
// Here we generate parameter list
params := make(map[string]interface{})
for _, evset := range m.likwidGroups {
for mname, mres := range evset.metrics[tid] {
params[mname] = mres
}
}
// Evaluate the metric
value, err := agg.EvalFloat64Condition(metric.Calc, params)
if err != nil {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed:", err.Error())
continue
}
m.gmresults[tid][metric.Name] = value
if m.config.InvalidToZero && math.IsNaN(value) {
value = 0.0
}
if m.config.InvalidToZero && math.IsInf(value, 0) {
value = 0.0
}
// Now we have the result, send it with the proper tags
if !math.IsNaN(value) {
if metric.Publish {
tags := map[string]string{"type": metric.Type}
fields := map[string]interface{}{"value": value}
y, err := lp.New(metric.Name, tags, m.meta, fields, time.Now())
if err == nil {
if metric.Type != "node" {
y.AddTag("type-id", fmt.Sprintf("%d", domain))
}
if len(metric.Unit) > 0 {
y.AddMeta("unit", metric.Unit)
}
output <- y
}
}
}
}
}
}
return nil
}
func (m *LikwidCollector) LateInit() error {
var ret C.int
switch m.config.AccessMode {
case "direct":
C.HPMmode(0)
case "accessdaemon":
if len(m.config.DaemonPath) > 0 {
p := os.Getenv("PATH")
os.Setenv("PATH", m.config.DaemonPath+":"+p)
}
C.HPMmode(1)
}
cclog.ComponentDebug(m.name, "initialize LIKWID topology")
ret = C.topology_init()
if ret != 0 {
err := errors.New("failed to initialize LIKWID topology")
cclog.ComponentError(m.name, err.Error())
return err
}
m.sock2tid = make(map[int]int)
tmp := make([]C.int, 1)
for _, sid := range topo.SocketList() {
cstr := C.CString(fmt.Sprintf("S%d:0", sid))
ret = C.cpustr_to_cpulist(cstr, &tmp[0], 1)
if ret > 0 {
m.sock2tid[sid] = m.cpu2tid[int(tmp[0])]
}
C.free(unsafe.Pointer(cstr))
}
m.basefreq = getBaseFreq()
cclog.ComponentDebug(m.name, "BaseFreq", m.basefreq)
cclog.ComponentDebug(m.name, "initialize LIKWID perfmon module")
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
if ret != 0 {
var err error = nil
C.topology_finalize()
if ret != -22 {
err = errors.New("failed to initialize LIKWID perfmon")
cclog.ComponentError(m.name, err.Error())
} else {
err = errors.New("access to LIKWID perfmon locked")
}
return err
}
// While adding the events, we test the metrics whether they can be computed at all
for i, evset := range m.config.Eventsets {
var gid C.int
if len(evset.Events) > 0 {
likwidGroup := genLikwidEventSet(evset)
// Now we add the list of events to likwid
gid = C.perfmon_addEventSet(likwidGroup.estr)
if gid >= 0 {
likwidGroup.gid = gid
likwidGroup.internal = i
m.likwidGroups[gid] = likwidGroup
}
} else {
cclog.ComponentError(m.name, "Invalid Likwid eventset config, no events given")
continue
}
}
// If no event set could be added, shut down LikwidCollector
if len(m.likwidGroups) == 0 {
C.perfmon_finalize()
C.topology_finalize()
err := errors.New("no LIKWID performance group initialized")
cclog.ComponentError(m.name, err.Error())
return err
}
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGCHLD)
signal.Notify(sigchan, os.Interrupt)
go func() {
<-sigchan
signal.Stop(sigchan)
m.initialized = false
}()
m.initialized = true
return nil
}
// main read function taking multiple measurement rounds, each 'interval' seconds long
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var skip bool = false
var err error
if !m.init {
return
}
var ret C.int
if !m.initialized {
if m.LateInit() != nil {
return
for i, gid := range m.groups {
evset := m.config.Eventsets[i]
ret = C.perfmon_setupCounters(gid)
if ret != 0 {
log.Print("Failed to setup performance group ", C.perfmon_getGroupName(gid))
continue
}
ret = C.perfmon_startCounters()
if ret != 0 {
log.Print("Failed to start performance group ", C.perfmon_getGroupName(gid))
continue
}
if m.initialized && !skip {
for _, evset := range m.likwidGroups {
if !skip {
// measure event set 'i' for 'interval' seconds
skip, err = m.takeMeasurement(evset, interval)
time.Sleep(interval)
ret = C.perfmon_stopCounters()
if ret != 0 {
log.Print("Failed to stop performance group ", C.perfmon_getGroupName(gid))
continue
}
var eidx C.int
for tid := range m.cpulist {
for eidx = 0; int(eidx) < len(evset.Events); eidx++ {
ctr := C.perfmon_getCounterName(gid, eidx)
gctr := C.GoString(ctr)
res := C.perfmon_getLastResult(gid, eidx, C.int(tid))
m.results[i][tid][gctr] = float64(res)
}
m.results[i][tid]["time"] = interval.Seconds()
m.results[i][tid]["inverseClock"] = float64(1.0 / m.basefreq)
for _, metric := range evset.Metrics {
expression, err := govaluate.NewEvaluableExpression(metric.Calc)
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
log.Print(err.Error())
continue
}
result, err := expression.Evaluate(m.results[i][tid])
if err != nil {
log.Print(err.Error())
continue
}
m.mresults[i][tid][metric.Name] = float64(result.(float64))
}
}
}
if !skip {
// read measurements and derive event set metrics
m.calcEventsetMetrics(evset, interval, output)
for _, metric := range m.config.Metrics {
for tid := range m.cpulist {
var params map[string]interface{}
expression, err := govaluate.NewEvaluableExpression(metric.Calc)
if err != nil {
log.Print(err.Error())
continue
}
params = make(map[string]interface{})
for j := range m.groups {
for mname, mres := range m.mresults[j][tid] {
params[mname] = mres
}
}
result, err := expression.Evaluate(params)
if err != nil {
log.Print(err.Error())
continue
}
m.gmresults[tid][metric.Name] = float64(result.(float64))
}
}
for i := range m.groups {
evset := m.config.Eventsets[i]
for _, metric := range evset.Metrics {
_, skip := stringArrayContains(m.config.ExcludeMetrics, metric.Name)
if metric.Publish && !skip {
if metric.Scope.String() == "socket" {
for sid, tid := range m.sock2tid {
y, err := lp.New(metric.Name,
map[string]string{"type": "socket",
"type-id": fmt.Sprintf("%d", int(sid))},
m.meta,
map[string]interface{}{"value": m.mresults[i][tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
} else if metric.Scope.String() == "hwthread" {
for tid, cpu := range m.cpulist {
y, err := lp.New(metric.Name,
map[string]string{"type": "cpu",
"type-id": fmt.Sprintf("%d", int(cpu))},
m.meta,
map[string]interface{}{"value": m.mresults[i][tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
}
}
}
}
for _, metric := range m.config.Metrics {
_, skip := stringArrayContains(m.config.ExcludeMetrics, metric.Name)
if metric.Publish && !skip {
if metric.Scope.String() == "socket" {
for sid, tid := range m.sock2tid {
y, err := lp.New(metric.Name,
map[string]string{"type": "socket",
"type-id": fmt.Sprintf("%d", int(sid))},
m.meta,
map[string]interface{}{"value": m.gmresults[tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
} else {
for tid, cpu := range m.cpulist {
y, err := lp.New(metric.Name,
map[string]string{"type": "cpu",
"type-id": fmt.Sprintf("%d", int(cpu))},
m.meta,
map[string]interface{}{"value": m.gmresults[tid][metric.Name]},
time.Now())
if err == nil {
output <- y
}
}
}
if !skip {
// use the event set metrics to derive the global metrics
m.calcGlobalMetrics(interval, output)
}
}
}
@@ -551,17 +323,7 @@ func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMetric)
func (m *LikwidCollector) Close() {
if m.init {
m.init = false
cclog.ComponentDebug(m.name, "Closing ...")
m.lock.Lock()
if m.initialized {
cclog.ComponentDebug(m.name, "Finalize LIKWID perfmon module")
C.perfmon_finalize()
m.initialized = false
}
m.lock.Unlock()
cclog.ComponentDebug(m.name, "Finalize LIKWID topology module")
C.topology_finalize()
cclog.ComponentDebug(m.name, "Closing done")
}
}

View File

@@ -1,95 +1,7 @@
## `likwid` collector
The `likwid` collector is probably the most complicated collector. The LIKWID library is included as a static library with *direct* access mode. The *direct* access mode is suitable if the daemon is executed by a root user. The static library does not contain the performance groups, so all information needs to be provided in the configuration.
The `likwid` configuration consists of two parts, the "eventsets" and "globalmetrics":
- An event set list itself has two parts, the "events" and a set of derivable "metrics". Each of the "events" is a counter:event pair in LIKWID's syntax. The "metrics" are a list of formulas to derive the metric value from the measurements of the "events". Each metric has a name, the formula, a scope and a publish flag. Counter names can be used like variables in the formulas, so `PMC0+PMC1` sums the measurements for both events configured in the counters `PMC0` and `PMC1` (a formula-evaluation sketch follows this list). The scope tells the collector whether it is a metric for each hardware thread (`cpu`) or each CPU socket (`socket`). You may specify a unit for the metric with `unit`. The last one is the publishing flag. It tells the collector whether a metric should be sent to the router.
- The global metrics are metrics which require data from all event set measurements to be derived. The inputs are the metrics in the event sets. Similar to the metrics in the event sets, the global metrics are defined by a name, a formula, a scope and a publish flag. See event set metrics for details. The only difference is that there is no access to the raw event measurements anymore but only to the metrics. So, the idea is to derive a metric in the "eventsets" section and reuse it in the "globalmetrics" part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics (`publish=false`). **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases. Similar to the metrics in the eventset, you can specify a metric unit with the `unit` field.
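A minimal sketch of this split (the event and metric names are placeholders, not a recommended setup): the event set metric `instr` stays unpublished and is only reused by the global metric:
```json
{
  "eventsets": [
    {
      "events": { "PMC0": "RETIRED_INSTRUCTIONS" },
      "metrics": [
        { "name": "instr", "calc": "PMC0", "scope": "cpu", "publish": false }
      ]
    }
  ],
  "globalmetrics": [
    { "name": "instructions", "calc": "instr", "scope": "cpu", "publish": true }
  ]
}
```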
Additional options:
- `access_mode` : Method to use for hardware performance monitoring (`direct` access as root user, `accessdaemon` for the daemon mode)
- `accessdaemon_path`: Folder with the access daemon `likwid-accessD`, commonly `$LIKWID_INSTALL_LOC/sbin`
- `liblikwid_path`: Location of `liblikwid.so`
- `force_overwrite`: Same as setting `LIKWID_FORCE=1`. In case counters are already in use, LIKWID overwrites their configuration to do its measurements
- `invalid_to_zero`: In some cases, the calculations result in `NaN` or `Inf`. With this option, all `NaN` and `Inf` values are replaced with `0.0`.
### Available metric scopes
Hardware performance counters are scattered all over the system nowadays. A counter covers a specific part of the system. While there are hardware-thread-specific counters for CPU cycles, instructions and so on, others are specific to a whole CPU socket/package. To address that, the collector provides the specification of a 'scope' for each metric.
- `cpu` : One metric per CPU hardware thread with the tags `"type" : "cpu"` and `"type-id" : "$cpu_id"`
- `socket` : One metric per CPU socket/package with the tags `"type" : "socket"` and `"type-id" : "$socket_id"`
**Note:** You cannot specify `socket` scope for a metric that is measured at `cpu` scope, so some expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the scope of each counter from the *Architecture* pages; as soon as one counter in a metric is socket-specific, the whole metric becomes socket-specific.
As a guideline (a short sketch with both cases follows this list):
- All counters `FIXCx`, `PMCy` and `TMAz` have the scope `cpu`
- All counter names containing `BOX` have the scope `socket`
- All `PWRx` counters have scope `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"`, which has `cpu` scope
- All `DFCx` counters have scope `socket`
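A minimal sketch with both cases, assuming hypothetical counter assignments (`PMC0`/`PMC1` as hardware-thread counters, `CBOX0C0` as a socket-wide uncore counter):
```json
"metrics": [
  { "name": "ipc", "calc": "PMC0/PMC1", "scope": "cpu", "publish": true },
  { "name": "llc_traffic", "calc": "CBOX0C0*64.0", "scope": "socket", "publish": true }
]
```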
### Help with the configuration
The configuration for the `likwid` collector is quite complicated. Most users don't use LIKWID with the event:counter notation but rely on the performance groups defined by the LIKWID team for each architecture. In order to help with the `likwid` collector configuration, we included a script `scripts/likwid_perfgroup_to_cc_config.py` that creates the configuration of an `eventset` from a performance group (using a LIKWID installation in `$PATH`):
```
$ likwid-perfctr -i
[...]
short name: ICX
[...]
$ likwid-perfctr -a
[...]
MEM_DP
MEM
FLOPS_SP
CLOCK
[...]
$ scripts/likwid_perfgroup_to_cc_config.py ICX MEM_DP
{
"events": {
"FIXC0": "INSTR_RETIRED_ANY",
"..." : "..."
},
"metrics" : [
{
"calc": "time",
"name": "Runtime (RDTSC) [s]",
"publish": true,
"unit": "seconds"
"scope": "cpu"
},
{
"..." : "..."
}
]
}
```
You can copy this JSON and add it to the `eventsets` list. If you specify multiple event sets, you can add globally derived metrics in the extra `globalmetrics` section with the metric names as variables.
### Mixed usage between daemon and users
LIKWID checks the file `/var/run/likwid.lock` before performing any interfering operations. Who is allowed to access the counters is determined by the owner of the file. If it does not exist, it is created for the current user. So, if you want to temporarily allow counter access to a user (e.g. in a job):
Before (SLURM prolog, ...)
```
$ chown $JOBUSER /var/run/likwid.lock
```
After (SLURM epilog, ...)
```
$ chown $CCUSER /var/run/likwid.lock
```
### Example configuration
```json
"likwid": {
"force_overwrite" : false,
"nan_to_zero" : false,
"eventsets": [
{
"events": {
@@ -108,28 +20,25 @@ $ chown $CCUSER /var/run/likwid.lock
{
"name": "ipc",
"calc": "PMC0/PMC1",
"type": "cpu",
"socket_scope": false,
"publish": true
},
{
"name": "flops_any",
"calc": "0.000001*PMC2/time",
"unit": "MFlops/s",
"type": "cpu",
"socket_scope": false,
"publish": true
},
{
"name": "clock",
"name": "clock_mhz",
"calc": "0.000001*(FIXC1/FIXC2)/inverseClock",
"type": "cpu",
"unit": "MHz",
"socket_scope": false,
"publish": true
},
{
"name": "mem1",
"calc": "0.000001*(DFC0+DFC1+DFC2+DFC3)*64.0/time",
"unit": "Mbyte/s",
"type": "socket",
"socket_scope": true,
"publish": false
}
]
@@ -147,22 +56,19 @@ $ chown $CCUSER /var/run/likwid.lock
{
"name": "pwr_core",
"calc": "PWR0/time",
"unit": "Watt"
"type": "socket",
"socket_scope": false,
"publish": true
},
{
"name": "pwr_pkg",
"calc": "PWR1/time",
"type": "socket",
"unit": "Watt"
"socket_scope": true,
"publish": true
},
{
"name": "mem2",
"calc": "0.000001*(DFC0+DFC1+DFC2+DFC3)*64.0/time",
"unit": "Mbyte/s",
"type": "socket",
"socket_scope": true,
"publish": false
}
]
@@ -172,17 +78,16 @@ $ chown $CCUSER /var/run/likwid.lock
{
"name": "mem_bw",
"calc": "mem1+mem2",
"type": "socket",
"unit": "Mbyte/s",
"socket_scope": true,
"publish": true
}
]
}
```
### How to get the eventsets and metrics from LIKWID
_Example config suitable for AMD Zen3_
The `likwid` collector reads hardware performance counters at a **hwthread** and **socket** level. The configuration looks quite complicated but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector went through multiple iterations that tried to use the performance groups directly, but that approach lacked flexibility. The current way of configuration provides the most flexibility.
The logic is as follows: there are multiple eventsets, each consisting of a list of counters+events and a list of metrics. If you compare a common performance group with the example setting above, there is not much difference:
```
@@ -203,10 +108,12 @@ METRICS -> "metrics": [
IPC PMC0/PMC1 -> {
-> "name" : "IPC",
-> "calc" : "PMC0/PMC1",
-> "scope": "cpu",
-> "socket_scope": false,
-> "publish": true
-> }
-> ]
```
The script `scripts/likwid_perfgroup_to_cc_config.py` might help you.
The `socket_scope` option tells whether the metric is submitted per socket or per hwthread. If a metric is only used for internal calculations, you can set `publish = false`.
Since some metrics can only be gathered in multiple measurements (like the memory bandwidth on AMD Zen3 chips), configure multiple eventsets like in the example config and use the `globalmetrics` section to combine them. **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases.

View File

@@ -2,36 +2,25 @@ package collectors
import (
"encoding/json"
"fmt"
"io/ioutil"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
//
// LoadavgCollector collects:
// * load average of last 1, 5 & 15 minutes
// * number of processes currently runnable
// * total number of processes in system
//
// See: https://www.kernel.org/doc/html/latest/filesystems/proc.html
//
const LOADAVGFILE = "/proc/loadavg"
const LOADAVGFILE = `/proc/loadavg`
type LoadavgCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
type LoadavgCollector struct {
metricCollector
tags map[string]string
load_matches []string
load_skips []bool
proc_matches []string
proc_skips []bool
config struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
config LoadavgCollectorConfig
}
func (m *LoadavgCollector) Init(config json.RawMessage) error {
@@ -43,82 +32,46 @@ func (m *LoadavgCollector) Init(config json.RawMessage) error {
return err
}
}
m.meta = map[string]string{
"source": m.name,
"group": "LOAD"}
m.meta = map[string]string{"source": m.name, "group": "LOAD"}
m.tags = map[string]string{"type": "node"}
m.load_matches = []string{
"load_one",
"load_five",
"load_fifteen"}
m.load_skips = make([]bool, len(m.load_matches))
m.proc_matches = []string{
"proc_run",
"proc_total"}
m.proc_skips = make([]bool, len(m.proc_matches))
for i, name := range m.load_matches {
_, m.load_skips[i] = stringArrayContains(m.config.ExcludeMetrics, name)
}
for i, name := range m.proc_matches {
_, m.proc_skips[i] = stringArrayContains(m.config.ExcludeMetrics, name)
}
m.load_matches = []string{"load_one", "load_five", "load_fifteen"}
m.proc_matches = []string{"proc_run", "proc_total"}
m.init = true
return nil
}
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var skip bool
if !m.init {
return
}
buffer, err := ioutil.ReadFile(LOADAVGFILE)
buffer, err := ioutil.ReadFile(string(LOADAVGFILE))
if err != nil {
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
}
return
}
now := time.Now()
// Load metrics
ls := strings.Split(string(buffer), ` `)
for i, name := range m.load_matches {
x, err := strconv.ParseFloat(ls[i], 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert '%s' to float64: %v", ls[i], err))
continue
}
if m.load_skips[i] {
continue
}
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, name)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": float64(x)}, time.Now())
if err == nil && !skip {
output <- y
}
}
// Process metrics
}
lv := strings.Split(ls[3], `/`)
for i, name := range m.proc_matches {
x, err := strconv.ParseInt(lv[i], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert '%s' to float64: %v", lv[i], err))
continue
}
if m.proc_skips[i] {
continue
}
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
x, err := strconv.ParseFloat(lv[i], 64)
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, name)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": float64(x)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
}

View File

@@ -3,286 +3,27 @@ package collectors
import (
"encoding/json"
"errors"
"fmt"
"os/exec"
"os/user"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const LUSTRE_SYSFS = `/sys/fs/lustre`
const LCTL_CMD = `lctl`
const LCTL_OPTION = `get_param`
const LUSTREFILE = `/proc/fs/lustre/llite/lnec-XXXXXX/stats`
type LustreCollectorConfig struct {
LCtlCommand string `json:"lctl_command,omitempty"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
Sudo bool `json:"use_sudo,omitempty"`
SendAbsoluteValues bool `json:"send_abs_values,omitempty"`
SendDerivedValues bool `json:"send_derived_values,omitempty"`
SendDiffValues bool `json:"send_diff_values,omitempty"`
}
type LustreMetricDefinition struct {
name string
lineprefix string
lineoffset int
unit string
calc string
Procfiles []string `json:"procfiles"`
ExcludeMetrics []string `json:"exclude_metrics"`
}
type LustreCollector struct {
metricCollector
tags map[string]string
matches map[string]map[string]int
devices []string
config LustreCollectorConfig
lctl string
sudoCmd string
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
definitions []LustreMetricDefinition // Combined list without excluded metrics
stats map[string]map[string]int64 // Data for last value per device and metric
}
func (m *LustreCollector) getDeviceDataCommand(device string) []string {
var command *exec.Cmd
statsfile := fmt.Sprintf("llite.%s.stats", device)
if m.config.Sudo {
command = exec.Command(m.sudoCmd, m.lctl, LCTL_OPTION, statsfile)
} else {
command = exec.Command(m.lctl, LCTL_OPTION, statsfile)
}
command.Wait()
stdout, _ := command.Output()
return strings.Split(string(stdout), "\n")
}
func (m *LustreCollector) getDevices() []string {
devices := make([]string, 0)
// //Version reading devices from sysfs
// globPattern := filepath.Join(LUSTRE_SYSFS, "llite/*/stats")
// files, err := filepath.Glob(globPattern)
// if err != nil {
// return devices
// }
// for _, f := range files {
// pathlist := strings.Split(f, "/")
// devices = append(devices, pathlist[4])
// }
data := m.getDeviceDataCommand("*")
for _, line := range data {
if strings.HasPrefix(line, "llite") {
linefields := strings.Split(line, ".")
if len(linefields) > 2 {
devices = append(devices, linefields[1])
}
}
}
return devices
}
func getMetricData(lines []string, prefix string, offset int) (int64, error) {
for _, line := range lines {
if strings.HasPrefix(line, prefix) {
lf := strings.Fields(line)
return strconv.ParseInt(lf[offset], 0, 64)
}
}
return 0, errors.New("no such line in data")
}
// //Version reading the stats data of a device from sysfs
// func (m *LustreCollector) getDeviceDataSysfs(device string) []string {
// llitedir := filepath.Join(LUSTRE_SYSFS, "llite")
// devdir := filepath.Join(llitedir, device)
// statsfile := filepath.Join(devdir, "stats")
// buffer, err := ioutil.ReadFile(statsfile)
// if err != nil {
// return make([]string, 0)
// }
// return strings.Split(string(buffer), "\n")
// }
var LustreAbsMetrics = []LustreMetricDefinition{
{
name: "lustre_read_requests",
lineprefix: "read_bytes",
lineoffset: 1,
unit: "requests",
calc: "none",
},
{
name: "lustre_write_requests",
lineprefix: "write_bytes",
lineoffset: 1,
unit: "requests",
calc: "none",
},
{
name: "lustre_read_bytes",
lineprefix: "read_bytes",
lineoffset: 6,
unit: "bytes",
calc: "none",
},
{
name: "lustre_write_bytes",
lineprefix: "write_bytes",
lineoffset: 6,
unit: "bytes",
calc: "none",
},
{
name: "lustre_open",
lineprefix: "open",
lineoffset: 1,
unit: "",
calc: "none",
},
{
name: "lustre_close",
lineprefix: "close",
lineoffset: 1,
unit: "",
calc: "none",
},
{
name: "lustre_setattr",
lineprefix: "setattr",
lineoffset: 1,
unit: "",
calc: "none",
},
{
name: "lustre_getattr",
lineprefix: "getattr",
lineoffset: 1,
unit: "",
calc: "none",
},
{
name: "lustre_statfs",
lineprefix: "statfs",
lineoffset: 1,
unit: "",
calc: "none",
},
{
name: "lustre_inode_permission",
lineprefix: "inode_permission",
lineoffset: 1,
unit: "",
calc: "none",
},
}
var LustreDiffMetrics = []LustreMetricDefinition{
{
name: "lustre_read_requests_diff",
lineprefix: "read_bytes",
lineoffset: 1,
unit: "requests",
calc: "difference",
},
{
name: "lustre_write_requests_diff",
lineprefix: "write_bytes",
lineoffset: 1,
unit: "requests",
calc: "difference",
},
{
name: "lustre_read_bytes_diff",
lineprefix: "read_bytes",
lineoffset: 6,
unit: "bytes",
calc: "difference",
},
{
name: "lustre_write_bytes_diff",
lineprefix: "write_bytes",
lineoffset: 6,
unit: "bytes",
calc: "difference",
},
{
name: "lustre_open_diff",
lineprefix: "open",
lineoffset: 1,
unit: "",
calc: "difference",
},
{
name: "lustre_close_diff",
lineprefix: "close",
lineoffset: 1,
unit: "",
calc: "difference",
},
{
name: "lustre_setattr_diff",
lineprefix: "setattr",
lineoffset: 1,
unit: "",
calc: "difference",
},
{
name: "lustre_getattr_diff",
lineprefix: "getattr",
lineoffset: 1,
unit: "",
calc: "difference",
},
{
name: "lustre_statfs_diff",
lineprefix: "statfs",
lineoffset: 1,
unit: "",
calc: "difference",
},
{
name: "lustre_inode_permission_diff",
lineprefix: "inode_permission",
lineoffset: 1,
unit: "",
calc: "difference",
},
}
var LustreDeriveMetrics = []LustreMetricDefinition{
{
name: "lustre_read_requests_rate",
lineprefix: "read_bytes",
lineoffset: 1,
unit: "requests/sec",
calc: "derivative",
},
{
name: "lustre_write_requests_rate",
lineprefix: "write_bytes",
lineoffset: 1,
unit: "requests/sec",
calc: "derivative",
},
{
name: "lustre_read_bw",
lineprefix: "read_bytes",
lineoffset: 6,
unit: "bytes/sec",
calc: "derivative",
},
{
name: "lustre_write_bw",
lineprefix: "write_bytes",
lineoffset: 6,
unit: "bytes/sec",
calc: "derivative",
},
}
func (m *LustreCollector) Init(config json.RawMessage) error {
@@ -297,81 +38,28 @@ func (m *LustreCollector) Init(config json.RawMessage) error {
m.setup()
m.tags = map[string]string{"type": "node"}
m.meta = map[string]string{"source": m.name, "group": "Lustre"}
// Lustre file system statistics can only be queried by user root
// or with password-less sudo
if !m.config.Sudo {
user, err := user.Current()
if err != nil {
cclog.ComponentError(m.name, "Failed to get current user:", err.Error())
return err
}
if user.Uid != "0" {
cclog.ComponentError(m.name, "Lustre file system statistics can only be queried by user root")
return err
}
} else {
p, err := exec.LookPath("sudo")
if err != nil {
cclog.ComponentError(m.name, "Cannot find 'sudo'")
return err
}
m.sudoCmd = p
}
p, err := exec.LookPath(m.config.LCtlCommand)
if err != nil {
p, err = exec.LookPath(LCTL_CMD)
if err != nil {
return err
}
}
m.lctl = p
m.definitions = []LustreMetricDefinition{}
if m.config.SendAbsoluteValues {
for _, def := range LustreAbsMetrics {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, def.name); !skip {
m.definitions = append(m.definitions, def)
}
}
}
if m.config.SendDiffValues {
for _, def := range LustreDiffMetrics {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, def.name); !skip {
m.definitions = append(m.definitions, def)
}
}
}
if m.config.SendDerivedValues {
for _, def := range LustreDeriveMetrics {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, def.name); !skip {
m.definitions = append(m.definitions, def)
}
}
}
if len(m.definitions) == 0 {
return errors.New("no metrics to collect")
}
devices := m.getDevices()
if len(devices) == 0 {
return errors.New("no Lustre devices found")
}
m.stats = make(map[string]map[string]int64)
for _, d := range devices {
m.stats[d] = make(map[string]int64)
data := m.getDeviceDataCommand(d)
for _, def := range m.definitions {
x, err := getMetricData(data, def.lineprefix, def.lineoffset)
m.matches = map[string]map[string]int{"read_bytes": {"read_bytes": 6, "read_requests": 1},
"write_bytes": {"write_bytes": 6, "write_requests": 1},
"open": {"open": 1},
"close": {"close": 1},
"setattr": {"setattr": 1},
"getattr": {"getattr": 1},
"statfs": {"statfs": 1},
"inode_permission": {"inode_permission": 1}}
m.devices = make([]string, 0)
for _, p := range m.config.Procfiles {
_, err := ioutil.ReadFile(p)
if err == nil {
m.stats[d][def.name] = x
m.devices = append(m.devices, p)
} else {
m.stats[d][def.name] = 0
log.Print(err.Error())
continue
}
}
if len(m.devices) == 0 {
return errors.New("No metrics to collect")
}
m.lastTimestamp = time.Now()
m.init = true
return nil
}
@@ -380,49 +68,40 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMetric)
if !m.init {
return
}
now := time.Now()
tdiff := now.Sub(m.lastTimestamp)
for device, devData := range m.stats {
data := m.getDeviceDataCommand(device)
for _, def := range m.definitions {
var use_x int64
var err error
var y lp.CCMetric
x, err := getMetricData(data, def.lineprefix, def.lineoffset)
for _, p := range m.devices {
buffer, err := ioutil.ReadFile(p)
if err != nil {
log.Print(err)
return
}
for _, line := range strings.Split(string(buffer), "\n") {
lf := strings.Fields(line)
if len(lf) > 1 {
for match, fields := range m.matches {
if lf[0] == match {
for name, idx := range fields {
_, skip := stringArrayContains(m.config.ExcludeMetrics, name)
if skip {
continue
}
x, err := strconv.ParseInt(lf[idx], 0, 64)
if err == nil {
use_x = x
} else {
use_x = devData[def.name]
}
var value interface{}
switch def.calc {
case "none":
value = use_x
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "difference":
value = use_x - devData[def.name]
if value.(int64) < 0 {
value = 0
}
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "derivative":
value = float64(use_x-devData[def.name]) / tdiff.Seconds()
if value.(float64) < 0 {
value = 0
}
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
}
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, time.Now())
if err == nil {
y.AddTag("device", device)
if len(def.unit) > 0 {
y.AddMeta("unit", def.unit)
if strings.Contains(name, "byte") {
y.AddMeta("unit", "Byte")
}
output <- y
}
devData[def.name] = use_x
}
}
m.lastTimestamp = now
}
}
}
}
}
}
func (m *LustreCollector) Close() {

View File

@@ -3,44 +3,27 @@
```json
"lustrestat": {
"lctl_command": "/path/to/lctl",
"procfiles" : [
"/proc/fs/lustre/llite/lnec-XXXXXX/stats"
],
"exclude_metrics": [
"setattr",
"getattr"
],
"send_abs_values" : true,
"send_derived_values" : true,
"send_diff_values": true,
"use_sudo": false
]
}
```
The `lustrestat` collector uses the `lctl` application with the `get_param` option to get all `llite` metrics (Lustre client). The `llite` metrics are only available to root users. If password-less sudo is configured, you can enable `sudo` in the configuration.
The `lustrestat` collector reads from the procfs stat files for Lustre like `/proc/fs/lustre/llite/lnec-XXXXXX/stats`.
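For reference, a shortened `llite` stats excerpt as returned by `lctl get_param llite.*.stats` (values made up); both collector variants pick field 1 (number of operations) and field 6 (byte sum) from such lines:
```
llite.lnec-XXXXXX.stats=
snapshot_time             1643615000.000000 secs.usecs
read_bytes                126 samples [bytes] 4096 1048576 13159424
write_bytes                12 samples [bytes] 4096 4096 49152
open                        7 samples [reqs]
close                       7 samples [reqs]
```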
Metrics:
* `lustre_read_bytes` (unit `bytes`)
* `lustre_read_requests` (unit `requests`)
* `lustre_write_bytes` (unit `bytes`)
* `lustre_write_requests` (unit `requests`)
* `lustre_open`
* `lustre_close`
* `lustre_getattr`
* `lustre_setattr`
* `lustre_statfs`
* `lustre_inode_permission`
* `lustre_read_bw` (if `send_derived_values == true`, unit `bytes/sec`)
* `lustre_write_bw` (if `send_derived_values == true`, unit `bytes/sec`)
* `lustre_read_requests_rate` (if `send_derived_values == true`, unit `requests/sec`)
* `lustre_write_requests_rate` (if `send_derived_values == true`, unit `requests/sec`)
* `lustre_read_bytes_diff` (if `send_diff_values == true`, unit `bytes`)
* `lustre_read_requests_diff` (if `send_diff_values == true`, unit `requests`)
* `lustre_write_bytes_diff` (if `send_diff_values == true`, unit `bytes`)
* `lustre_write_requests_diff` (if `send_diff_values == true`, unit `requests`)
* `lustre_open_diff` (if `send_diff_values == true`)
* `lustre_close_diff` (if `send_diff_values == true`)
* `lustre_getattr_diff` (if `send_diff_values == true`)
* `lustre_setattr_diff` (if `send_diff_values == true`)
* `lustre_statfs_diff` (if `send_diff_values == true`)
* `lustre_inode_permission_diff` (if `send_diff_values == true`)
* `read_bytes`
* `read_requests`
* `write_bytes`
* `write_requests`
* `open`
* `close`
* `getattr`
* `setattr`
* `statfs`
* `inode_permission`
This collector adds a `device` tag.

View File

@@ -1,33 +1,22 @@
package collectors
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const MEMSTATFILE = "/proc/meminfo"
const NUMA_MEMSTAT_BASE = "/sys/devices/system/node"
const MEMSTATFILE = `/proc/meminfo`
type MemstatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics"`
NodeStats bool `json:"node_stats,omitempty"`
NumaStats bool `json:"numa_stats,omitempty"`
}
type MemstatCollectorNode struct {
file string
tags map[string]string
}
type MemstatCollector struct {
@@ -36,65 +25,22 @@ type MemstatCollector struct {
tags map[string]string
matches map[string]string
config MemstatCollectorConfig
nodefiles map[int]MemstatCollectorNode
sendMemUsed bool
}
type MemstatStats struct {
value float64
unit string
}
func getStats(filename string) map[string]MemstatStats {
stats := make(map[string]MemstatStats)
file, err := os.Open(filename)
if err != nil {
cclog.Error(err.Error())
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if len(linefields) == 3 {
v, err := strconv.ParseFloat(linefields[1], 64)
if err == nil {
stats[strings.Trim(linefields[0], ":")] = MemstatStats{
value: v,
unit: linefields[2],
}
}
} else if len(linefields) == 5 {
v, err := strconv.ParseFloat(linefields[3], 64)
if err == nil {
stats[strings.Trim(linefields[0], ":")] = MemstatStats{
value: v,
unit: linefields[4],
}
}
}
}
return stats
}
func (m *MemstatCollector) Init(config json.RawMessage) error {
var err error
m.name = "MemstatCollector"
m.config.NodeStats = true
m.config.NumaStats = false
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
m.meta = map[string]string{"source": m.name, "group": "Memory"}
m.meta = map[string]string{"source": m.name, "group": "Memory", "unit": "kByte"}
m.stats = make(map[string]int64)
m.matches = make(map[string]string)
m.tags = map[string]string{"type": "node"}
matches := map[string]string{
"MemTotal": "mem_total",
matches := map[string]string{`MemTotal`: "mem_total",
"SwapTotal": "swap_total",
"SReclaimable": "mem_sreclaimable",
"Slab": "mem_slab",
@@ -102,58 +48,21 @@ func (m *MemstatCollector) Init(config json.RawMessage) error {
"Buffers": "mem_buffers",
"Cached": "mem_cached",
"MemAvailable": "mem_available",
"SwapFree": "swap_free",
"MemShared": "mem_shared",
}
"SwapFree": "swap_free"}
for k, v := range matches {
_, skip := stringArrayContains(m.config.ExcludeMetrics, k)
if !skip {
m.matches[k] = v
}
}
m.sendMemUsed = false
if _, skip := stringArrayContains(m.config.ExcludeMetrics, "mem_used"); !skip {
m.sendMemUsed = true
}
if len(m.matches) == 0 {
return errors.New("no metrics to collect")
return errors.New("No metrics to collect")
}
m.setup()
if m.config.NodeStats {
if stats := getStats(MEMSTATFILE); len(stats) == 0 {
return fmt.Errorf("cannot read data from file %s", MEMSTATFILE)
}
}
if m.config.NumaStats {
globPattern := filepath.Join(NUMA_MEMSTAT_BASE, "node[0-9]*", "meminfo")
regex := regexp.MustCompile(filepath.Join(NUMA_MEMSTAT_BASE, "node(\\d+)", "meminfo"))
files, err := filepath.Glob(globPattern)
_, err = ioutil.ReadFile(string(MEMSTATFILE))
if err == nil {
m.nodefiles = make(map[int]MemstatCollectorNode)
for _, f := range files {
if stats := getStats(f); len(stats) == 0 {
return fmt.Errorf("cannot read data from file %s", f)
}
rematch := regex.FindStringSubmatch(f)
if len(rematch) == 2 {
id, err := strconv.Atoi(rematch[1])
if err == nil {
f := MemstatCollectorNode{
file: f,
tags: map[string]string{
"type": "memoryDomain",
"type-id": fmt.Sprintf("%d", id),
},
}
m.nodefiles[id] = f
}
}
}
}
}
m.init = true
}
return err
}
@@ -162,65 +71,56 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMetric)
return
}
sendStats := func(stats map[string]MemstatStats, tags map[string]string) {
buffer, err := ioutil.ReadFile(string(MEMSTATFILE))
if err != nil {
log.Print(err)
return
}
ll := strings.Split(string(buffer), "\n")
for _, line := range ll {
ls := strings.Split(line, `:`)
if len(ls) > 1 {
lv := strings.Fields(ls[1])
m.stats[ls[0]], err = strconv.ParseInt(lv[0], 0, 64)
}
}
if _, exists := m.stats[`MemTotal`]; !exists {
err = errors.New("Parse error")
log.Print(err)
return
}
for match, name := range m.matches {
var value float64 = 0
var unit string = ""
if v, ok := stats[match]; ok {
value = v.value
if len(v.unit) > 0 {
unit = v.unit
if _, exists := m.stats[match]; !exists {
err = fmt.Errorf("Parse error for %s : %s", match, name)
log.Print(err)
continue
}
}
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": int(float64(m.stats[match]) * 1.0e-3)}, time.Now())
if err == nil {
if len(unit) > 0 {
y.AddMeta("unit", unit)
}
output <- y
}
}
if m.sendMemUsed {
memUsed := 0.0
unit := ""
if totalVal, total := stats["MemTotal"]; total {
if freeVal, free := stats["MemFree"]; free {
if bufVal, buffers := stats["Buffers"]; buffers {
if cacheVal, cached := stats["Cached"]; cached {
memUsed = totalVal.value - (freeVal.value + bufVal.value + cacheVal.value)
if len(totalVal.unit) > 0 {
unit = totalVal.unit
} else if len(freeVal.unit) > 0 {
unit = freeVal.unit
} else if len(bufVal.unit) > 0 {
unit = bufVal.unit
} else if len(cacheVal.unit) > 0 {
unit = cacheVal.unit
}
}
}
}
}
y, err := lp.New("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
if err == nil {
if len(unit) > 0 {
y.AddMeta("unit", unit)
}
if _, free := m.stats[`MemFree`]; free {
if _, buffers := m.stats[`Buffers`]; buffers {
if _, cached := m.stats[`Cached`]; cached {
memUsed := m.stats[`MemTotal`] - (m.stats[`MemFree`] + m.stats[`Buffers`] + m.stats[`Cached`])
_, skip := stringArrayContains(m.config.ExcludeMetrics, "mem_used")
y, err := lp.New("mem_used", m.tags, m.meta, map[string]interface{}{"value": int(float64(memUsed) * 1.0e-3)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if m.config.NodeStats {
nodestats := getStats(MEMSTATFILE)
sendStats(nodestats, m.tags)
}
if m.config.NumaStats {
for _, nodeConf := range m.nodefiles {
stats := getStats(nodeConf.file)
sendStats(stats, nodeConf.tags)
if _, found := m.stats[`MemShared`]; found {
_, skip := stringArrayContains(m.config.ExcludeMetrics, "mem_shared")
y, err := lp.New("mem_shared", m.tags, m.meta, map[string]interface{}{"value": int(float64(m.stats[`MemShared`]) * 1.0e-3)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}

View File

@@ -10,33 +10,33 @@ import (
"time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
)
type MetricCollector interface {
Name() string // Name of the metric collector
Init(config json.RawMessage) error // Initialize metric collector
Initialized() bool // Is metric collector initialized?
Read(duration time.Duration, output chan lp.CCMetric) // Read metrics from metric collector
Close() // Close / finish metric collector
Name() string
Init(config json.RawMessage) error
Initialized() bool
Read(duration time.Duration, output chan lp.CCMetric)
Close()
}
type metricCollector struct {
name string // name of the metric
init bool // is metric collector initialized?
meta map[string]string // static meta data tags
name string
init bool
meta map[string]string
}
// Name returns the name of the metric collector
// Name() returns the name of the metric collector
func (c *metricCollector) Name() string {
return c.name
}
// Setup is for future use
func (c *metricCollector) setup() error {
return nil
}
// Initialized indicates whether the metric collector has been initialized
// Initialized() indicates whether the metric collector has been initialized.
func (c *metricCollector) Initialized() bool {
return c.init
}
@@ -65,7 +65,6 @@ func stringArrayContains(array []string, str string) (int, bool) {
return -1, false
}
// SocketList returns the list of physical sockets as read from /proc/cpuinfo
func SocketList() []int {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
@@ -91,7 +90,6 @@ func SocketList() []int {
return packs
}
// CpuList returns the list of physical CPUs (in contrast to logical CPUs) as read from /proc/cpuinfo
func CpuList() []int {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
@@ -117,13 +115,31 @@ func CpuList() []int {
return cpulist
}
// Tags2Map stores an InfluxDB list of tags in a map of key value pairs
func Tags2Map(metric influx.Metric) map[string]string {
tags := make(map[string]string)
for _, t := range metric.TagList() {
tags[t.Key] = t.Value
}
return tags
}
// Fields2Map stores an InfluxDB list of fields in a map of key value pairs
func Fields2Map(metric influx.Metric) map[string]interface{} {
fields := make(map[string]interface{})
for _, f := range metric.FieldList() {
fields[f.Key] = f.Value
}
return fields
}
// RemoveFromStringList removes the string r from the array of strings s
// If r is not contained in the array an error is returned
func RemoveFromStringList(s []string, r string) ([]string, error) {
for i := range s {
if r == s[i] {
for i, item := range s {
if r == item {
return append(s[:i], s[i+1:]...), nil
}
}
return s, fmt.Errorf("no such string in list")
return s, fmt.Errorf("No such string in list")
}

View File

@@ -1,217 +1,92 @@
package collectors
import (
"bufio"
"encoding/json"
"errors"
"os"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const NETSTATFILE = "/proc/net/dev"
const NETSTATFILE = `/proc/net/dev`
type NetstatCollectorConfig struct {
IncludeDevices []string `json:"include_devices"`
SendAbsoluteValues bool `json:"send_abs_values"`
SendDerivedValues bool `json:"send_derived_values"`
}
type NetstatCollectorMetric struct {
name string
index int
tags map[string]string
meta map[string]string
meta_rates map[string]string
lastValue int64
ExcludeDevices []string `json:"exclude_devices"`
}
type NetstatCollector struct {
metricCollector
config NetstatCollectorConfig
matches map[string][]NetstatCollectorMetric
lastTimestamp time.Time
matches map[int]string
}
func (m *NetstatCollector) Init(config json.RawMessage) error {
m.name = "NetstatCollector"
m.setup()
m.lastTimestamp = time.Now()
const (
fieldInterface = iota
fieldReceiveBytes
fieldReceivePackets
fieldReceiveErrs
fieldReceiveDrop
fieldReceiveFifo
fieldReceiveFrame
fieldReceiveCompressed
fieldReceiveMulticast
fieldTransmitBytes
fieldTransmitPackets
fieldTransmitErrs
fieldTransmitDrop
fieldTransmitFifo
fieldTransmitColls
fieldTransmitCarrier
fieldTransmitCompressed
)
m.matches = make(map[string][]NetstatCollectorMetric)
// Set default configuration,
m.config.SendAbsoluteValues = true
m.config.SendDerivedValues = false
// Read configuration file, allow overwriting default config
m.meta = map[string]string{"source": m.name, "group": "Memory"}
m.matches = map[int]string{
1: "bytes_in",
9: "bytes_out",
2: "pkts_in",
10: "pkts_out",
}
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
log.Print(err.Error())
return err
}
}
// Check access to net statistic file
file, err := os.Open(NETSTATFILE)
if err != nil {
cclog.ComponentError(m.name, err.Error())
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
l := scanner.Text()
// Skip lines with no net device entry
if !strings.Contains(l, ":") {
continue
}
// Split line into fields
f := strings.Fields(l)
// Get net device entry
dev := strings.Trim(f[0], ": ")
// Check if device is a included device
if _, ok := stringArrayContains(m.config.IncludeDevices, dev); ok {
tags := map[string]string{"device": dev, "type": "node"}
meta_unit_byte := map[string]string{"source": m.name, "group": "Network", "unit": "bytes"}
meta_unit_byte_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "bytes/sec"}
meta_unit_pkts := map[string]string{"source": m.name, "group": "Network", "unit": "packets"}
meta_unit_pkts_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "packets/sec"}
m.matches[dev] = []NetstatCollectorMetric{
{
name: "net_bytes_in",
index: fieldReceiveBytes,
lastValue: -1,
tags: tags,
meta: meta_unit_byte,
meta_rates: meta_unit_byte_per_sec,
},
{
name: "net_pkts_in",
index: fieldReceivePackets,
lastValue: -1,
tags: tags,
meta: meta_unit_pkts,
meta_rates: meta_unit_pkts_per_sec,
},
{
name: "net_bytes_out",
index: fieldTransmitBytes,
lastValue: -1,
tags: tags,
meta: meta_unit_byte,
meta_rates: meta_unit_byte_per_sec,
},
{
name: "net_pkts_out",
index: fieldTransmitPackets,
lastValue: -1,
tags: tags,
meta: meta_unit_pkts,
meta_rates: meta_unit_pkts_per_sec,
},
}
}
}
if len(m.matches) == 0 {
return errors.New("no devices to collector metrics found")
}
_, err := ioutil.ReadFile(string(NETSTATFILE))
if err == nil {
m.init = true
}
return nil
}
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
// Current time stamp
now := time.Now()
// time difference to last time stamp
timeDiff := now.Sub(m.lastTimestamp).Seconds()
// Save current timestamp
m.lastTimestamp = now
file, err := os.Open(string(NETSTATFILE))
data, err := ioutil.ReadFile(string(NETSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
log.Print(err.Error())
return
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
l := scanner.Text()
// Skip lines with no net device entry
lines := strings.Split(string(data), "\n")
for _, l := range lines {
if !strings.Contains(l, ":") {
continue
}
// Split line into fields
f := strings.Fields(l)
// Get net device entry
dev := strings.Trim(f[0], ":")
// Check if device is a included device
if devmetrics, ok := m.matches[dev]; ok {
for i := range devmetrics {
metric := &devmetrics[i]
// Read value
v, err := strconv.ParseInt(f[metric.index], 10, 64)
if err != nil {
dev := f[0][0 : len(f[0])-1]
cont := false
for _, d := range m.config.ExcludeDevices {
if d == dev {
cont = true
}
}
if cont {
continue
}
if m.config.SendAbsoluteValues {
if y, err := lp.New(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
tags := map[string]string{"device": dev, "type": "node"}
for i, name := range m.matches {
v, err := strconv.ParseInt(f[i], 10, 0)
if err == nil {
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": int(float64(v) * 1.0e-3)}, time.Now())
if err == nil {
switch {
case strings.Contains(name, "byte"):
y.AddMeta("unit", "Byte")
case strings.Contains(name, "pkt"):
y.AddMeta("unit", "Packets")
}
output <- y
}
}
if m.config.SendDerivedValues {
if metric.lastValue >= 0 {
rate := float64(v-metric.lastValue) / timeDiff
if y, err := lp.New(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
output <- y
}
}
metric.lastValue = v
}
}
}
}
}
func (m *NetstatCollector) Close() {

View File

@@ -3,25 +3,19 @@
```json
"netstat": {
"include_devices": [
"eth0"
],
"send_abs_values" : true,
"send_derived_values" : true
"exclude_devices": [
"lo"
]
}
```
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful of **node** metrics. With the `include_devices` list you can specify which network devices should be measured. **Note**: Most other collectors use an _exclude_ list instead of an include list.
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful of **node** metrics. If a device is not required, it can be excluded from forwarding to the sink. Commonly the `lo` device should be excluded.
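For orientation, a made-up `/proc/net/dev` excerpt; after splitting a device line into fields, field 1 holds received bytes, field 2 received packets, field 9 transmitted bytes and field 10 transmitted packets, which is exactly the index mapping both collector variants rely on:
```
Inter-|   Receive                                                |  Transmit
 face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
  eth0: 46725372   61524    0    0    0     0          0         0 23516723   42003    0    0    0     0       0          0
    lo:  1020304    5060    0    0    0     0          0         0  1020304    5060    0    0    0     0       0          0
```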
Metrics:
* `net_bytes_in` (`unit=bytes`)
* `net_bytes_out` (`unit=bytes`)
* `net_pkts_in` (`unit=packets`)
* `net_pkts_out` (`unit=packets`)
* `net_bytes_in_bw` (`unit=bytes/sec` if `send_derived_values == true`)
* `net_bytes_out_bw` (`unit=bytes/sec` if `send_derived_values == true`)
* `net_pkts_in_bw` (`unit=packets/sec` if `send_derived_values == true`)
* `net_pkts_out_bw` (`unit=packets/sec` if `send_derived_values == true`)
* `bytes_in`
* `bytes_out`
* `pkts_in`
* `pkts_out`
The device name is added as tag `device`.

View File

@@ -1,39 +0,0 @@
## `nfs3stat` collector
```json
"nfs3stat": {
"nfsstat" : "/path/to/nfsstat",
"exclude_metrics": [
"nfs3_total"
]
}
```
The `nfs3stat` collector reads data from the `nfsstat` command and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from forwarding to the sink. There is currently no possibility to get the metrics per mount point.
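For reference, a made-up excerpt of the `nfsstat -l` output the collector parses; only lines with exactly five fields are considered, with field 1 as the NFS version, field 3 (colon trimmed) as the operation name and field 4 as the counter value:
```
nfs v3 client        total:       4806
nfs v3 client        getattr:     1274
nfs v3 client        read:         512
```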
Metrics:
* `nfs3_total`
* `nfs3_null`
* `nfs3_getattr`
* `nfs3_setattr`
* `nfs3_lookup`
* `nfs3_access`
* `nfs3_readlink`
* `nfs3_read`
* `nfs3_write`
* `nfs3_create`
* `nfs3_mkdir`
* `nfs3_symlink`
* `nfs3_remove`
* `nfs3_rmdir`
* `nfs3_rename`
* `nfs3_link`
* `nfs3_readdir`
* `nfs3_readdirplus`
* `nfs3_fsstat`
* `nfs3_fsinfo`
* `nfs3_pathconf`
* `nfs3_commit`

View File

@@ -1,62 +0,0 @@
## `nfs4stat` collector
```json
"nfs4stat": {
"nfsstat" : "/path/to/nfsstat",
"exclude_metrics": [
"nfs4_total"
]
}
```
The `nfs4stat` collector reads data from the `nfsstat` command and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from forwarding to the sink. There is currently no possibility to get the metrics per mount point.
Metrics:
* `nfs4_total`
* `nfs4_null`
* `nfs4_read`
* `nfs4_write`
* `nfs4_commit`
* `nfs4_open`
* `nfs4_open_conf`
* `nfs4_open_noat`
* `nfs4_open_dgrd`
* `nfs4_close`
* `nfs4_setattr`
* `nfs4_fsinfo`
* `nfs4_renew`
* `nfs4_setclntid`
* `nfs4_confirm`
* `nfs4_lock`
* `nfs4_lockt`
* `nfs4_locku`
* `nfs4_access`
* `nfs4_getattr`
* `nfs4_lookup`
* `nfs4_lookup_root`
* `nfs4_remove`
* `nfs4_rename`
* `nfs4_link`
* `nfs4_symlink`
* `nfs4_create`
* `nfs4_pathconf`
* `nfs4_statfs`
* `nfs4_readlink`
* `nfs4_readdir`
* `nfs4_server_caps`
* `nfs4_delegreturn`
* `nfs4_getacl`
* `nfs4_setacl`
* `nfs4_rel_lkowner`
* `nfs4_exchange_id`
* `nfs4_create_session`
* `nfs4_destroy_session`
* `nfs4_sequence`
* `nfs4_get_lease_time`
* `nfs4_reclaim_comp`
* `nfs4_secinfo_no`
* `nfs4_bind_conn_to_ses`

View File

@@ -14,29 +14,23 @@ import (
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// First part contains the code for the general NfsCollector.
// Later, the general nfsCollector is specialized into the Nfs3Collector and Nfs4Collector.
const NFSSTAT_EXEC = `nfsstat`
type NfsCollectorData struct {
current int64
last int64
}
type nfsCollector struct {
type NfsCollector struct {
metricCollector
tags map[string]string
version string
config struct {
Nfsstats string `json:"nfsstat"`
Nfsutils string `json:"nfsutils"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
data map[string]NfsCollectorData
data map[string]map[string]NfsCollectorData
}
func (m *nfsCollector) initStats() error {
cmd := exec.Command(m.config.Nfsstats, `-l`, `--all`)
func (m *NfsCollector) initStats() error {
cmd := exec.Command(m.config.Nfsutils, "-l")
cmd.Wait()
buffer, err := cmd.Output()
if err == nil {
@@ -45,16 +39,17 @@ func (m *nfsCollector) initStats() error {
if len(lf) != 5 {
continue
}
if lf[1] == m.version {
if _, exist := m.data[lf[1]]; !exist {
m.data[lf[1]] = make(map[string]NfsCollectorData)
}
name := strings.Trim(lf[3], ":")
if _, exist := m.data[name]; !exist {
if _, exist := m.data[lf[1]][name]; !exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[name]
x := m.data[lf[1]][name]
x.current = value
x.last = value
m.data[name] = x
}
x.last = 0
m.data[lf[1]][name] = x
}
}
}
@@ -62,8 +57,8 @@ func (m *nfsCollector) initStats() error {
return err
}
func (m *nfsCollector) updateStats() error {
cmd := exec.Command(m.config.Nfsstats, `-l`, `--all`)
func (m *NfsCollector) updateStats() error {
cmd := exec.Command(m.config.Nfsutils, "-l")
cmd.Wait()
buffer, err := cmd.Output()
if err == nil {
@@ -72,16 +67,17 @@ func (m *nfsCollector) updateStats() error {
if len(lf) != 5 {
continue
}
if lf[1] == m.version {
if _, exist := m.data[lf[1]]; !exist {
m.data[lf[1]] = make(map[string]NfsCollectorData)
}
name := strings.Trim(lf[3], ":")
if _, exist := m.data[name]; exist {
if _, exist := m.data[lf[1]][name]; exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[name]
x := m.data[lf[1]][name]
x.last = x.current
x.current = value
m.data[name] = x
}
m.data[lf[1]][name] = x
}
}
}
@@ -89,11 +85,17 @@ func (m *nfsCollector) updateStats() error {
return err
}
func (m *nfsCollector) MainInit(config json.RawMessage) error {
m.config.Nfsstats = string(NFSSTAT_EXEC)
func (m *NfsCollector) Init(config json.RawMessage) error {
var err error
m.name = "NfsCollector"
m.setup()
	// Set default nfsstat binary
m.config.Nfsutils = "/usr/sbin/nfsstat"
// Read JSON configuration
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
err = json.Unmarshal(config, &m.config)
if err != nil {
log.Print(err.Error())
return err
@@ -106,69 +108,40 @@ func (m *nfsCollector) MainInit(config json.RawMessage) error {
m.tags = map[string]string{
"type": "node",
}
// Check if nfsstat is in executable search path
_, err := exec.LookPath(m.config.Nfsstats)
// Check if mmpmon is in executable search path
_, err = exec.LookPath(m.config.Nfsutils)
if err != nil {
return fmt.Errorf("NfsCollector.Init(): Failed to find nfsstat binary '%s': %v", m.config.Nfsstats, err)
return fmt.Errorf("NfsCollector.Init(): Failed to find nfsstat binary '%s': %v", m.config.Nfsutils, err)
}
m.data = make(map[string]NfsCollectorData)
m.data = make(map[string]map[string]NfsCollectorData)
m.initStats()
m.init = true
return nil
}
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
func (m *NfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
timestamp := time.Now()
m.updateStats()
prefix := ""
switch m.version {
case "v3":
prefix = "nfs3"
case "v4":
prefix = "nfs4"
default:
prefix = "nfs"
}
for name, data := range m.data {
for version, metrics := range m.data {
for name, data := range metrics {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, name); skip {
continue
}
value := data.current - data.last
y, err := lp.New(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New(fmt.Sprintf("nfs_%s", name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddMeta("version", m.version)
y.AddMeta("version", version)
output <- y
}
}
}
}
func (m *nfsCollector) Close() {
func (m *NfsCollector) Close() {
m.init = false
}
type Nfs3Collector struct {
nfsCollector
}
type Nfs4Collector struct {
nfsCollector
}
func (m *Nfs3Collector) Init(config json.RawMessage) error {
m.name = "Nfs3Collector"
m.version = `v3`
m.setup()
return m.MainInit(config)
}
func (m *Nfs4Collector) Init(config json.RawMessage) error {
m.name = "Nfs4Collector"
m.version = `v4`
m.setup()
return m.MainInit(config)
}

View File

@@ -1,139 +0,0 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
//
// Numa policy hit/miss statistics
//
// numa_hit:
// A process wanted to allocate memory from this node, and succeeded.
// numa_miss:
// A process wanted to allocate memory from another node,
// but ended up with memory from this node.
// numa_foreign:
// A process wanted to allocate on this node,
// but ended up with memory from another node.
// local_node:
// A process ran on this node's CPU,
// and got memory from this node.
// other_node:
// A process ran on a different node's CPU
// and got memory from this node.
// interleave_hit:
// Interleaving wanted to allocate from this node
// and succeeded.
//
// See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
//
type NUMAStatsCollectorTopolgy struct {
file string
tagSet map[string]string
}
type NUMAStatsCollector struct {
metricCollector
topology []NUMAStatsCollectorTopolgy
}
func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "NUMAStatsCollector"
m.setup()
m.meta = map[string]string{
"source": m.name,
"group": "NUMA",
}
// Loop for all NUMA node directories
base := "/sys/devices/system/node/node"
globPattern := base + "[0-9]*"
dirs, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("unable to glob files with pattern '%s'", globPattern)
}
if dirs == nil {
return fmt.Errorf("unable to find any files with pattern '%s'", globPattern)
}
m.topology = make([]NUMAStatsCollectorTopolgy, 0, len(dirs))
for _, dir := range dirs {
node := strings.TrimPrefix(dir, base)
file := filepath.Join(dir, "numastat")
m.topology = append(m.topology,
NUMAStatsCollectorTopolgy{
file: file,
tagSet: map[string]string{"memoryDomain": node},
})
}
m.init = true
return nil
}
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
for i := range m.topology {
// Loop for all NUMA domains
t := &m.topology[i]
now := time.Now()
file, err := os.Open(t.file)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to open file '%s': %v", t.file, err))
return
}
scanner := bufio.NewScanner(file)
// Read line by line
for scanner.Scan() {
split := strings.Fields(scanner.Text())
if len(split) != 2 {
continue
}
key := split[0]
value, err := strconv.ParseInt(split[1], 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err))
continue
}
y, err := lp.New(
"numastats_"+key,
t.tagSet,
m.meta,
map[string]interface{}{"value": value},
now,
)
if err == nil {
output <- y
}
}
file.Close()
}
}
func (m *NUMAStatsCollector) Close() {
m.init = false
}

View File

@@ -1,15 +0,0 @@
## `numastat` collector
```json
"numastat": {}
```
The `numastat` collector reads data from `/sys/devices/system/node/node*/numastat` and outputs a handful of **memoryDomain** metrics. See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
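For reference, a made-up `numastat` file; each line is a `key value` pair, and the collector emits each key prefixed with `numastats_`:
```
numa_hit 1357459
numa_miss 0
numa_foreign 0
interleave_hit 7201
local_node 1345291
other_node 12168
```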
Metrics:
* `numastats_numa_hit`: A process wanted to allocate memory from this node, and succeeded.
* `numastats_numa_miss`: A process wanted to allocate memory from another node, but ended up with memory from this node.
* `numastats_numa_foreign`: A process wanted to allocate on this node, but ended up with memory from another node.
* `numastats_local_node`: A process ran on this node's CPU, and got memory from this node.
* `numastats_other_node`: A process ran on a different node's CPU, and got memory from this node.
* `numastats_interleave_hit`: Interleaving wanted to allocate from this node and succeeded.

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"log"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/nvml"
)
@@ -15,20 +13,12 @@ import (
type NvidiaCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeDevices []string `json:"exclude_devices,omitempty"`
AddPciInfoTag bool `json:"add_pci_info_tag,omitempty"`
}
type NvidiaCollectorDevice struct {
device nvml.Device
excludeMetrics map[string]bool
tags map[string]string
}
type NvidiaCollector struct {
metricCollector
num_gpus int
config NvidiaCollectorConfig
gpus []NvidiaCollectorDevice
}
func (m *NvidiaCollector) CatchPanic() {
@@ -41,86 +31,26 @@ func (m *NvidiaCollector) CatchPanic() {
func (m *NvidiaCollector) Init(config json.RawMessage) error {
var err error
m.name = "NvidiaCollector"
m.config.AddPciInfoTag = false
m.setup()
m.meta = map[string]string{"source": m.name, "group": "Nvidia"}
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
m.meta = map[string]string{
"source": m.name,
"group": "Nvidia",
}
m.num_gpus = 0
defer m.CatchPanic()
// Initialize NVIDIA Management Library (NVML)
ret := nvml.Init()
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to initialize NVML", err.Error())
return err
}
// Number of NVIDIA GPUs
num_gpus, ret := nvml.DeviceGetCount()
m.num_gpus, ret = nvml.DeviceGetCount()
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to get device count", err.Error())
return err
}
// For all GPUs
m.gpus = make([]NvidiaCollectorDevice, num_gpus)
for i := 0; i < num_gpus; i++ {
g := &m.gpus[i]
// Skip excluded devices
str_i := fmt.Sprintf("%d", i)
if _, skip := stringArrayContains(m.config.ExcludeDevices, str_i); skip {
continue
}
// Get device handle
device, ret := nvml.DeviceGetHandleByIndex(i)
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to get device at index", i, ":", err.Error())
return err
}
g.device = device
// Add tags
g.tags = map[string]string{
"type": "accelerator",
"type-id": str_i,
}
// Add excluded metrics
g.excludeMetrics = map[string]bool{}
for _, e := range m.config.ExcludeMetrics {
g.excludeMetrics[e] = true
}
// Add PCI info as tag
if m.config.AddPciInfoTag {
pciInfo, ret := nvml.DeviceGetPciInfo(g.device)
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to get PCI info for device at index", i, ":", err.Error())
return err
}
g.tags["pci_identifier"] = fmt.Sprintf(
"%08X:%02X:%02X.0",
pciInfo.Domain,
pciInfo.Bus,
pciInfo.Device)
}
}
m.init = true
return nil
}
@@ -129,338 +59,210 @@ func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMetric)
if !m.init {
return
}
for i := 0; i < m.num_gpus; i++ {
device, ret := nvml.DeviceGetHandleByIndex(i)
if ret != nvml.SUCCESS {
log.Fatalf("Unable to get device at index %d: %v", i, nvml.ErrorString(ret))
return
}
_, skip := stringArrayContains(m.config.ExcludeDevices, fmt.Sprintf("%d", i))
if skip {
continue
}
tags := map[string]string{"type": "accelerator", "type-id": fmt.Sprintf("%d", i)}
for i := range m.gpus {
device := &m.gpus[i]
if !device.excludeMetrics["nv_util"] || !device.excludeMetrics["nv_mem_util"] {
// Retrieves the current utilization rates for the device's major subsystems.
//
// Available utilization rates
// * Gpu: Percent of time over the past sample period during which one or more kernels was executing on the GPU.
// * Memory: Percent of time over the past sample period during which global (device) memory was being read or written
//
// Note:
// * During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
// This is caused by ECC Memory Scrubbing mechanism that is performed during driver initialization.
// * On MIG-enabled GPUs, querying device utilization rates is not currently supported.
util, ret := nvml.DeviceGetUtilizationRates(device.device)
util, ret := nvml.DeviceGetUtilizationRates(device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_util"] {
y, err := lp.New("nv_util", device.tags, m.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_util")
y, err := lp.New("nv_util", tags, m.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
if err == nil && !skip {
output <- y
}
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_mem_util")
y, err = lp.New("nv_mem_util", tags, m.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
if err == nil && !skip {
output <- y
}
}
if !device.excludeMetrics["nv_mem_util"] {
y, err := lp.New("nv_mem_util", device.tags, m.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
}
}
}
if !device.excludeMetrics["nv_mem_total"] || !device.excludeMetrics["nv_fb_memory"] {
// Retrieves the amount of used, free and total memory available on the device, in bytes.
//
// Enabling ECC reduces the amount of total available memory, due to the extra required parity bits.
//
// The reported amount of used memory is equal to the sum of memory allocated by all active channels on the device.
//
// Available memory info:
// * Free: Unallocated FB memory (in bytes).
// * Total: Total installed FB memory (in bytes).
// * Used: Allocated FB memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping.
//
// Note:
// In MIG mode, if device handle is provided, the API returns aggregate information, only if the caller has appropriate privileges.
// Per-instance information can be queried by using specific MIG device handles.
meminfo, ret := nvml.DeviceGetMemoryInfo(device.device)
meminfo, ret := nvml.DeviceGetMemoryInfo(device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_mem_total"] {
t := float64(meminfo.Total) / (1024 * 1024)
y, err := lp.New("nv_mem_total", device.tags, m.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_mem_total")
y, err := lp.New("nv_mem_total", tags, m.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil && !skip {
y.AddMeta("unit", "MByte")
output <- y
}
}
if !device.excludeMetrics["nv_fb_memory"] {
f := float64(meminfo.Used) / (1024 * 1024)
y, err := lp.New("nv_fb_memory", device.tags, m.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_fb_memory")
y, err = lp.New("nv_fb_memory", tags, m.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil && !skip {
y.AddMeta("unit", "MByte")
output <- y
}
}
}
}
if !device.excludeMetrics["nv_temp"] {
// Retrieves the current temperature readings for the device, in degrees C.
//
// Available temperature sensors:
// * TEMPERATURE_GPU: Temperature sensor for the GPU die.
// * NVML_TEMPERATURE_COUNT
temp, ret := nvml.DeviceGetTemperature(device.device, nvml.TEMPERATURE_GPU)
temp, ret := nvml.DeviceGetTemperature(device, nvml.TEMPERATURE_GPU)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_temp", device.tags, m.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_temp")
y, err := lp.New("nv_temp", tags, m.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
if err == nil && !skip {
y.AddMeta("unit", "degC")
output <- y
}
}
}
if !device.excludeMetrics["nv_fan"] {
// Retrieves the intended operating speed of the device's fan.
//
// Note: The reported speed is the intended fan speed.
// If the fan is physically blocked and unable to spin, the output will not match the actual fan speed.
//
// For all discrete products with dedicated fans.
//
// The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
// This value may exceed 100% in certain cases.
fan, ret := nvml.DeviceGetFanSpeed(device.device)
fan, ret := nvml.DeviceGetFanSpeed(device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_fan", device.tags, m.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_fan")
y, err := lp.New("nv_fan", tags, m.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_ecc_mode"] {
// Retrieves the current and pending ECC modes for the device.
//
// For Fermi or newer fully supported devices. Only applicable to devices with ECC.
// Requires NVML_INFOROM_ECC version 1.0 or higher.
//
// Changing ECC modes requires a reboot.
// The "pending" ECC mode refers to the target mode following the next reboot.
_, ecc_pend, ret := nvml.DeviceGetEccMode(device.device)
_, ecc_pend, ret := nvml.DeviceGetEccMode(device)
if ret == nvml.SUCCESS {
var y lp.CCMetric
var err error
switch ecc_pend {
case nvml.FEATURE_DISABLED:
y, err = lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "OFF"}, time.Now())
y, err = lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("OFF")}, time.Now())
case nvml.FEATURE_ENABLED:
y, err = lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "ON"}, time.Now())
y, err = lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("ON")}, time.Now())
default:
y, err = lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
y, err = lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("UNKNOWN")}, time.Now())
}
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_mode")
if err == nil && !skip {
output <- y
}
} else if ret == nvml.ERROR_NOT_SUPPORTED {
y, err := lp.New("nv_ecc_mode", device.tags, m.meta, map[string]interface{}{"value": "N/A"}, time.Now())
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_mode")
y, err := lp.New("nv_ecc_mode", tags, m.meta, map[string]interface{}{"value": string("N/A")}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_perf_state"] {
// Retrieves the current performance state for the device.
//
// Allowed PStates:
// 0: Maximum Performance.
// ..
// 15: Minimum Performance.
// 32: Unknown performance state.
pState, ret := nvml.DeviceGetPerformanceState(device.device)
pstate, ret := nvml.DeviceGetPerformanceState(device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_perf_state", device.tags, m.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_perf_state")
y, err := lp.New("nv_perf_state", tags, m.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pstate))}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_power_usage_report"] {
// Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
//
// On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw.
//
// It is only available if power management mode is supported
power, ret := nvml.DeviceGetPowerUsage(device.device)
power, ret := nvml.DeviceGetPowerUsage(device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_power_usage_report", device.tags, m.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_power_usage_report")
y, err := lp.New("nv_power_usage_report", tags, m.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
// Retrieves the current clock speeds for the device.
//
// Available clock information:
// * CLOCK_GRAPHICS: Graphics clock domain.
// * CLOCK_SM: Streaming Multiprocessor clock domain.
// * CLOCK_MEM: Memory clock domain.
if !device.excludeMetrics["nv_graphics_clock_report"] {
graphicsClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_GRAPHICS)
gclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_graphics_clock_report", device.tags, m.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_graphics_clock_report")
y, err := lp.New("nv_graphics_clock_report", tags, m.meta, map[string]interface{}{"value": float64(gclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_sm_clock_report"] {
smClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
smclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_sm_clock_report", device.tags, m.meta, map[string]interface{}{"value": float64(smClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_sm_clock_report")
y, err := lp.New("nv_sm_clock_report", tags, m.meta, map[string]interface{}{"value": float64(smclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_mem_clock_report"] {
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
memclk, ret := nvml.DeviceGetClockInfo(device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_mem_clock_report", device.tags, m.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_mem_clock_report")
y, err := lp.New("nv_mem_clock_report", tags, m.meta, map[string]interface{}{"value": float64(memclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
// Retrieves the maximum clock speeds for the device.
//
// Available clock information:
// * CLOCK_GRAPHICS: Graphics clock domain.
// * CLOCK_SM: Streaming multiprocessor clock domain.
// * CLOCK_MEM: Memory clock domain.
// * CLOCK_VIDEO: Video encoder/decoder clock domain.
// * CLOCK_COUNT: Count of clock types.
//
// Note:
// On GPUs from the Fermi family, current P0 clocks (reported by nvmlDeviceGetClockInfo) can differ from max clocks by a few MHz.
if !device.excludeMetrics["nv_max_graphics_clock"] {
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_GRAPHICS)
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_max_graphics_clock", device.tags, m.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_max_graphics_clock")
y, err := lp.New("nv_max_graphics_clock", tags, m.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_max_sm_clock"] {
maxSmClock, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_SM)
max_smclk, ret := nvml.DeviceGetMaxClockInfo(device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_max_sm_clock", device.tags, m.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_max_sm_clock")
y, err := lp.New("nv_max_sm_clock", tags, m.meta, map[string]interface{}{"value": float64(max_smclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_max_mem_clock"] {
maxMemClock, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_MEM)
max_memclk, ret := nvml.DeviceGetMaxClockInfo(device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_max_mem_clock", device.tags, m.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_max_mem_clock")
y, err := lp.New("nv_max_mem_clock", tags, m.meta, map[string]interface{}{"value": float64(max_memclk)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_ecc_db_error"] {
// Retrieves the total ECC error counts for the device.
//
// For Fermi or newer fully supported devices.
// Only applicable to devices with ECC.
// Requires NVML_INFOROM_ECC version 1.0 or higher.
// Requires ECC Mode to be enabled.
//
// The total error count is the sum of errors across each of the separate memory systems,
// i.e. the total set of errors across the entire device.
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_UNCORRECTED, nvml.AGGREGATE_ECC)
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device, 1, 1)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_ecc_db_error", device.tags, m.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_db_error")
y, err := lp.New("nv_ecc_db_error", tags, m.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_ecc_sb_error"] {
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_CORRECTED, nvml.AGGREGATE_ECC)
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device, 0, 1)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_ecc_sb_error", device.tags, m.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
if err == nil {
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_ecc_sb_error")
y, err := lp.New("nv_ecc_sb_error", tags, m.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_power_man_limit"] {
// Retrieves the power management limit associated with this device.
//
// For Fermi or newer fully supported devices.
//
// The power limit defines the upper boundary for the card's power draw.
// If the card's total power draw reaches this limit the power management algorithm kicks in.
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device.device)
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_power_man_limit", device.tags, m.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_power_man_limit")
y, err := lp.New("nv_power_man_limit", tags, m.meta, map[string]interface{}{"value": float64(pwr_limit)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_encoder_util"] {
// Retrieves the current utilization and sampling size in microseconds for the Encoder
//
// For Kepler or newer fully supported devices.
//
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device.device)
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_encoder_util", device.tags, m.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_encoder_util")
y, err := lp.New("nv_encoder_util", tags, m.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
if !device.excludeMetrics["nv_decoder_util"] {
// Retrieves the current utilization and sampling size in microseconds for the Decoder
//
// For Kepler or newer fully supported devices.
//
// Note: On MIG-enabled GPUs, querying decoder utilization is not currently supported.
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device.device)
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device)
if ret == nvml.SUCCESS {
y, err := lp.New("nv_decoder_util", device.tags, m.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
_, skip = stringArrayContains(m.config.ExcludeMetrics, "nv_decoder_util")
y, err := lp.New("nv_decoder_util", tags, m.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
if err == nil && !skip {
output <- y
}
}
}
}
}
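One side of the diff above passes bare integers to `DeviceGetTotalEccErrors` while the other uses named constants. For readers comparing the two: the integers are the NVML enum values behind those constants, so both variants query the same counters (a note based on the NVML headers; worth double-checking against the go-nvml bindings in use):

```go
// nvml.MEMORY_ERROR_TYPE_CORRECTED   == 0  (single-bit errors)
// nvml.MEMORY_ERROR_TYPE_UNCORRECTED == 1  (double-bit errors)
// nvml.AGGREGATE_ECC                 == 1  (persistent counter across driver reloads)
//
// Hence DeviceGetTotalEccErrors(device, 1, 1) reads aggregate uncorrected (double-bit)
// errors, and DeviceGetTotalEccErrors(device, 0, 1) reads aggregate corrected
// (single-bit) errors.
```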

View File

@@ -1,92 +0,0 @@
package collectors
import (
"encoding/json"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// These are the fields we read from the JSON configuration
type SampleCollectorConfig struct {
Interval string `json:"interval"`
}
// This contains all variables we need during execution and the variables
// defined by metricCollector (name, init, ...)
type SampleCollector struct {
metricCollector
config SampleCollectorConfig // the configuration structure
meta map[string]string // default meta information
tags map[string]string // default tags
}
// Functions to implement MetricCollector interface
// Init(...), Read(...), Close()
// See: metricCollector.go
// Init initializes the sample collector
// Called once by the collector manager
// All tags, meta data tags and metrics that do not change over the runtime should be set here
func (m *SampleCollector) Init(config json.RawMessage) error {
var err error = nil
// Always set the name early in Init() to use it in cclog.Component* functions
m.name = "InternalCollector"
// This is for later use, also call it early
m.setup()
// Define meta information sent with each metric
// (Can also be dynamic or this is the basic set with extension through AddMeta())
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
// Define tags sent with each metric
// The 'type' tag is always needed, it defines the granularity of the metric
// node -> whole system
// socket -> CPU socket (requires socket ID as 'type-id' tag)
// cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag)
m.tags = map[string]string{"type": "node"}
// Read in the JSON configuration
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
// Set up everything that the collector requires during the Read() execution
// Check files required, test execution of some commands, create data structure
// for all topological entities (sockets, NUMA domains, ...)
// Return some useful error message in case of any failures
// Set this flag only if everything is initialized properly, all required files exist, ...
m.init = true
return err
}
// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Create a sample metric
timestamp := time.Now()
value := 1.0
// If you want to measure something for a specific amount of time, use interval
// start := readState()
// time.Sleep(interval)
// stop := readState()
// value = (stop - start) / interval.Seconds()
y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
// Send it to output channel
output <- y
}
}
// Close metric collector: close network connection, close files, close libraries, ...
// Called once by the collector manager
func (m *SampleCollector) Close() {
// Unset flag
m.init = false
}
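To make a collector like this selectable from the collectors configuration file, it has to be registered with the collector manager. A minimal sketch, assuming the manager keeps a map from configuration keys to collector instances (the map name `AvailableCollectors` and the key `sample` are illustrative assumptions, not taken from this diff):

```go
// Sketch: registration in the collector manager, mapping the JSON
// configuration key to an instance of the collector defined above.
var AvailableCollectors = map[string]MetricCollector{
	// ... existing collectors ...
	"sample": new(SampleCollector), // hypothetical configuration key
}
```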

View File

@@ -1,122 +0,0 @@
package collectors
import (
"encoding/json"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// These are the fields we read from the JSON configuration
type SampleTimerCollectorConfig struct {
Interval string `json:"interval"`
}
// This contains all variables we need during execution and the variables
// defined by metricCollector (name, init, ...)
type SampleTimerCollector struct {
metricCollector
wg sync.WaitGroup // sync group for management
done chan bool // channel for management
meta map[string]string // default meta information
tags map[string]string // default tags
config SampleTimerCollectorConfig // the configuration structure
interval time.Duration // the interval parsed from configuration
ticker *time.Ticker // own timer
output chan lp.CCMetric // own internal output channel
}
func (m *SampleTimerCollector) Init(config json.RawMessage) error {
var err error = nil
// Always set the name early in Init() to use it in cclog.Component* functions
m.name = "SampleTimerCollector"
// This is for later use, also call it early
m.setup()
// Define meta information sent with each metric
// (Can also be dynamic or this is the basic set with extension through AddMeta())
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
// Define tags sent with each metric
// The 'type' tag is always needed, it defines the granularity of the metric
// node -> whole system
// socket -> CPU socket (requires socket ID as 'type-id' tag)
// cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag)
m.tags = map[string]string{"type": "node"}
// Read in the JSON configuration
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
// Parse the read interval duration
m.interval, err = time.ParseDuration(m.config.Interval)
if err != nil {
cclog.ComponentError(m.name, "Error parsing interval:", err.Error())
return err
}
// Storage for output channel
m.output = nil
// Management channel for the timer function.
m.done = make(chan bool)
// Create the own ticker
m.ticker = time.NewTicker(m.interval)
// Start the timer loop with return functionality by sending 'true' to the done channel
m.wg.Add(1)
go func() {
for {
select {
case <-m.done:
// Exit the timer loop
cclog.ComponentDebug(m.name, "Closing...")
m.wg.Done()
return
case timestamp := <-m.ticker.C:
// This is executed on every timer tick, but we have to wait until the first
// Read() call provides the output channel
if m.output != nil {
m.ReadMetrics(timestamp)
}
}
}
}()
// Set this flag only if everything is initialized properly, all required files exist, ...
m.init = true
return err
}
// This function is called at each interval timer tick
func (m *SampleTimerCollector) ReadMetrics(timestamp time.Time) {
// Create a sample metric
value := 1.0
// If you want to measure something for a specific amount of time, use interval
// start := readState()
// time.Sleep(interval)
// stop := readState()
// value = (stop - start) / interval.Seconds()
y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil && m.output != nil {
// Send it to output channel if we have a valid channel
m.output <- y
}
}
func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Capture output channel
m.output = output
}
func (m *SampleTimerCollector) Close() {
// Send signal to the timer loop to stop it
m.done <- true
// Wait until the timer loop is done
m.wg.Wait()
// Unset flag
m.init = false
}
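Since `Interval` is parsed with `time.ParseDuration`, a matching entry in the collectors configuration file might look like this (the key `sampletimer` is a hypothetical name; the registration key is not part of this diff):

```json
{
  "sampletimer": {
    "interval": "5s"
  }
}
```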

View File

@@ -4,227 +4,110 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// See: https://www.kernel.org/doc/html/latest/hwmon/sysfs-interface.html
// /sys/class/hwmon/hwmon*/name -> coretemp
// /sys/class/hwmon/hwmon*/temp*_label -> Core 0
// /sys/class/hwmon/hwmon*/temp*_input -> 27800 = 27.8°C
// /sys/class/hwmon/hwmon*/temp*_max -> 86000 = 86.0°C
// /sys/class/hwmon/hwmon*/temp*_crit -> 100000 = 100.0°C
const HWMON_PATH = `/sys/class/hwmon`
type TempCollectorSensor struct {
name string
label string
metricName string // Default: name_label
file string
maxTempName string
maxTemp int64
critTempName string
critTemp int64
tags map[string]string
type TempCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics"`
TagOverride map[string]map[string]string `json:"tag_override"`
}
type TempCollector struct {
metricCollector
config struct {
ExcludeMetrics []string `json:"exclude_metrics"`
TagOverride map[string]map[string]string `json:"tag_override"`
ReportMaxTemp bool `json:"report_max_temperature"`
ReportCriticalTemp bool `json:"report_critical_temperature"`
}
sensors []*TempCollectorSensor
config TempCollectorConfig
}
func (m *TempCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "TempCollector"
m.setup()
m.meta = map[string]string{"source": m.name, "group": "IPMI", "unit": "degC"}
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
return err
}
}
m.meta = map[string]string{
"source": m.name,
"group": "IPMI",
"unit": "degC",
}
m.sensors = make([]*TempCollectorSensor, 0)
// Find all temperature sensor files
globPattern := filepath.Join("/sys/class/hwmon", "*", "temp*_input")
inputFiles, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("unable to glob files with pattern '%s': %v", globPattern, err)
}
if inputFiles == nil {
return fmt.Errorf("unable to find any files with pattern '%s'", globPattern)
}
// Get sensor name for each temperature sensor file
for _, file := range inputFiles {
sensor := new(TempCollectorSensor)
// sensor name
nameFile := filepath.Join(filepath.Dir(file), "name")
name, err := ioutil.ReadFile(nameFile)
if err == nil {
sensor.name = strings.TrimSpace(string(name))
}
// sensor label
labelFile := strings.TrimSuffix(file, "_input") + "_label"
label, err := ioutil.ReadFile(labelFile)
if err == nil {
sensor.label = strings.TrimSpace(string(label))
}
// sensor metric name
switch {
case len(sensor.name) == 0 && len(sensor.label) == 0:
continue
case sensor.name == "coretemp" && strings.HasPrefix(sensor.label, "Core ") ||
sensor.name == "coretemp" && strings.HasPrefix(sensor.label, "Package id "):
sensor.metricName = "temp_" + sensor.label
case len(sensor.name) != 0 && len(sensor.label) != 0:
sensor.metricName = sensor.name + "_" + sensor.label
case len(sensor.name) != 0:
sensor.metricName = sensor.name
case len(sensor.label) != 0:
sensor.metricName = sensor.label
}
sensor.metricName = strings.ToLower(sensor.metricName)
sensor.metricName = strings.Replace(sensor.metricName, " ", "_", -1)
// Add temperature prefix, if required
if !strings.Contains(sensor.metricName, "temp") {
sensor.metricName = "temp_" + sensor.metricName
}
// Sensor file
sensor.file = file
// Sensor tags
sensor.tags = map[string]string{
"type": "node",
}
// Apply tag override configuration
for key, newtags := range m.config.TagOverride {
if strings.Contains(sensor.file, key) {
sensor.tags = newtags
break
}
}
// max temperature
if m.config.ReportMaxTemp {
maxTempFile := strings.TrimSuffix(file, "_input") + "_max"
if buffer, err := ioutil.ReadFile(maxTempFile); err == nil {
if x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64); err == nil {
sensor.maxTempName = strings.Replace(sensor.metricName, "temp", "max_temp", 1)
sensor.maxTemp = x / 1000
}
}
}
// critical temperature
if m.config.ReportCriticalTemp {
criticalTempFile := strings.TrimSuffix(file, "_input") + "_crit"
if buffer, err := ioutil.ReadFile(criticalTempFile); err == nil {
if x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64); err == nil {
sensor.critTempName = strings.Replace(sensor.metricName, "temp", "crit_temp", 1)
sensor.critTemp = x / 1000
}
}
}
m.sensors = append(m.sensors, sensor)
}
// Empty sensors map
if len(m.sensors) == 0 {
return fmt.Errorf("no temperature sensors found")
}
// Finished initialization
m.init = true
return nil
}
func get_hwmon_sensors() (map[string]map[string]string, error) {
var folders []string
var sensors map[string]map[string]string
sensors = make(map[string]map[string]string)
err := filepath.Walk(HWMON_PATH, func(p string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
folders = append(folders, p)
return nil
})
if err != nil {
return sensors, err
}
for _, f := range folders {
sensors[f] = make(map[string]string)
myp := fmt.Sprintf("%s/", f)
err := filepath.Walk(myp, func(path string, info os.FileInfo, err error) error {
dir, fname := filepath.Split(path)
if strings.Contains(fname, "temp") && strings.Contains(fname, "_input") {
namefile := fmt.Sprintf("%s/%s", dir, strings.Replace(fname, "_input", "_label", -1))
name, ierr := ioutil.ReadFile(namefile)
if ierr == nil {
sensors[f][strings.Replace(string(name), "\n", "", -1)] = path
}
}
return nil
})
if err != nil {
continue
}
}
return sensors, nil
}
func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMetric) {
for _, sensor := range m.sensors {
// Read sensor file
buffer, err := ioutil.ReadFile(sensor.file)
sensors, err := get_hwmon_sensors()
if err != nil {
return
}
for _, files := range sensors {
for name, file := range files {
tags := map[string]string{"type": "node"}
for key, newtags := range m.config.TagOverride {
if strings.Contains(file, key) {
tags = newtags
break
}
}
mname := strings.Replace(name, " ", "_", -1)
if !strings.Contains(mname, "temp") {
mname = fmt.Sprintf("temp_%s", mname)
}
buffer, err := ioutil.ReadFile(string(file))
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", sensor.file, err))
continue
}
x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert temperature '%s' to int64: %v", buffer, err))
continue
}
x /= 1000
y, err := lp.New(
sensor.metricName,
sensor.tags,
m.meta,
map[string]interface{}{"value": x},
time.Now(),
)
x, err := strconv.ParseInt(strings.Replace(string(buffer), "\n", "", -1), 0, 64)
if err == nil {
output <- y
}
// max temperature
if m.config.ReportMaxTemp && sensor.maxTemp != 0 {
y, err := lp.New(
sensor.maxTempName,
sensor.tags,
m.meta,
map[string]interface{}{"value": sensor.maxTemp},
time.Now(),
)
if err == nil {
output <- y
}
}
// critical temperature
if m.config.ReportCriticalTemp && sensor.critTemp != 0 {
y, err := lp.New(
sensor.critTempName,
sensor.tags,
m.meta,
map[string]interface{}{"value": sensor.critTemp},
time.Now(),
)
y, err := lp.New(strings.ToLower(mname), tags, m.meta, map[string]interface{}{"value": int(float64(x) / 1000)}, time.Now())
if err == nil {
cclog.ComponentDebug(m.name, y)
output <- y
}
}
}
}
}
func (m *TempCollector) Close() {

View File

@@ -8,7 +8,6 @@ import (
"os/exec"
"strings"
"time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
@@ -39,14 +38,14 @@ func (m *TopProcsCollector) Init(config json.RawMessage) error {
m.config.Num_procs = int(DEFAULT_NUM_PROCS)
}
if m.config.Num_procs <= 0 || m.config.Num_procs > MAX_NUM_PROCS {
return fmt.Errorf("num_procs option must be set in 'topprocs' config (range: 1-%d)", MAX_NUM_PROCS)
return errors.New(fmt.Sprintf("num_procs option must be set in 'topprocs' config (range: 1-%d)", MAX_NUM_PROCS))
}
m.setup()
command := exec.Command("ps", "-Ao", "comm", "--sort=-pcpu")
command.Wait()
_, err = command.Output()
if err != nil {
return errors.New("failed to execute command")
return errors.New("Failed to execute command")
}
m.init = true
return nil

View File

@@ -1,189 +0,0 @@
# Configuring the CC metric collector
The configuration of the CC metric collector consists of five configuration files: one global file and four component-related files.
## Global configuration
The global file contains the paths to the other four files and some global options.
```json
{
"sinks": "sinks.json",
"collectors" : "collectors.json",
"receivers" : "receivers.json",
"router" : "router.json",
"interval": 10,
"duration": 1
}
```
Be aware that the paths are relative to the execution folder of the cc-metric-collector binary, so it is recommended to use absolute paths.
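For example, with absolute paths (the locations below are placeholders, not shipped defaults):

```json
{
  "sinks": "/etc/cc-metric-collector/sinks.json",
  "collectors": "/etc/cc-metric-collector/collectors.json",
  "receivers": "/etc/cc-metric-collector/receivers.json",
  "router": "/etc/cc-metric-collector/router.json",
  "interval": 10,
  "duration": 1
}
```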
## Component configuration
The other files are mainly lists of subcomponents: the collectors, the receivers, the router and the sinks. Their roles are best shown in a picture:
```mermaid
flowchart LR
subgraph col ["Collectors"]
direction TB
cpustat["cpustat"]
memstat["memstat"]
tempstat["tempstat"]
misc["..."]
end
subgraph Receivers ["Receivers"]
direction TB
nats["NATS"]
httprecv["HTTP"]
miscrecv[...]
end
subgraph calc["Aggregator"]
direction LR
cache["Cache"]
agg["Calculator"]
end
subgraph sinks ["Sinks"]
direction RL
influx["InfluxDB"]
ganglia["Ganglia"]
logger["Logfile"]
miscsink["..."]
end
cpustat --> CollectorManager["CollectorManager"]
memstat --> CollectorManager
tempstat --> CollectorManager
misc --> CollectorManager
nats --> ReceiverManager["ReceiverManager"]
httprecv --> ReceiverManager
miscrecv --> ReceiverManager
CollectorManager --> newrouter["Router"]
ReceiverManager -.-> newrouter
calc -.-> newrouter
newrouter --> SinkManager["SinkManager"]
newrouter -.-> calc
SinkManager --> influx
SinkManager --> ganglia
SinkManager --> logger
SinkManager --> miscsink
```
There are four parts:
- The collectors read data from files, execute commands and call dynamically loaded library functions, and send the resulting metrics to the router
- The router can process metrics by caching them and evaluating functions and conditions on them
- The sinks send the metrics to storage backends
- The receivers can be used to receive metrics from other collectors and forward them to the router. They can be used to create a tree-like structure of collectors.
(Perhaps a better distinction between collectors and receivers is that collectors are called periodically, while receivers have their own logic and may submit metrics at any time.)
### Collectors configuration file
The collectors configuration file defines which metrics should be queried from the system. The metric gathering is logically grouped in so-called 'Collectors'. So there are Collectors to read CPU, memory or filesystem statistics. The collectors configuration file is a list of these collectors with collector-specific configurations:
```json
{
"cpustat" : {},
"diskstat": {
"exclude_metrics": [
"disk_total"
]
}
}
```
The first one is the CPU statistics collector without any collector-specific setting. The second one enables disk mount statistics but excludes the metric `disk_total`.
All names and possible collector-specific configuration options can be found [here](../collectors/README.md).
Some collectors might dynamically load shared libraries. In order to enable these collectors, make sure that the shared library path is part of the `LD_LIBRARY_PATH` environment variable.
### Sinks configuration file
The sinks define the output/sending of metrics. The metrics can be forwarded to multiple sinks, even to sinks of the same type. The sinks configuration file is a list of these sinks, each with an individual name.
```json
{
"myinflux" : {
"type" : "influxasync",
"host": "localhost",
"port": "8086",
"organization" : "testorga",
"database" : "testbucket",
"password" : "<my secret JWT>"
},
"companyinflux" : {
"type" : "influxasync",
"host": "companyhost",
"port": "8086",
"organization" : "company",
"database" : "main",
"password" : "<company's secret JWT>"
}
}
```
The above example configuration file defines two sinks, both of type `influxasync`. They are differentiated internally by their names: `myinflux` and `companyinflux`.
All types and possible sink-specific configuration options can be found [here](../sinks/README.md).
Some sinks might dynamically load shared libraries. In order to enable these sinks, make sure that the shared library path is part of the `LD_LIBRARY_PATH` environment variable.
### Router configuration file
The collectors and the sinks are connected through the router. The router forwards the metrics to the sinks but also enables some data processing. A common example is tagging all passing metrics, e.g. adding `cluster=mycluster`. Aggregations like "take the average of all 'ipc' metrics" (ipc -> instructions per cycle) are possible as well. Since the configuration of these aggregations can be quite complicated, we refer to the router's [README](../internal/metricRouter/README.md).
A simple router configuration file to start with looks like this:
```json
{
"add_tags" : [
{
"key" : "cluster",
"value" : "mycluster",
"if" : "*"
}
],
"interval_timestamp" : false,
"num_cache_intervals" : 0
}
```
With the `add_tags` section, we attach the `cluster=mycluster` tag to every metric (`*` matches all). Setting `interval_timestamp` to `false` tells the router not to touch the timestamps of metrics; setting it to `true` sends all metrics within an interval with a common timestamp to avoid later alignment issues. Setting `num_cache_intervals` to `0` disables the cache completely; the cache is only required if you want to do complex metric aggregations.
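Conditions other than the catch-all `*` should work as well; a sketch assuming the `if` field accepts the expressions the MetricAggregator evaluates for the router (see the MetricAggregator README), attaching a tag only to one specific metric (key, value and metric name below are illustrative):

```json
{
  "add_tags" : [
    {
      "key" : "group",
      "value" : "TEMP",
      "if" : "metric.Name() == 'temp_package_id_0'"
    }
  ]
}
```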
All configuration options can be found [here](../internal/metricRouter/README.md).
### Receivers configuration file
The receivers are a special feature of the CC Metric Collector to enable simpler integration into existing setups. While collectors query data from the local system, the receivers commonly get data from other systems through some network technology like HTTP or NATS. The idea is to keep the current setup but send the data to a CC Metric Collector, which forwards it to the destination system (if a sink exists for it). For most setups, the receivers are not required and the receiver config file should contain only an empty JSON map (`{}`).
```json
{
"nats_rack0": {
"type": "nats",
"address" : "nats-server.example.org",
"port" : "4222",
"subject" : "rack0",
},
"nats_rack1": {
"type": "nats",
"address" : "nats-server.example.org",
"port" : "4222",
"subject" : "rack1",
}
}
```
This example configuration creates two receivers with the names `nats_rack0` and `nats_rack1`. While one subscribes to metrics published with the `rack0` subject, the other one subscribes to the `rack1` subject. The NATS server is the same, as it manages all subjects in a subnet. (As an example, the router could add the tags `rack=0` and `rack=1` respectively to the received metrics.)
All types and possible receiver-specific configuration options can be found [here](../receivers/README.md).

1
go.mod
View File

@@ -14,6 +14,7 @@ require (
require (
github.com/PaesslerAG/gval v1.1.2
github.com/golang/protobuf v1.5.2 // indirect
github.com/mattn/go-sqlite3 v1.14.11
github.com/nats-io/nats-server/v2 v2.7.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
)

2
go.sum
View File

@@ -54,6 +54,8 @@ github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.14.11 h1:gt+cp9c0XGqe9S/wAHTL3n/7MqY+siPWgWJgqdsFrzQ=
github.com/mattn/go-sqlite3 v1.14.11/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY=

View File

@@ -2,187 +2,239 @@ package ccmetric
import (
"fmt"
"sort"
"time"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
write "github.com/influxdata/influxdb-client-go/v2/api/write"
lp "github.com/influxdata/line-protocol" // MIT license
)
// Most functions are derived from github.com/influxdata/line-protocol/metric.go
// The metric type is extended with an extra meta information list re-using the Tag
// type.
//
// See: https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/
type ccMetric struct {
name string // Measurement name
meta map[string]string // map of meta data tags
tags map[string]string // map of tags
fields map[string]interface{} // map of fields
tm time.Time // timestamp
name string
tags []*lp.Tag
fields []*lp.Field
tm time.Time
meta []*lp.Tag
}
// ccMetric access functions
type CCMetric interface {
ToPoint(metaAsTags map[string]bool) *write.Point // Generate influxDB point for data type ccMetric
ToLineProtocol(metaAsTags map[string]bool) string // Generate influxDB line protocol for data type ccMetric
Name() string // Get metric name
SetName(name string) // Set metric name
Time() time.Time // Get timestamp
SetTime(t time.Time) // Set timestamp
Tags() map[string]string // Map of tags
AddTag(key, value string) // Add a tag
GetTag(key string) (value string, ok bool) // Get a tag by its key
HasTag(key string) (ok bool) // Check if a tag key is present
RemoveTag(key string) // Remove a tag by its key
Meta() map[string]string // Map of meta data tags
AddMeta(key, value string) // Add a meta data tag
GetMeta(key string) (value string, ok bool) // Get a meta data tag addressed by its key
HasMeta(key string) (ok bool) // Check if a meta data key is present
RemoveMeta(key string) // Remove a meta data tag by its key
Fields() map[string]interface{} // Map of fields
AddField(key string, value interface{}) // Add a field
GetField(key string) (value interface{}, ok bool) // Get a field addressed by its key
HasField(key string) (ok bool) // Check if a field key is present
RemoveField(key string) // Remove a field addressed by its key
lp.MutableMetric
Name() string
AddTag(key, value string)
GetTag(key string) (string, bool)
HasTag(key string) bool
RemoveTag(key string)
Tags() map[string]string
TagList() []*lp.Tag
AddMeta(key, value string)
GetMeta(key string) (string, bool)
HasMeta(key string) bool
RemoveMeta(key string)
Meta() map[string]string
MetaList() []*lp.Tag
AddField(key string, value interface{})
GetField(key string) (interface{}, bool)
HasField(key string) bool
RemoveField(key string)
Fields() map[string]interface{}
FieldList() []*lp.Field
String() string
SetTime(t time.Time)
}
func (m *ccMetric) Meta() map[string]string {
meta := make(map[string]string, len(m.meta))
for _, m := range m.meta {
meta[m.Key] = m.Value
}
return meta
}
func (m *ccMetric) MetaList() []*lp.Tag {
return m.meta
}
// String implements the stringer interface for data type ccMetric
func (m *ccMetric) String() string {
return fmt.Sprintf(
"Name: %s, Tags: %+v, Meta: %+v, fields: %+v, Timestamp: %d",
m.name, m.tags, m.meta, m.fields, m.tm.UnixNano(),
)
return fmt.Sprintf("%s %v %v %v %d", m.name, m.Tags(), m.Meta(), m.Fields(), m.tm.UnixNano())
}
// ToLineProtocol generates influxDB line protocol for data type ccMetric
func (m *ccMetric) ToPoint(metaAsTags map[string]bool) (p *write.Point) {
p = influxdb2.NewPoint(m.name, m.tags, m.fields, m.tm)
for key, ok1 := range metaAsTags {
if val, ok2 := m.GetMeta(key); ok1 && ok2 {
p.AddTag(key, val)
}
}
return p
}
// ToLineProtocol generates influxDB line protocol for data type ccMetric
func (m *ccMetric) ToLineProtocol(metaAsTags map[string]bool) string {
return write.PointToLineProtocol(
m.ToPoint(metaAsTags),
time.Nanosecond,
)
}
// Name returns the measurement name
func (m *ccMetric) Name() string {
return m.name
}
// SetName sets the measurement name
func (m *ccMetric) SetName(name string) {
m.name = name
func (m *ccMetric) Tags() map[string]string {
tags := make(map[string]string, len(m.tags))
for _, tag := range m.tags {
tags[tag.Key] = tag.Value
}
return tags
}
func (m *ccMetric) TagList() []*lp.Tag {
return m.tags
}
func (m *ccMetric) Fields() map[string]interface{} {
fields := make(map[string]interface{}, len(m.fields))
for _, field := range m.fields {
fields[field.Key] = field.Value
}
return fields
}
func (m *ccMetric) FieldList() []*lp.Field {
return m.fields
}
// Time returns timestamp
func (m *ccMetric) Time() time.Time {
return m.tm
}
// SetTime sets the timestamp
func (m *ccMetric) SetTime(t time.Time) {
m.tm = t
}
// Tags returns the list of tags as key-value mapping
func (m *ccMetric) Tags() map[string]string {
return m.tags
}
// AddTag adds a tag (consisting of key and value) to the map of tags
func (m *ccMetric) AddTag(key, value string) {
m.tags[key] = value
}
// GetTag returns the tag with tag's key equal to <key>
func (m *ccMetric) GetTag(key string) (string, bool) {
value, ok := m.tags[key]
return value, ok
}
// HasTag checks if a tag with key equal to <key> is present in the list of tags
func (m *ccMetric) HasTag(key string) bool {
_, ok := m.tags[key]
return ok
for _, tag := range m.tags {
if tag.Key == key {
return true
}
}
return false
}
func (m *ccMetric) GetTag(key string) (string, bool) {
for _, tag := range m.tags {
if tag.Key == key {
return tag.Value, true
}
}
return "", false
}
// RemoveTag removes the tag with tag's key equal to <key>
func (m *ccMetric) RemoveTag(key string) {
delete(m.tags, key)
for i, tag := range m.tags {
if tag.Key == key {
copy(m.tags[i:], m.tags[i+1:])
m.tags[len(m.tags)-1] = nil
m.tags = m.tags[:len(m.tags)-1]
return
}
}
}
// Meta returns the meta data tags as key-value mapping
func (m *ccMetric) Meta() map[string]string {
return m.meta
func (m *ccMetric) AddTag(key, value string) {
for i, tag := range m.tags {
if key > tag.Key {
continue
}
if key == tag.Key {
tag.Value = value
return
}
m.tags = append(m.tags, nil)
copy(m.tags[i+1:], m.tags[i:])
m.tags[i] = &lp.Tag{Key: key, Value: value}
return
}
m.tags = append(m.tags, &lp.Tag{Key: key, Value: value})
}
// AddMeta adds a meta data tag (consisting of key and value) to the map of meta data tags
func (m *ccMetric) AddMeta(key, value string) {
m.meta[key] = value
}
// GetMeta returns the meta data tag with meta data's key equal to <key>
func (m *ccMetric) GetMeta(key string) (string, bool) {
value, ok := m.meta[key]
return value, ok
}
// HasMeta checks if a meta data tag with meta data's key equal to <key> is present in the map of meta data tags
func (m *ccMetric) HasMeta(key string) bool {
_, ok := m.meta[key]
return ok
for _, tag := range m.meta {
if tag.Key == key {
return true
}
}
return false
}
func (m *ccMetric) GetMeta(key string) (string, bool) {
for _, tag := range m.meta {
if tag.Key == key {
return tag.Value, true
}
}
return "", false
}
// RemoveMeta removes the meta data tag with tag's key equal to <key>
func (m *ccMetric) RemoveMeta(key string) {
delete(m.meta, key)
for i, tag := range m.meta {
if tag.Key == key {
copy(m.meta[i:], m.meta[i+1:])
m.meta[len(m.meta)-1] = nil
m.meta = m.meta[:len(m.meta)-1]
return
}
}
}
// Fields returns the list of fields as key-value-mapping
func (m *ccMetric) Fields() map[string]interface{} {
return m.fields
func (m *ccMetric) AddMeta(key, value string) {
for i, tag := range m.meta {
if key > tag.Key {
continue
}
if key == tag.Key {
tag.Value = value
return
}
m.meta = append(m.meta, nil)
copy(m.meta[i+1:], m.meta[i:])
m.meta[i] = &lp.Tag{Key: key, Value: value}
return
}
m.meta = append(m.meta, &lp.Tag{Key: key, Value: value})
}
// AddField adds a field (consisting of key and value) to the map of fields
func (m *ccMetric) AddField(key string, value interface{}) {
m.fields[key] = value
for i, field := range m.fields {
if key == field.Key {
m.fields[i] = &lp.Field{Key: key, Value: convertField(value)}
return
}
}
m.fields = append(m.fields, &lp.Field{Key: key, Value: convertField(value)})
}
// GetField returns the field with field's key equal to <key>
func (m *ccMetric) GetField(key string) (interface{}, bool) {
v, ok := m.fields[key]
return v, ok
for _, field := range m.fields {
if field.Key == key {
return field.Value, true
}
}
return "", false
}
// HasField checks if a field with field's key equal to <key> is present in the map of fields
func (m *ccMetric) HasField(key string) bool {
_, ok := m.fields[key]
return ok
for _, field := range m.fields {
if field.Key == key {
return true
}
}
return false
}
// RemoveField removes the field with field's key equal to <key>
// from the map of fields
func (m *ccMetric) RemoveField(key string) {
delete(m.fields, key)
for i, field := range m.fields {
if field.Key == key {
copy(m.fields[i:], m.fields[i+1:])
m.fields[len(m.fields)-1] = nil
m.fields = m.fields[:len(m.fields)-1]
return
}
}
}
// New creates a new measurement point
func New(
name string,
tags map[string]string,
@@ -192,79 +244,85 @@ func New(
) (CCMetric, error) {
m := &ccMetric{
name: name,
tags: make(map[string]string, len(tags)),
meta: make(map[string]string, len(meta)),
fields: make(map[string]interface{}, len(fields)),
tags: nil,
fields: nil,
tm: tm,
meta: nil,
}
// deep copy tags, meta data tags and fields
if len(tags) > 0 {
m.tags = make([]*lp.Tag, 0, len(tags))
for k, v := range tags {
m.tags[k] = v
m.tags = append(m.tags,
&lp.Tag{Key: k, Value: v})
}
sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
}
if len(meta) > 0 {
m.meta = make([]*lp.Tag, 0, len(meta))
for k, v := range meta {
m.meta[k] = v
m.meta = append(m.meta,
&lp.Tag{Key: k, Value: v})
}
sort.Slice(m.meta, func(i, j int) bool { return m.meta[i].Key < m.meta[j].Key })
}
if len(fields) > 0 {
m.fields = make([]*lp.Field, 0, len(fields))
for k, v := range fields {
v := convertField(v)
if v == nil {
continue
}
m.fields[k] = v
m.AddField(k, v)
}
}
return m, nil
}
// FromMetric copies the metric <other>
func FromMetric(other ccMetric) CCMetric {
func FromMetric(other CCMetric) CCMetric {
m := &ccMetric{
name: other.Name(),
tags: make(map[string]string, len(other.tags)),
meta: make(map[string]string, len(other.meta)),
fields: make(map[string]interface{}, len(other.fields)),
tags: make([]*lp.Tag, len(other.TagList())),
fields: make([]*lp.Field, len(other.FieldList())),
meta: make([]*lp.Tag, len(other.MetaList())),
tm: other.Time(),
}
// deep copy tags, meta data tags and fields
for key, value := range other.tags {
m.tags[key] = value
for i, tag := range other.TagList() {
m.tags[i] = &lp.Tag{Key: tag.Key, Value: tag.Value}
}
for key, value := range other.meta {
m.meta[key] = value
for i, s := range other.MetaList() {
m.meta[i] = &lp.Tag{Key: s.Key, Value: s.Value}
}
for key, value := range other.fields {
m.fields[key] = value
for i, field := range other.FieldList() {
m.fields[i] = &lp.Field{Key: field.Key, Value: field.Value}
}
return m
}
// FromInfluxMetric copies the influxDB line protocol metric <other>
func FromInfluxMetric(other lp.Metric) CCMetric {
m := &ccMetric{
name: other.Name(),
tags: make(map[string]string),
meta: make(map[string]string),
fields: make(map[string]interface{}),
tags: make([]*lp.Tag, len(other.TagList())),
fields: make([]*lp.Field, len(other.FieldList())),
meta: make([]*lp.Tag, 0),
tm: other.Time(),
}
// deep copy tags and fields
for _, otherTag := range other.TagList() {
m.tags[otherTag.Key] = otherTag.Value
for i, tag := range other.TagList() {
m.tags[i] = &lp.Tag{Key: tag.Key, Value: tag.Value}
}
for _, otherField := range other.FieldList() {
m.fields[otherField.Key] = otherField.Value
for i, field := range other.FieldList() {
m.fields[i] = &lp.Field{Key: field.Key, Value: field.Value}
}
return m
}
// convertField converts data types of fields by the following schemata:
// *float32, *float64, float32, float64 -> float64
// *int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
// *uint, *uint8, *uint16, *uint32, *uint64, uint, uint8, uint16, uint32, uint64 -> uint64
// *[]byte, *string, []byte, string -> string
// *bool, bool -> bool
func convertField(v interface{}) interface{} {
switch v := v.(type) {
case float64:

View File

@@ -6,17 +6,12 @@ import (
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
cclogger "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
)
const SYSFS_NUMABASE = `/sys/devices/system/node`
const SYSFS_CPUBASE = `/sys/devices/system/cpu`
const PROCFS_CPUINFO = `/proc/cpuinfo`
// intArrayContains scans an array of ints and checks whether the value str is present in the array
// If the specified value is found, the corresponding array index is returned.
// The bool value is used to signal success or failure
@@ -29,26 +24,20 @@ func intArrayContains(array []int, str int) (int, bool) {
return -1, false
}
func fileToInt(path string) int {
buffer, err := ioutil.ReadFile(path)
if err != nil {
log.Print(err)
cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
return -1
}
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
var id int64
//_, err = fmt.Scanf("%d", sbuffer, &id)
id, err = strconv.ParseInt(sbuffer, 10, 32)
if err != nil {
cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
return -1
}
return int(id)
}
// stringArrayContains scans an array of strings and checks whether the value str is present in the array
// If the specified value is found, the corresponding array index is returned.
// The bool value is used to signal success or failure
// func stringArrayContains(array []string, str string) (int, bool) {
// for i, a := range array {
// if a == str {
// return i, true
// }
// }
// return -1, false
// }
func SocketList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
@@ -73,13 +62,13 @@ func SocketList() []int {
}
func CpuList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
cpulist := make([]int, 0)
var cpulist []int
for _, line := range ll {
if strings.HasPrefix(line, "processor") {
lv := strings.Fields(line)
@@ -97,84 +86,6 @@ func CpuList() []int {
return cpulist
}
func CoreList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
corelist := make([]int, 0)
for _, line := range ll {
if strings.HasPrefix(line, "core id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return corelist
}
_, found := intArrayContains(corelist, int(id))
if !found {
corelist = append(corelist, int(id))
}
}
}
return corelist
}
func NumaNodeList() []int {
numaList := make([]int, 0)
globPath := filepath.Join(string(SYSFS_NUMABASE), "node*")
regexPath := filepath.Join(string(SYSFS_NUMABASE), "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "NumaNodeList", err.Error())
}
for _, f := range files {
if !regex.MatchString(f) {
continue
}
finfo, err := os.Lstat(f)
if err != nil {
continue
}
if !finfo.IsDir() {
continue
}
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil {
if _, found := intArrayContains(numaList, id); !found {
numaList = append(numaList, id)
}
}
}
}
return numaList
}
func DieList() []int {
cpulist := CpuList()
dielist := make([]int, 0)
for _, c := range cpulist {
diepath := filepath.Join(string(SYSFS_CPUBASE), fmt.Sprintf("cpu%d", c), "topology/die_id")
dieid := fileToInt(diepath)
if dieid > 0 {
_, found := intArrayContains(dielist, int(dieid))
if !found {
dielist = append(dielist, int(dieid))
}
}
}
if len(dielist) > 0 {
return dielist
}
return SocketList()
}
type CpuEntry struct {
Cpuid int
SMT int
@@ -190,7 +101,7 @@ func CpuData() []CpuEntry {
buffer, err := ioutil.ReadFile(path)
if err != nil {
log.Print(err)
//cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
return -1
}
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
@@ -218,14 +129,14 @@ func CpuData() []CpuEntry {
getSMT := func(cpuid int, basepath string) int {
buffer, err := ioutil.ReadFile(fmt.Sprintf("%s/thread_siblings_list", basepath))
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
log.Print(err)
}
threadlist := make([]int, 0)
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
for _, x := range strings.Split(sbuffer, ",") {
id, err := strconv.ParseInt(x, 10, 32)
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
log.Print(err)
}
threadlist = append(threadlist, int(id))
}
@@ -238,25 +149,21 @@ func CpuData() []CpuEntry {
}
getNumaDomain := func(basepath string) int {
globPath := filepath.Join(basepath, "node*")
regexPath := filepath.Join(basepath, "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
files, err := filepath.Glob(fmt.Sprintf("%s/node*", basepath))
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getNumaDomain", err.Error())
log.Print(err)
}
for _, f := range files {
finfo, err := os.Lstat(f)
if err == nil && finfo.IsDir() {
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil && (finfo.IsDir() || finfo.Mode()&os.ModeSymlink != 0) {
var id int
parts := strings.Split(f, "/")
_, err = fmt.Sscanf(parts[len(parts)-1], "node%d", &id)
if err == nil {
return id
}
}
}
}
return 0
}
@@ -264,36 +171,29 @@ func CpuData() []CpuEntry {
for _, c := range CpuList() {
clist = append(clist, CpuEntry{Cpuid: c})
}
for i, centry := range clist {
for _, centry := range clist {
centry.Socket = -1
centry.Numadomain = -1
centry.Die = -1
centry.Core = -1
// Set base directory for topology lookup
cpustr := fmt.Sprintf("cpu%d", centry.Cpuid)
base := filepath.Join("/sys/devices/system/cpu", cpustr)
topoBase := filepath.Join(base, "topology")
base := fmt.Sprintf("/sys/devices/system/cpu/cpu%d/topology", centry.Cpuid)
// Lookup CPU core id
centry.Core = getCore(topoBase)
centry.Core = getCore(base)
// Lookup CPU socket id
centry.Socket = getSocket(topoBase)
centry.Socket = getSocket(base)
// Lookup CPU die id
centry.Die = getDie(topoBase)
if centry.Die < 0 {
centry.Die = centry.Socket
}
centry.Die = getDie(base)
// Lookup SMT thread id
centry.SMT = getSMT(centry.Cpuid, topoBase)
centry.SMT = getSMT(centry.Cpuid, base)
// Lookup NUMA domain id
centry.Numadomain = getNumaDomain(base)
// Update values in output list
clist[i] = centry
}
return clist
}
@@ -303,41 +203,35 @@ type CpuInformation struct {
SMTWidth int
NumSockets int
NumDies int
NumCores int
NumNumaDomains int
}
func CpuInfo() CpuInformation {
var c CpuInformation
smtList := make([]int, 0)
numaList := make([]int, 0)
dieList := make([]int, 0)
socketList := make([]int, 0)
coreList := make([]int, 0)
smt := 0
numa := 0
die := 0
socket := 0
cdata := CpuData()
for _, d := range cdata {
if _, ok := intArrayContains(smtList, d.SMT); !ok {
smtList = append(smtList, d.SMT)
if d.SMT > smt {
smt = d.SMT
}
if _, ok := intArrayContains(numaList, d.Numadomain); !ok {
numaList = append(numaList, d.Numadomain)
if d.Numadomain > numa {
numa = d.Numadomain
}
if _, ok := intArrayContains(dieList, d.Die); !ok {
dieList = append(dieList, d.Die)
if d.Die > die {
die = d.Die
}
if _, ok := intArrayContains(socketList, d.Socket); !ok {
socketList = append(socketList, d.Socket)
}
if _, ok := intArrayContains(coreList, d.Core); !ok {
coreList = append(coreList, d.Core)
if d.Socket > socket {
socket = d.Socket
}
}
c.NumNumaDomains = len(numaList)
c.SMTWidth = len(smtList)
c.NumDies = len(dieList)
c.NumCores = len(coreList)
c.NumSockets = len(socketList)
c.NumNumaDomains = numa + 1
c.SMTWidth = smt + 1
c.NumDies = die + 1
c.NumSockets = socket + 1
c.NumHWthreads = len(cdata)
return c
}
@@ -381,47 +275,3 @@ func GetCpuCore(cpuid int) int {
}
return -1
}
func GetSocketCpus(socket int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Socket == socket {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetNumaDomainCpus(domain int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Numadomain == domain {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetDieCpus(die int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Die == die {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetCoreCpus(core int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Core == core {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}

View File

@@ -1,38 +0,0 @@
# The MetricAggregator
In some cases, further combination of metrics or raw values is required. For that, strings like `foo + 1` with a runtime-dependent `foo` need to be evaluated. The MetricAggregator relies on the [`gval`](https://github.com/PaesslerAG/gval) Golang package to perform all expression evaluation. The `gval` package provides the basic arithmetic operations, but the MetricAggregator defines additional ones.
**Note**: To get an impression of which expressions can be handled by `gval`, see its [README](https://github.com/PaesslerAG/gval/blob/master/README.md)
## Simple expression evaluation
For simple expression evaluation, the MetricAggregator provides two functions for different use cases:
- `EvalBoolCondition(expression string, params map[string]interface{})`: Used by the MetricRouter to match metrics like `metric.Name() == 'mymetric'`
- `EvalFloat64Condition(expression string, params map[string]interface{})`: Used by the MetricRouter and LikwidCollector to derive new values like `(PMC0+PMC1)/PMC3`
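As a minimal sketch of how these two helpers might be called (hypothetical usage; since the package lives under `internal/`, this only compiles inside the repository):
```go
package main

import (
	"fmt"

	agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
)

func main() {
	// Parameter map as the MetricRouter would assemble it for a metric
	params := map[string]interface{}{"name": "mymetric"}

	// Boolean condition, e.g. for matching metrics in the MetricRouter
	if ok, err := agg.EvalBoolCondition("name == 'mymetric'", params); err == nil && ok {
		fmt.Println("condition matched")
	}

	// Float64 expression, e.g. for deriving values in the LikwidCollector
	counters := map[string]interface{}{"PMC0": 1.0, "PMC1": 2.0, "PMC3": 3.0}
	if val, err := agg.EvalFloat64Condition("(PMC0+PMC1)/PMC3", counters); err == nil {
		fmt.Println(val)
	}
}
```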
## MetricAggregator extensions for `gval`
The MetricAggregator provides these functions in addition to the `Full` language in `gval`:
- `sum(array)`: Sum up values in an array like `sum(values)`
- `min(array)`: Get the minimum value in an array like `min(values)`
- `avg(array)`: Get the mean value in an array like `avg(values)`
- `mean(array)`: Get the mean value in an array like `mean(values)`
- `max(array)`: Get the maximum value in an array like `max(values)`
- `len(array)`: Get the length of an array like `len(values)`
- `median(array)`: Get the median value in an array like `median(values)`
- `in`: Check existence in an array like `0 in getCpuList()` to check whether there is an entry `0`. Also substring matching works like `temp in metric.Name()`
- `match`: Regular-expression matching like `match('temp_cores_%d+', metric.Name())`. **Note:** every `\` in a regex has to be replaced with `%`
- `getCpuCore(cpuid)`: For a CPU id, return the corresponding CPU core id like `getCpuCore(0)`
- `getCpuSocket(cpuid)`: For a CPU id, return the corresponding CPU socket id
- `getCpuNuma(cpuid)`: For a CPU id, return the corresponding NUMA domain id
- `getCpuDie(cpuid)`: For a CPU id, return the corresponding CPU die id
- `getSockCpuList(sockid)`: For a given CPU socket id, the list of CPU ids is returned, e.g. the CPUs on socket 1: `getSockCpuList(1)`
- `getNumaCpuList(numaid)`: For a given NUMA node id, the list of CPU ids is returned
- `getDieCpuList(dieid)`: For a given CPU die id, the list of CPU ids is returned
- `getCoreCpuList(coreid)`: For a given CPU core id, the list of CPU ids is returned
- `getCpuList`: Get the list of all CPUs
## Limitations
- Since the expressions are stored in JSON files, which do not allow `"` inside strings without proper escaping, you have to use `''` for strings.
- Since `\` is interpreted by JSON as an escape character, it cannot be used in the expressions. But it is required to write regular expressions. So instead of `\`, use `%` and the MetricAggregator replaces them after reading the JSON file.
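To illustrate both limitations at once: a condition in a JSON config file uses `''` for the string and writes the regular expression `temp_core_\d+` with `%` instead of `\`, for example:
```json
{
  "drop_metrics_if" : [
    "match('temp_core_%d+', metric.Name())"
  ]
}
```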

View File

@@ -6,10 +6,6 @@ The CCMetric router sits in between the collectors and the sinks and can be used
```json
{
"num_cache_intervals" : 1,
"interval_timestamp" : true,
"hostname_tag" : "hostname",
"max_forward" : 50,
"add_tags" : [
{
"key" : "cluster",
@@ -29,80 +25,16 @@ The CCMetric router sits in between the collectors and the sinks and can be used
"if" : "*"
}
],
"interval_aggregates" : [
{
"name" : "temp_cores_avg",
"if" : "match('temp_core_%d+', metric.Name())",
"function" : "avg(values)",
"tags" : {
"type" : "node"
},
"meta" : {
"group": "IPMI",
"unit": "degC",
"source": "TempCollector"
}
}
],
"drop_metrics" : [
"not_interesting_metric_at_all"
],
"drop_metrics_if" : [
"match('temp_core_%d+', metric.Name())"
],
"rename_metrics" : {
"metric_12345" : "mymetric"
}
"interval_timestamp" : true
}
```
There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.
# Processing order in the router
# Conditional manipulation of tags
- Add the `hostname_tag` tag (if sent by collectors or cache)
- If `interval_timestamp == true`, change time of metrics
- Check if metric should be dropped (`drop_metrics` and `drop_metrics_if`)
- Add tags from `add_tags`
- Delete tags from `del_tags`
- Rename metric based on `rename_metrics` and store old name as `oldname` in meta information
- Add tags from `add_tags` (if you used the new name in the `if` condition)
- Delete tags from `del_tags` (if you used the new name in the `if` condition)
- Send to sinks
- Move to cache (if `num_cache_intervals > 0`)
The `if` setting allows conditional testing of a single metric like in the example:
# The `interval_timestamp` option
The collectors' `Read()` functions are not called simultaneously and therefore the metrics gathered in an interval can have different timestamps. If you want to avoid that and have a common timestamp (the beginning of the interval), set this option to `true` and the MetricRouter sets the time.
# The `num_cache_intervals` option
If the MetricRouter should buffer metrics of intervals in a MetricCache, this option specifies the number of past intervals that should be kept. If `num_cache_intervals = 0`, the cache is disabled. With `num_cache_intervals = 1`, only the metrics of the last interval are buffered.
A `num_cache_intervals > 0` is required to use the `interval_aggregates` option.
# The `hostname_tag` option
By default, the router tags all locally created metrics with the hostname. The default tag name is `hostname`, but it can be changed if your organization prefers something else.
# The `max_forward` option
Every time the router receives a metric through any of the channels, it tries to directly read up to `max_forward` metrics from the same channel. This avoids the router thread going to sleep and waking up with every single arriving metric. The default is `50` metrics at once, and `max_forward` needs to be greater than `1`.
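The underlying pattern is: after one blocking receive, drain whatever is already buffered in the channel, up to the limit. A self-contained sketch of that batching idea (all names hypothetical, not the router's actual code):
```go
package main

import "fmt"

// drainForward forwards one item, then reads up to maxForward-1 further items
// that are already queued, so the goroutine does not sleep between each metric.
func drainForward(input chan int, maxForward int, forward func(int)) {
	p := <-input
	forward(p)
	for i := 0; len(input) > 0 && i < (maxForward-1); i++ {
		forward(<-input)
	}
}

func main() {
	ch := make(chan int, 8)
	for i := 0; i < 5; i++ {
		ch <- i
	}
	drainForward(ch, 50, func(v int) { fmt.Println("forward", v) })
}
```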
# The `rename_metrics` option
In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, executables and libraries, they might change from system to system (or installation to installation, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`:
```json
{
"oldname" : "newname",
"clock_mhz" : "clock"
}
```
# Conditional manipulation of tags (`add_tags` and `del_tags`)
Common config format:
```json
{
"key" : "test",
@@ -111,131 +43,8 @@ Common config format:
}
```
## The `del_tags` option
If the CCMetric name is equal to 'temp_package_id_0', it adds an additional tag `test=testing` to the metric.
The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.
If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case.
Never delete tags:
- `hostname`
- `type`
- `type-id`
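A hypothetical `delete_tags` entry that strips a collector-specific `unit` tag from a single metric could look like this:
```json
[
  {
    "key" : "unit",
    "if" : "name == 'temp_package_id_0'"
  }
]
```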
## The `add_tags` option
In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.
If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.
For this metric, a more useful example would be:
```json
[
{
"key" : "type",
"value" : "socket",
"if" : "name == 'temp_package_id_0'"
},
{
"key" : "type-id",
"value" : "0",
"if" : "name == 'temp_package_id_0'"
}
]
```
The metric `temp_package_id_0` corresponds to the temperature of the first CPU socket (=package). With the above configuration, the tags would reflect that, because commonly the [TempCollector](../../collectors/tempMetric.md) submits only `node` metrics.
In order to match all metrics, you can use `*`, for example to add a tag by default. This is useful to attach system-specific tags like `cluster=testcluster`:
```json
{
"key" : "cluster",
"value" : "testcluster",
"if" : "*"
}
```
# Dropping metrics
In some cases, you want to drop a metric and don't get it forwarded to the sinks. There are two options based on the required specification:
- Based only on the metric name -> `drop_metrics` section
- An evaluable condition with more overhead -> `drop_metrics_if` section
## The `drop_metrics` section
The argument is a list of metric names. No further checks are performed, only a comparison of the metric name:
```json
{
"drop_metrics" : [
"drop_metric_1",
"drop_metric_2"
]
}
```
The example drops all metrics with the name `drop_metric_1` and `drop_metric_2`.
## The `drop_metrics_if` section
This option takes a list of evaluable conditions and evaluates them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).
```json
{
"drop_metrics_if" : [
"match('drop_metric_%d+', name)",
"match('cpu', type) && type-id == 0"
]
}
```
The first line is comparable to the example in `drop_metrics`: it drops all metrics starting with `drop_metric_` and ending with a number. The second line drops all metrics of the first hardware thread (**not** recommended).
In order to match all metrics, you can use `*`, for example to add a tag by default, like the `cluster=testcluster` tag in the example.
# Aggregate metric values of the current interval with the `interval_aggregates` option
**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0
In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent in the next interval but carry the timestamp of the beginning of the previous interval.
```json
"interval_aggregates" : [
{
"name" : "new_metric_name",
"if" : "match('sub_metric_%d+', metric.Name())",
"function" : "avg(values)",
"tags" : {
"key" : "value",
"type" : "node"
},
"meta" : {
"key" : "value",
"group": "IPMI",
"unit": "<copy>",
}
}
]
```
The above configuration collects all metric values for metrics whose `if` condition evaluates to `true`. Afterwards it calculates the average `avg` of the `values` (list of all metrics' field `value`) and creates a new CCMetric with the name `new_metric_name`, adding the tags in `tags` and the meta information in `meta`. The special value `<copy>` searches the input metrics and copies the value of the first match of `key` to the new CCMetric.
If you are not interested in the input metrics `sub_metric_%d+` at all, you can add the same condition used here to the `drop_metrics_if` section to drop them.
Use cases for `interval_aggregates`:
- Combine multiple metrics of a collector into a new one, like the [MemstatCollector](../../collectors/memstatMetric.md) does for `mem_used`:
```json
{
"name" : "mem_used",
"if" : "source == 'MemstatCollector'",
"function" : "sum(mem_total) - (sum(mem_free) + sum(mem_buffers) + sum(mem_cached))",
"tags" : {
"type" : "node"
},
"meta" : {
"group": "<copy>",
"unit": "<copy>",
"source": "<copy>"
}
}
```

View File

@@ -1,12 +1,10 @@
package metricAggregator
package metricRouter
import (
"context"
"fmt"
"math"
"os"
"strings"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
@@ -17,7 +15,7 @@ import (
"github.com/PaesslerAG/gval"
)
type MetricAggregatorIntervalConfig struct {
type metricAggregatorIntervalConfig struct {
Name string `json:"name"` // Metric name for the new metric
Function string `json:"function"` // Function to apply on the metric
Condition string `json:"if"` // Condition for applying function
@@ -28,7 +26,7 @@ type MetricAggregatorIntervalConfig struct {
}
type metricAggregator struct {
functions []*MetricAggregatorIntervalConfig
functions []*metricAggregatorIntervalConfig
constants map[string]interface{}
language gval.Language
output chan lp.CCMetric
@@ -63,20 +61,10 @@ var metricCacheLanguage = gval.NewLanguage(
gval.Function("getCpuList", getCpuListOfNode),
gval.Function("getCpuListOfType", getCpuListOfType),
)
var language gval.Language = gval.NewLanguage(
gval.Full(),
metricCacheLanguage,
)
var evaluables = struct {
mapping map[string]gval.Evaluable
mutex sync.Mutex
}{
mapping: make(map[string]gval.Evaluable),
}
func (c *metricAggregator) Init(output chan lp.CCMetric) error {
c.output = output
c.functions = make([]*MetricAggregatorIntervalConfig, 0)
c.functions = make([]*metricAggregatorIntervalConfig, 0)
c.constants = make(map[string]interface{})
// add constants like hostname, numSockets, ... to constants list
@@ -96,7 +84,7 @@ func (c *metricAggregator) Init(output chan lp.CCMetric) error {
c.constants["smtWidth"] = cinfo.SMTWidth
c.language = gval.NewLanguage(
gval.Full(),
gval.Base(),
metricCacheLanguage,
)
@@ -257,16 +245,15 @@ func (c *metricAggregator) AddAggregation(name, function, condition string, tags
return nil
}
}
agg := &MetricAggregatorIntervalConfig{
Name: name,
Condition: newcond,
gvalCond: gvalCond,
Function: newfunc,
gvalFunc: gvalFunc,
Tags: tags,
Meta: meta,
}
c.functions = append(c.functions, agg)
var agg metricAggregatorIntervalConfig
agg.Name = name
agg.Condition = newcond
agg.gvalCond = gvalCond
agg.Function = newfunc
agg.gvalFunc = gvalFunc
agg.Tags = tags
agg.Meta = meta
c.functions = append(c.functions, &agg)
return nil
}
@@ -294,50 +281,6 @@ func (c *metricAggregator) AddFunction(name string, function func(args ...interf
c.language = gval.NewLanguage(c.language, gval.Function(name, function))
}
func EvalBoolCondition(condition string, params map[string]interface{}) (bool, error) {
evaluables.mutex.Lock()
evaluable, ok := evaluables.mapping[condition]
evaluables.mutex.Unlock()
if !ok {
newcond :=
strings.ReplaceAll(
strings.ReplaceAll(
condition, "'", "\""), "%", "\\")
var err error
evaluable, err = language.NewEvaluable(newcond)
if err != nil {
return false, err
}
evaluables.mutex.Lock()
evaluables.mapping[condition] = evaluable
evaluables.mutex.Unlock()
}
value, err := evaluable.EvalBool(context.Background(), params)
return value, err
}
func EvalFloat64Condition(condition string, params map[string]interface{}) (float64, error) {
evaluables.mutex.Lock()
evaluable, ok := evaluables.mapping[condition]
evaluables.mutex.Unlock()
if !ok {
newcond :=
strings.ReplaceAll(
strings.ReplaceAll(
condition, "'", "\""), "%", "\\")
var err error
evaluable, err = language.NewEvaluable(newcond)
if err != nil {
return math.NaN(), err
}
evaluables.mutex.Lock()
evaluables.mapping[condition] = evaluable
evaluables.mutex.Unlock()
}
value, err := evaluable.EvalFloat64(context.Background(), params)
return value, err
}
func NewAggregator(output chan lp.CCMetric) (MetricAggregator, error) {
a := new(metricAggregator)
err := a.Init(output)

View File

@@ -1,4 +1,4 @@
package metricAggregator
package metricRouter
import (
"errors"

View File

@@ -7,7 +7,6 @@ import (
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
)
@@ -23,14 +22,13 @@ type metricCachePeriod struct {
type metricCache struct {
numPeriods int
curPeriod int
lock sync.Mutex
intervals []*metricCachePeriod
wg *sync.WaitGroup
ticker mct.MultiChanTicker
tickchan chan time.Time
done chan bool
output chan lp.CCMetric
aggEngine agg.MetricAggregator
aggEngine MetricAggregator
}
type MetricCache interface {
@@ -61,7 +59,7 @@ func (c *metricCache) Init(output chan lp.CCMetric, ticker mct.MultiChanTicker,
// Create a new aggregation engine. No separate goroutine at the moment
// The code is executed by the MetricCache goroutine
c.aggEngine, err = agg.NewAggregator(c.output)
c.aggEngine, err = NewAggregator(c.output)
if err != nil {
cclog.ComponentError("MetricCache", "Cannot create aggregator")
return err
@@ -104,11 +102,9 @@ func (c *metricCache) Start() {
done()
return
case tick := <-c.tickchan:
c.lock.Lock()
old := rotate(tick)
// Get the last period and evaluate aggregation metrics
starttime, endtime, metrics := c.GetPeriod(old)
c.lock.Unlock()
if len(metrics) > 0 {
c.aggEngine.Eval(starttime, endtime, metrics)
} else {
@@ -126,7 +122,6 @@ func (c *metricCache) Start() {
// to avoid reallocations
func (c *metricCache) Add(metric lp.CCMetric) {
if c.curPeriod >= 0 && c.curPeriod < c.numPeriods {
c.lock.Lock()
p := c.intervals[c.curPeriod]
if p.numMetrics < p.sizeMetrics {
p.metrics[p.numMetrics] = metric
@@ -138,7 +133,6 @@ func (c *metricCache) Add(metric lp.CCMetric) {
p.sizeMetrics = p.sizeMetrics + 1
p.stopstamp = metric.Time()
}
c.lock.Unlock()
}
}
@@ -154,26 +148,16 @@ func (c *metricCache) DeleteAggregation(name string) error {
// is the current one, index=1 the last interval and so on. Returns an empty array if a wrong index
// is given (negative index, index larger than configured number of total intervals, ...)
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric) {
var start time.Time = time.Now()
var stop time.Time = time.Now()
var metrics []lp.CCMetric
if index >= 0 && index < c.numPeriods {
pindex := c.curPeriod - index
if pindex < 0 {
pindex = c.numPeriods - pindex
}
if pindex >= 0 && pindex < c.numPeriods {
start = c.intervals[pindex].startstamp
stop = c.intervals[pindex].stopstamp
metrics = c.intervals[pindex].metrics
//return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
} else {
metrics = make([]lp.CCMetric, 0)
return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
}
} else {
metrics = make([]lp.CCMetric, 0)
}
return start, stop, metrics
return time.Now(), time.Now(), make([]lp.CCMetric, 0)
}
// Close finishes / stops the metric cache

View File

@@ -10,12 +10,10 @@ import (
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
"gopkg.in/Knetic/govaluate.v2"
)
const ROUTER_MAX_FORWARD = 50
// Metric router tag configuration
type metricRouterTagConfig struct {
Key string `json:"key"` // Tag name
@@ -25,17 +23,11 @@ type metricRouterTagConfig struct {
// Metric router configuration
type metricRouterConfig struct {
HostnameTagName string `json:"hostname_tag"` // Key name used when adding the hostname to a metric (default 'hostname')
AddTags []metricRouterTagConfig `json:"add_tags"` // List of tags that are added when the condition is met
DelTags []metricRouterTagConfig `json:"delete_tags"` // List of tags that are removed when the condition is met
IntervalAgg []agg.MetricAggregatorIntervalConfig `json:"interval_aggregates"` // List of aggregation function processed at the end of an interval
DropMetrics []string `json:"drop_metrics"` // List of metric names to drop. For fine-grained dropping use drop_metrics_if
DropMetricsIf []string `json:"drop_metrics_if"` // List of evaluatable terms to drop metrics
RenameMetrics map[string]string `json:"rename_metrics"` // Map to rename metric name from key to value
IntervalAgg []metricAggregatorIntervalConfig `json:"interval_aggregates"` // List of aggregation function processed at the end of an interval
IntervalStamp bool `json:"interval_timestamp"` // Update timestamp periodically by ticker each interval?
NumCacheIntervals int `json:"num_cache_intervals"` // Number of intervals of cached metrics for evaluation
MaxForward int `json:"max_forward"` // Number of maximal forwarded metrics at one select
dropMetrics map[string]bool // Internal map for O(1) lookup
}
// Metric router data structure
@@ -53,7 +45,6 @@ type metricRouter struct {
config metricRouterConfig // json encoded config for metric router
cache MetricCache // pointer to MetricCache
cachewg sync.WaitGroup // wait group for MetricCache
maxForward int // number of metrics to forward maximally in one iteration
}
// MetricRouter access functions
@@ -78,8 +69,6 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
r.cache_input = make(chan lp.CCMetric)
r.wg = wg
r.ticker = ticker
r.config.MaxForward = ROUTER_MAX_FORWARD
r.config.HostnameTagName = "hostname"
// Set hostname
hostname, err := os.Hostname()
@@ -103,12 +92,11 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
cclog.ComponentError("MetricRouter", err.Error())
return err
}
r.maxForward = 1
if r.config.MaxForward > r.maxForward {
r.maxForward = r.config.MaxForward
numIntervals := r.config.NumCacheIntervals
if numIntervals <= 0 {
numIntervals = 1
}
if r.config.NumCacheIntervals > 0 {
r.cache, err = NewCache(r.cache_input, r.ticker, &r.cachewg, r.config.NumCacheIntervals)
r.cache, err = NewCache(r.cache_input, r.ticker, &r.cachewg, numIntervals)
if err != nil {
cclog.ComponentError("MetricRouter", "MetricCache initialization failed:", err.Error())
return err
@@ -116,11 +104,6 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
for _, agg := range r.config.IntervalAgg {
r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
}
}
r.config.dropMetrics = make(map[string]bool)
for _, mname := range r.config.DropMetrics {
r.config.dropMetrics[mname] = true
}
return nil
}
@@ -147,34 +130,47 @@ func (r *metricRouter) StartTimer() {
cclog.ComponentDebug("MetricRouter", "TIMER START")
}
func getParamMap(point lp.CCMetric) map[string]interface{} {
// EvalCondition evaluates condition cond for metric data from point
func (r *metricRouter) EvalCondition(cond string, point lp.CCMetric) (bool, error) {
expression, err := govaluate.NewEvaluableExpression(cond)
if err != nil {
cclog.ComponentDebug("MetricRouter", cond, " = ", err.Error())
return false, err
}
// Add metric name, tags, meta data, fields and timestamp to the parameter list
params := make(map[string]interface{})
params["metric"] = point
params["name"] = point.Name()
for key, value := range point.Tags() {
params[key] = value
for _, t := range point.TagList() {
params[t.Key] = t.Value
}
for key, value := range point.Meta() {
params[key] = value
for _, m := range point.MetaList() {
params[m.Key] = m.Value
}
for key, value := range point.Fields() {
params[key] = value
for _, f := range point.FieldList() {
params[f.Key] = f.Value
}
params["timestamp"] = point.Time()
return params
// evaluate condition
result, err := expression.Evaluate(params)
if err != nil {
cclog.ComponentDebug("MetricRouter", cond, " = ", err.Error())
return false, err
}
return bool(result.(bool)), err
}
// DoAddTags adds a tag when the condition is fulfilled
func (r *metricRouter) DoAddTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.AddTags {
var conditionMatches bool
if m.Condition == "*" {
// Condition is always matched
conditionMatches = true
} else {
// Evaluate condition
var err error
conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
conditionMatches, err = r.EvalCondition(m.Condition, point)
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
@@ -188,15 +184,14 @@ func (r *metricRouter) DoAddTags(point lp.CCMetric) {
// DoDelTags removes a tag when the condition is fulfilled
func (r *metricRouter) DoDelTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.DelTags {
var conditionMatches bool
if m.Condition == "*" {
// Condition is always matched
conditionMatches = true
} else {
// Evaluate condition
var err error
conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
conditionMatches, err = r.EvalCondition(m.Condition, point)
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
@@ -208,31 +203,9 @@ func (r *metricRouter) DoDelTags(point lp.CCMetric) {
}
}
// Conditional test whether a metric should be dropped
func (r *metricRouter) dropMetric(point lp.CCMetric) bool {
// Simple drop check
if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
return conditionMatches
}
// Checking the dropping conditions
for _, m := range r.config.DropMetricsIf {
conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
}
if conditionMatches {
return conditionMatches
}
}
// No dropping condition met
return false
}
// Start starts the metric router
func (r *metricRouter) Start() {
// start timer if configured
r.timestamp = time.Now()
if r.config.IntervalStamp {
@@ -251,65 +224,17 @@ func (r *metricRouter) Start() {
cclog.ComponentDebug("MetricRouter", "FORWARD", point)
r.DoAddTags(point)
r.DoDelTags(point)
name := point.Name()
if new, ok := r.config.RenameMetrics[name]; ok {
point.SetName(new)
point.AddMeta("oldname", name)
}
r.DoAddTags(point)
r.DoDelTags(point)
for _, o := range r.outputs {
o <- point
}
}
// Forward message received from collector channel
coll_forward := func(p lp.CCMetric) {
// receive from metric collector
p.AddTag(r.config.HostnameTagName, r.hostname)
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
if !r.dropMetric(p) {
forward(p)
}
// even if the metric is dropped, it is stored in the cache for
// aggregations
if r.config.NumCacheIntervals > 0 {
r.cache.Add(p)
}
}
// Forward message received from receivers channel
recv_forward := func(p lp.CCMetric) {
// receive from receive manager
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
if !r.dropMetric(p) {
forward(p)
}
}
// Forward message received from cache channel
cache_forward := func(p lp.CCMetric) {
// receive from metric collector
if !r.dropMetric(p) {
p.AddTag(r.config.HostnameTagName, r.hostname)
forward(p)
}
}
// Start Metric Cache
if r.config.NumCacheIntervals > 0 {
r.cache.Start()
}
r.wg.Add(1)
go func() {
defer r.wg.Done()
for {
select {
case <-r.done:
@@ -317,22 +242,25 @@ func (r *metricRouter) Start() {
return
case p := <-r.coll_input:
coll_forward(p)
for i := 0; len(r.coll_input) > 0 && i < (r.maxForward-1); i++ {
coll_forward(<-r.coll_input)
// receive from metric collector
p.AddTag("hostname", r.hostname)
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
forward(p)
r.cache.Add(p)
case p := <-r.recv_input:
recv_forward(p)
for i := 0; len(r.recv_input) > 0 && i < (r.maxForward-1); i++ {
recv_forward(<-r.recv_input)
// receive from receive manager
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
forward(p)
case p := <-r.cache_input:
cache_forward(p)
for i := 0; len(r.cache_input) > 0 && i < (r.maxForward-1); i++ {
cache_forward(<-r.cache_input)
}
// receive from metric collector
p.AddTag("hostname", r.hostname)
forward(p)
}
}
}()
@@ -360,21 +288,14 @@ func (r *metricRouter) Close() {
r.done <- true
// wait for close of channel r.done
<-r.done
// stop timer
if r.config.IntervalStamp {
cclog.ComponentDebug("MetricRouter", "TIMER CLOSE")
r.timerdone <- true
// wait for close of channel r.timerdone
<-r.timerdone
}
// stop metric cache
if r.config.NumCacheIntervals > 0 {
cclog.ComponentDebug("MetricRouter", "CACHE CLOSE")
r.cache.Close()
r.cachewg.Wait()
}
}
// New creates a new initialized metric router

View File

@@ -24,6 +24,7 @@ import (
type CentralConfigFile struct {
Interval int `json:"interval"`
Duration int `json:"duration"`
Pidfile string `json:"pidfile,omitempty"`
CollectorConfigFile string `json:"collectors"`
RouterConfigFile string `json:"router"`
SinkConfigFile string `json:"sinks"`
@@ -86,12 +87,14 @@ func ReadCli() map[string]string {
var m map[string]string
cfg := flag.String("config", "./config.json", "Path to configuration file")
logfile := flag.String("log", "stderr", "Path for logfile")
pidfile := flag.String("pidfile", "/var/run/cc-metric-collector.pid", "Path for PID file")
once := flag.Bool("once", false, "Run all collectors only once")
debug := flag.Bool("debug", false, "Activate debug output")
flag.Parse()
m = make(map[string]string)
m["configfile"] = *cfg
m["logfile"] = *logfile
m["pidfile"] = *pidfile
if *once {
m["once"] = "true"
} else {
@@ -122,6 +125,25 @@ func ReadCli() map[string]string {
// return nil
//}
//func CreatePidfile(pidfile string) error {
// file, err := os.OpenFile(pidfile, os.O_CREATE|os.O_RDWR, 0600)
// if err != nil {
// log.Print(err)
// return err
// }
// file.Write([]byte(fmt.Sprintf("%d", os.Getpid())))
// file.Close()
// return nil
//}
//func RemovePidfile(pidfile string) error {
// info, err := os.Stat(pidfile)
// if !os.IsNotExist(err) && !info.IsDir() {
// os.Remove(pidfile)
// }
// return nil
//}
// General shutdownHandler function that gets executed in case of interrupt or graceful shutdownHandler
func shutdownHandler(config *RuntimeConfig, shutdownSignal chan os.Signal) {
defer config.Sync.Done()
@@ -152,6 +174,11 @@ func shutdownHandler(config *RuntimeConfig, shutdownSignal chan os.Signal) {
cclog.Debug("Shutdown SinkManager...")
config.SinkManager.Close()
}
// pidfile := config.ConfigFile.Pidfile
// RemovePidfile(pidfile)
// pidfile = config.CliArgs["pidfile"]
// RemovePidfile(pidfile)
}
func mainFunc() int {
@@ -199,6 +226,8 @@ func mainFunc() int {
return 1
}
// err = CreatePidfile(rcfg.CliArgs["pidfile"])
// Set log file
if logfile := rcfg.CliArgs["logfile"]; logfile != "stderr" {
cclog.SetOutput(logfile)

View File

@@ -1,8 +1,8 @@
{
"natsrecv" : {
[
{
"type": "nats",
"address": "nats://my-url",
"port" : "4222",
"database": "testcluster"
}
}
]

View File

@@ -7,23 +7,38 @@ This folder contains the ReceiveManager and receiver implementations for the cc-
The configuration file for the receivers is a list of configurations. The `type` field in each specifies which receiver to initialize.
```json
{
"myreceivername" : {
"type": "receiver-type",
<receiver-specific configuration>
[
{
"type": "nats",
"address": "nats://my-url",
"port" : "4222",
"database": "testcluster"
}
]
```
## Type `nats`
```json
{
"type": "nats",
"address": "<nats-URI or hostname>",
"port" : "<portnumber>",
"database": "<subscribe topic>"
}
```
This allows to specify
## Available receivers
- [`nats`](./natsReceiver.md): Receive metrics from the NATS network
- [`prometheus`](./prometheusReceiver.md): Scrape data from a Prometheus client
- [`http`](./httpReceiver.md): Listen for HTTP POST requests transporting metrics in InfluxDB line protocol
The `nats` receiver subscribes to the topic `database` and listens on `address` and `port` for metrics in the InfluxDB line protocol.
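For a quick smoke test, one could publish a single line-protocol sample on the subscribed topic with the `nats.go` client (server URL and topic are assumptions):
```go
package main

import nats "github.com/nats-io/nats.go"

func main() {
	// Connect to a hypothetical local NATS server
	nc, err := nats.Connect("nats://localhost:4222")
	if err != nil {
		panic(err)
	}
	defer nc.Close()
	// Publish one metric in InfluxDB line protocol on the receiver's topic
	nc.Publish("testcluster", []byte("cpu_load,hostname=node01 value=1.5\n"))
}
```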
# Contributing own receivers
A receiver contains a few functions and is derived from the type `Receiver` (in `metricReceiver.go`):
A receiver contains three functions and is derived from the type `Receiver` (in `metricReceiver.go`):
* `Init(config ReceiverConfig) error`
* `Start() error`
* `Close()`
* `Name() string`
* `SetSink(sink chan ccMetric.CCMetric)`
For an example, check the [sample receiver](./sampleReceiver.go)
The data structures should be set up in `Init()`, e.g. opening a file or server connection. The `Start()` function should either start a goroutine or use some other asynchronous mechanism for receiving metrics. The `Close()` function should tear down anything created in `Init()`.
Finally, the receiver needs to be registered in the `receiveManager.go`. There is a list of receivers called `AvailableReceivers` which is a map (`receiver_type_string` -> `pointer to Receiver interface`). Add a new entry with a descriptive name and the new receiver.
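Assuming the factory-function form of the map, registering the new receiver is a one-line addition (the `sample` entry is hypothetical):
```go
package receivers

import "encoding/json"

var AvailableReceivers = map[string]func(name string, config json.RawMessage) (Receiver, error){
	"nats":   NewNatsReceiver,
	"sample": NewSampleReceiver, // hypothetical new receiver
}
```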

View File

@@ -1,118 +0,0 @@
package receivers
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"sync"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"github.com/gorilla/mux"
influx "github.com/influxdata/line-protocol"
)
const HTTP_RECEIVER_PORT = "8080"
type HttpReceiverConfig struct {
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
Path string `json:"path"`
}
type HttpReceiver struct {
receiver
handler *influx.MetricHandler
parser *influx.Parser
meta map[string]string
config HttpReceiverConfig
router *mux.Router
server *http.Server
wg sync.WaitGroup
}
func (r *HttpReceiver) Init(name string, config json.RawMessage) error {
r.name = fmt.Sprintf("HttpReceiver(%s)", name)
r.config.Port = HTTP_RECEIVER_PORT
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return err
}
}
if len(r.config.Port) == 0 {
return errors.New("not all configuration variables set required by HttpReceiver")
}
r.meta = map[string]string{"source": r.name}
p := r.config.Path
if !strings.HasPrefix(p, "/") {
p = "/" + p
}
uri := fmt.Sprintf("%s:%s%s", r.config.Addr, r.config.Port, p)
cclog.ComponentDebug(r.name, "INIT", uri)
r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
r.parser.SetTimeFunc(DefaultTime)
r.router = mux.NewRouter()
r.router.Path(p).HandlerFunc(r.ServerHttp)
r.server = &http.Server{Addr: uri, Handler: r.router}
return nil
}
func (r *HttpReceiver) Start() {
cclog.ComponentDebug(r.name, "START")
r.wg.Add(1)
go func() {
err := r.server.ListenAndServe()
if err != nil && err.Error() != "http: Server closed" {
cclog.ComponentError(r.name, err.Error())
}
r.wg.Done()
}()
}
func (r *HttpReceiver) ServerHttp(w http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodPost {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return
}
body, err := ioutil.ReadAll(req.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
metrics, err := r.parser.Parse(body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
}
for _, m := range metrics {
y := lp.FromInfluxMetric(m)
for k, v := range r.meta {
y.AddMeta(k, v)
}
if r.sink != nil {
r.sink <- y
}
}
w.WriteHeader(http.StatusOK)
}
func (r *HttpReceiver) Close() {
r.server.Shutdown(context.Background())
}
func NewHttpReceiver(name string, config json.RawMessage) (Receiver, error) {
r := new(HttpReceiver)
err := r.Init(name, config)
return r, err
}

View File

@@ -1,23 +0,0 @@
## `http` receiver
The `http` receiver can be used to receive metrics through HTTP POST requests.
### Configuration structure
```json
{
"<name>": {
"type": "http",
"address" : "",
"port" : "8080",
"path" : "/write"
}
}
```
- `type`: makes the receiver a `http` receiver
- `address`: Listen address
- `port`: Listen port
- `path`: URL path for the write endpoint
The HTTP endpoint listens to `http://<address>:<port>/<path>`
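A hypothetical way to push one sample metric into this endpoint from Go, assuming the defaults `port=8080` and `path=/write` on localhost:
```go
package main

import (
	"net/http"
	"strings"
)

func main() {
	// POST one metric in InfluxDB line protocol to the receiver's endpoint
	resp, err := http.Post("http://localhost:8080/write", "text/plain",
		strings.NewReader("cpu_load,hostname=node01 value=1.5\n"))
	if err == nil {
		resp.Body.Close()
	}
}
```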

View File

@@ -1,14 +1,11 @@
package receivers
import (
// "time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
)
type defaultReceiverConfig struct {
Type string `json:"type"`
}
// Receiver configuration: Listen address, port
type ReceiverConfig struct {
Addr string `json:"address"`
Port string `json:"port"`
@@ -19,22 +16,41 @@ type ReceiverConfig struct {
type receiver struct {
name string
addr string
port string
database string
organization string
sink chan lp.CCMetric
}
type Receiver interface {
Init(config ReceiverConfig) error
Start()
Close() // Close / finish metric receiver
Name() string // Name of the metric receiver
SetSink(sink chan lp.CCMetric) // Set sink channel
Close()
Name() string
SetSink(sink chan lp.CCMetric)
}
// Name returns the name of the metric receiver
func (r *receiver) Name() string {
return r.name
}
// SetSink set the sink channel
func (r *receiver) SetSink(sink chan lp.CCMetric) {
r.sink = sink
}
func Tags2Map(metric influx.Metric) map[string]string {
tags := make(map[string]string)
for _, t := range metric.TagList() {
tags[t.Key] = t.Value
}
return tags
}
func Fields2Map(metric influx.Metric) map[string]interface{} {
fields := make(map[string]interface{})
for _, f := range metric.FieldList() {
fields[f.Key] = f.Value
}
return fields
}

View File

@@ -1,22 +1,19 @@
package receivers
import (
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
nats "github.com/nats-io/nats.go"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
"time"
)
type NatsReceiverConfig struct {
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
Subject string `json:"subject"`
Database string `json:"database"`
}
type NatsReceiver struct {
@@ -25,16 +22,49 @@ type NatsReceiver struct {
handler *influx.MetricHandler
parser *influx.Parser
meta map[string]string
config NatsReceiverConfig
config ReceiverConfig
}
var DefaultTime = func() time.Time {
return time.Unix(42, 0)
}
func (r *NatsReceiver) Init(config ReceiverConfig) error {
r.name = "NatsReceiver"
r.config = config
if len(r.config.Addr) == 0 ||
len(r.config.Port) == 0 ||
len(r.config.Database) == 0 {
return errors.New("Not all configuration variables set required by NatsReceiver")
}
r.meta = map[string]string{"source": r.name}
r.addr = r.config.Addr
if len(r.addr) == 0 {
r.addr = nats.DefaultURL
}
r.port = r.config.Port
if len(r.port) == 0 {
r.port = "4222"
}
uri := fmt.Sprintf("%s:%s", r.addr, r.port)
cclog.ComponentDebug("NatsReceiver", "INIT", uri)
nc, err := nats.Connect(uri)
if err == nil {
r.database = r.config.Database
r.nc = nc
} else {
r.nc = nil
return err
}
r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
r.parser.SetTimeFunc(DefaultTime)
return err
}
func (r *NatsReceiver) Start() {
cclog.ComponentDebug(r.name, "START")
r.nc.Subscribe(r.config.Subject, r._NatsReceive)
cclog.ComponentDebug("NatsReceiver", "START")
r.nc.Subscribe(r.database, r._NatsReceive)
}
func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
@@ -54,39 +84,7 @@ func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
func (r *NatsReceiver) Close() {
if r.nc != nil {
cclog.ComponentDebug(r.name, "CLOSE")
cclog.ComponentDebug("NatsReceiver", "CLOSE")
r.nc.Close()
}
}
func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
r := new(NatsReceiver)
r.name = fmt.Sprintf("NatsReceiver(%s)", name)
r.config.Addr = nats.DefaultURL
r.config.Port = "4222"
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return nil, err
}
}
if len(r.config.Addr) == 0 ||
len(r.config.Port) == 0 ||
len(r.config.Subject) == 0 {
return nil, errors.New("not all configuration variables set required by NatsReceiver")
}
r.meta = map[string]string{"source": r.name}
uri := fmt.Sprintf("%s:%s", r.config.Addr, r.config.Port)
cclog.ComponentDebug(r.name, "NewNatsReceiver", uri, "Subject", r.config.Subject)
if nc, err := nats.Connect(uri); err == nil {
r.nc = nc
} else {
r.nc = nil
return nil, err
}
r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
r.parser.SetTimeFunc(DefaultTime)
return r, nil
}

View File

@@ -1,21 +0,0 @@
## `nats` receiver
The `nats` receiver can be used to receive metrics from the NATS network. It subscribes to the topic `database` and listens on `address` and `port` for metrics in the InfluxDB line protocol.
### Configuration structure
```json
{
"<name>": {
"type": "nats",
"address" : "nats-server.example.org",
"port" : "4222",
"subject" : "subject"
}
}
```
- `type`: makes the receiver a `nats` receiver
- `address`: Address of the NATS control server
- `port`: Port of the NATS control server
- `subject`: Subscribes to this subject and receive metrics

View File

@@ -1,122 +0,0 @@
package receivers
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
type PrometheusReceiverConfig struct {
defaultReceiverConfig
Addr string `json:"address"`
Port string `json:"port"`
Path string `json:"path"`
Interval string `json:"interval"`
SSL bool `json:"ssl"`
}
type PrometheusReceiver struct {
receiver
meta map[string]string
config PrometheusReceiverConfig
interval time.Duration
done chan bool
wg sync.WaitGroup
ticker *time.Ticker
uri string
}
func (r *PrometheusReceiver) Start() {
cclog.ComponentDebug(r.name, "START", r.uri)
r.wg.Add(1)
r.ticker = time.NewTicker(r.interval)
go func() {
for {
select {
case <-r.done:
r.wg.Done()
return
case t := <-r.ticker.C:
resp, err := http.Get(r.uri)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "#") {
continue
}
lineSplit := strings.Fields(line)
// separate metric name from tags (labels in Prometheus)
tags := map[string]string{}
name := lineSplit[0]
if sindex := strings.Index(name, "{"); sindex >= 0 {
eindex := strings.Index(name, "}")
for _, kv := range strings.Split(name[sindex+1:eindex], ",") {
eq := strings.Index(kv, "=")
tags[kv[0:eq]] = strings.Trim(kv[eq+1:], "\"")
}
name = lineSplit[0][0:sindex]
}
value, err := strconv.ParseFloat(lineSplit[1], 64)
if err == nil {
y, err := lp.New(name, tags, r.meta, map[string]interface{}{"value": value}, t)
if err == nil {
r.sink <- y
}
}
}
}
}
}()
}
func (r *PrometheusReceiver) Close() {
cclog.ComponentDebug(r.name, "CLOSE")
r.done <- true
r.wg.Wait()
}
func NewPrometheusReceiver(name string, config json.RawMessage) (Receiver, error) {
r := new(PrometheusReceiver)
r.name = fmt.Sprintf("PrometheusReceiver(%s)", name)
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return nil, err
}
}
if len(r.config.Addr) == 0 ||
len(r.config.Port) == 0 ||
len(r.config.Interval) == 0 {
return nil, errors.New("not all configuration variables set required by PrometheusReceiver (address and port)")
}
if len(r.config.Interval) > 0 {
t, err := time.ParseDuration(r.config.Interval)
if err == nil {
r.interval = t
}
}
r.meta = map[string]string{"source": r.name}
proto := "http"
if r.config.SSL {
proto = "https"
}
r.uri = fmt.Sprintf("%s://%s:%s/%s", proto, r.config.Addr, r.config.Port, r.config.Path)
return r, nil
}

View File

@@ -1,27 +0,0 @@
## `prometheus` receiver
The `prometheus` receiver can be used to scrape the metrics of a single `prometheus` client. It does **not** use any official Golang library but makes simple HTTP GET requests and parses the response itself.
### Configuration structure
```json
{
"<name>": {
"type": "prometheus",
"address" : "testpromhost",
"port" : "12345",
"path" : "/prometheus",
"interval": "5s",
"ssl" : true,
}
}
```
- `type`: makes the receiver a `prometheus` receiver
- `address`: Hostname or IP of the Prometheus agent
- `port`: Port of Prometheus agent
- `path`: Path to the Prometheus endpoint
- `interval`: Scrape the Prometheus endpoint in this interval (default '5s')
- `ssl`: Use SSL or not
The receiver requests data from `http(s)://<address>:<port>/<path>`.

View File

@@ -9,8 +9,8 @@ import (
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
var AvailableReceivers = map[string]func(name string, config json.RawMessage) (Receiver, error){
"nats": NewNatsReceiver,
var AvailableReceivers = map[string]Receiver{
"nats": &NatsReceiver{},
}
type receiveManager struct {
@@ -18,25 +18,23 @@ type receiveManager struct {
output chan lp.CCMetric
done chan bool
wg *sync.WaitGroup
config []json.RawMessage
config []ReceiverConfig
}
type ReceiveManager interface {
Init(wg *sync.WaitGroup, receiverConfigFile string) error
AddInput(name string, rawConfig json.RawMessage) error
AddInput(rawConfig json.RawMessage) error
AddOutput(output chan lp.CCMetric)
Start()
Close()
}
func (rm *receiveManager) Init(wg *sync.WaitGroup, receiverConfigFile string) error {
// Initialize struct fields
rm.inputs = make([]Receiver, 0)
rm.output = nil
rm.done = make(chan bool)
rm.wg = wg
rm.config = make([]json.RawMessage, 0)
rm.config = make([]ReceiverConfig, 0)
configFile, err := os.Open(receiverConfigFile)
if err != nil {
cclog.ComponentError("ReceiveManager", err.Error())
@@ -44,16 +42,15 @@ func (rm *receiveManager) Init(wg *sync.WaitGroup, receiverConfigFile string) er
}
defer configFile.Close()
jsonParser := json.NewDecoder(configFile)
var rawConfigs map[string]json.RawMessage
var rawConfigs []json.RawMessage
err = jsonParser.Decode(&rawConfigs)
if err != nil {
cclog.ComponentError("ReceiveManager", err.Error())
return err
}
for name, raw := range rawConfigs {
rm.AddInput(name, raw)
for _, raw := range rawConfigs {
rm.AddInput(raw)
}
return nil
}
@@ -67,8 +64,8 @@ func (rm *receiveManager) Start() {
cclog.ComponentDebug("ReceiveManager", "STARTED")
}
func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error {
var config defaultReceiverConfig
func (rm *receiveManager) AddInput(rawConfig json.RawMessage) error {
var config ReceiverConfig
err := json.Unmarshal(rawConfig, &config)
if err != nil {
cclog.ComponentError("ReceiveManager", "SKIP", config.Type, "JSON config error:", err.Error())
@@ -78,13 +75,14 @@ func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error
cclog.ComponentError("ReceiveManager", "SKIP", config.Type, "unknown receiver:", err.Error())
return err
}
r, err := AvailableReceivers[config.Type](name, rawConfig)
r := AvailableReceivers[config.Type]
err = r.Init(config)
if err != nil {
cclog.ComponentError("ReceiveManager", "SKIP", name, "initialization failed:", err.Error())
cclog.ComponentError("ReceiveManager", "SKIP", r.Name(), "initialization failed:", err.Error())
return err
}
rm.inputs = append(rm.inputs, r)
rm.config = append(rm.config, rawConfig)
rm.config = append(rm.config, config)
cclog.ComponentDebug("ReceiveManager", "ADD RECEIVER", r.Name())
return nil
}

View File

@@ -1,91 +0,0 @@
package receivers
import (
"encoding/json"
"fmt"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
)
// SampleReceiver configuration: receiver type, listen address, port
type SampleReceiverConfig struct {
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
}
type SampleReceiver struct {
receiver
config SampleReceiverConfig
// Storage for static information
meta map[string]string
// Use in case of own go routine
// done chan bool
// wg sync.WaitGroup
}
// Implement functions required for Receiver interface
// Start(), Close()
// See: metricReceiver.go
func (r *SampleReceiver) Start() {
cclog.ComponentDebug(r.name, "START")
// Start server process like http.ListenAndServe()
// or use own go routine but always make sure it exits
// as soon as it gets the signal of the r.done channel
// r.wg.Add(1)
// go func() {
// for {
// select {
// case <-r.done:
// r.wg.Done()
// return
// }
// }
// r.wg.Done()
// }()
}
// Close receiver: close network connection, close files, close libraries, ...
func (r *SampleReceiver) Close() {
cclog.ComponentDebug(r.name, "CLOSE")
// Close server like http.Shutdown()
// in case of own go routine, send the signal and wait
// r.done <- true
// r.wg.Wait()
}
// New function to create a new instance of the receiver
// Initialize the receiver by giving it a name and reading in the config JSON
func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
r := new(SampleReceiver)
// Set name of SampleReceiver
// The name should be chosen in such a way that different instances of SampleReceiver can be distinguished
r.name = fmt.Sprintf("SampleReceiver(%s)", name)
// Set static information
r.meta = map[string]string{"source": r.name}
// Set defaults in r.config
// Allow overwriting these defaults by reading config JSON
// Read the sample receiver specific JSON config
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return nil, err
}
}
// Check that all required fields in the configuration are set
// Use 'if len(r.config.Option) > 0' for strings
return r, nil
}

View File

@@ -15,3 +15,6 @@ CONF_DIR=/etc/cc-metric-collector
CONF_FILE=/etc/cc-metric-collector/cc-metric-collector.json
RESTART_ON_UPGRADE=true
# Only used on systemd systems
PID_FILE_DIR=/var/run

View File

@@ -1,12 +0,0 @@
Package: cc-metric-collector
Version: {VERSION}
Installed-Size: {INSTALLED_SIZE}
Architecture: {ARCH}
Maintainer: thomas.gruber@fau.de
Depends: libc6 (>= 2.2.1)
Build-Depends: debhelper-compat (= 13), git, golang-go
Description: Metric collection daemon from the ClusterCockpit suite
Homepage: https://github.com/ClusterCockpit/cc-metric-collector
Source: cc-metric-collector
Rules-Requires-Root: no

View File

@@ -14,7 +14,11 @@ Restart=on-failure
WorkingDirectory=/tmp
RuntimeDirectory=cc-metric-collector
RuntimeDirectoryMode=0750
ExecStart=/usr/sbin/cc-metric-collector --config=${CONF_FILE}
ExecStart=/usr/sbin/cc-metric-collector \
--config=${CONF_FILE} \
--pidfile=${PID_FILE_DIR}/cc-metric-collector.pid
LimitNOFILE=10000
TimeoutStopSec=20
UMask=0027

View File

@@ -1,5 +1,5 @@
Name: cc-metric-collector
Version: %{VERS}
Version: 0.1
Release: 1%{?dist}
Summary: Metric collection daemon from the ClusterCockpit suite
@@ -7,9 +7,8 @@ License: MIT
Source0: %{name}-%{version}.tar.gz
BuildRequires: go-toolset
BuildRequires: systemd-rpm-macros
# for header downloads
BuildRequires: wget
# for internal LIKWID installation
BuildRequires: wget perl-Data-Dumper
Provides: %{name} = %{version}
@@ -27,23 +26,15 @@ make
%install
install -Dpm 0750 %{name} %{buildroot}%{_sbindir}/%{name}
install -Dpm 0755 %{name} %{buildroot}%{_sbindir}/%{name}
install -Dpm 0600 config.json %{buildroot}%{_sysconfdir}/%{name}/%{name}.json
install -Dpm 0600 collectors.json %{buildroot}%{_sysconfdir}/%{name}/collectors.json
install -Dpm 0600 sinks.json %{buildroot}%{_sysconfdir}/%{name}/sinks.json
install -Dpm 0600 receivers.json %{buildroot}%{_sysconfdir}/%{name}/receivers.json
install -Dpm 0600 router.json %{buildroot}%{_sysconfdir}/%{name}/router.json
install -Dpm 0644 scripts/%{name}.service %{buildroot}%{_unitdir}/%{name}.service
install -Dpm 0600 scripts/%{name}.config %{buildroot}%{_sysconfdir}/default/%{name}
install -Dpm 0644 scripts/%{name}.sysusers %{buildroot}%{_sysusersdir}/%{name}.conf
install -Dpm 644 scripts/%{name}.service %{buildroot}%{_unitdir}/%{name}.service
install -Dpm 600 scripts/%{name}.config %{buildroot}%{_sysconfdir}/default/%{name}
%check
# go test should be here... :)
%pre
%sysusers_create_package scripts/%{name}.sysusers
%post
%systemd_post %{name}.service
@@ -51,25 +42,13 @@ install -Dpm 0644 scripts/%{name}.sysusers %{buildroot}%{_sysusersdir}/%{name}.c
%systemd_preun %{name}.service
%files
# Binary
%attr(-,clustercockpit,clustercockpit) %{_sbindir}/%{name}
# Config
%dir %{_sysconfdir}/%{name}
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/%{name}.json
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/collectors.json
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/sinks.json
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/receivers.json
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/router.json
# Systemd
%{_sbindir}/%{name}
%{_unitdir}/%{name}.service
%{_sysconfdir}/default/%{name}
%{_sysusersdir}/%{name}.conf
%config(noreplace) %{_sysconfdir}/%{name}/%{name}.json
%changelog
* Thu Mar 03 2022 Thomas Gruber - 0.3
- Add clustercockpit user installation
* Mon Feb 14 2022 Thomas Gruber - 0.2
- Add component specific configuration files
- Add %attr to config files
* Mon Nov 22 2021 Thomas Gruber - 0.1
- Initial spec file

View File

@@ -1,2 +0,0 @@
#Type Name ID GECOS Home directory Shell
u clustercockpit - "User for ClusterCockpit" /run/cc-metric-collector /sbin/nologin

View File

@@ -1,83 +0,0 @@
#!/usr/bin/env python3
import os, os.path, sys, getopt, re, json
def which(cmd):
ospath = os.environ.get("PATH", "")
for p in ospath.split(":"):
testcmd = os.path.join(p, cmd)
if os.access(testcmd, os.X_OK):
return testcmd
return None
def group_to_json(groupfile):
gdata = []
with open(groupfile, "r") as fp:
gdata = fp.read().strip().split("\n")
events = {}
metrics = []
parse_events = False
parse_metrics = False
for line in gdata:
if line == "EVENTSET":
parse_events = True
parse_metrics = False
continue
if line == "METRICS":
parse_events = False
parse_metrics = True
continue
if len(line) == 0 or line.startswith("SHORT") or line == "LONG":
parse_events = False
parse_metrics = False
continue
if parse_events:
m = re.match("([\w\d]+)\s+([\w\d_]+)", line)
if m:
events[m.group(1)] = m.group(2)
if parse_metrics:
llist = re.split("\s+", line)
calc = llist[-1]
metric = " ".join(llist[:-1])
scope = "cpu"
if "BOX" in calc:
scope = "socket"
if "PWR" in calc:
scope = "socket"
m = {"name" : metric, "calc": calc, "scope" : scope, "publish" : True}
metrics.append(m)
return {"events" : events, "metrics" : metrics}
if len(sys.argv) != 3:
print("Usage: $0 <likwid-arch> <group-name>")
sys.exit(1)
arch = sys.argv[1]
group = sys.argv[2]
ltopo = which("likwid-topology")
if not ltopo:
print("Cannot find LIKWID installation. Please add LIKWID bin folder to your PATH.")
sys.exit(1)
bindir = os.path.dirname(ltopo)
groupdir = os.path.normpath(os.path.join(bindir, "../share/likwid/perfgroups"))
if not os.path.exists(groupdir):
print("Cannot find LIKWID performance groups in default install location")
sys.exit(1)
archdir = os.path.join(groupdir, arch)
if not os.path.exists(archdir):
print("Cannot find LIKWID performance groups for architecture {}".format(arch))
sys.exit(1)
groupfile = os.path.join(archdir, "{}.txt".format(group))
if not os.path.exists(groupfile):
print("Cannot find LIKWID performance group {} for architecture {}".format(group, arch))
sys.exit(1)
gdata = group_to_json(groupfile)
print(json.dumps(gdata, sort_keys=True, indent=2))

View File

@@ -1,6 +1,6 @@
{
"mystdout" : {
[
{
"type" : "stdout",
"meta_as_tags" : true
}
}
]

View File

@@ -2,27 +2,17 @@
This folder contains the SinkManager and sink implementations for the cc-metric-collector.
# Available sinks:
- [`stdout`](./stdoutSink.md): Print all metrics to `stdout`, `stderr` or a file
- [`http`](./httpSink.md): Send metrics to an HTTP server as POST requests
- [`influxdb`](./influxSink.md): Send metrics to an [InfluxDB](https://www.influxdata.com/products/influxdb/) database
- [`influxasync`](./influxAsyncSink.md): Send metrics to an [InfluxDB](https://www.influxdata.com/products/influxdb/) database with non-blocking write API
- [`nats`](./natsSink.md): Publish metrics to the [NATS](https://nats.io/) network overlay system
- [`ganglia`](./gangliaSink.md): Publish metrics in the [Ganglia Monitoring System](http://ganglia.info/) using the `gmetric` CLI tool
- [`libganglia`](./libgangliaSink.md): Publish metrics in the [Ganglia Monitoring System](http://ganglia.info/) directly using `libganglia.so`
- [`prometheus`](./prometheusSink.md): Publish metrics for the [Prometheus Monitoring System](https://prometheus.io/)
# Configuration
The configuration file for the sinks is a list of configurations. The `type` field in each entry specifies which sink implementation to initialize.
```json
[
"mystdout" : {
{
"type" : "stdout",
"meta_as_tags" : false
},
"metricstore" : {
{
"type" : "http",
"host" : "localhost",
"port" : "4123",
@@ -32,75 +22,78 @@ The configuration file for the sinks is a list of configurations. The `type` fie
]
```
This example initializes two sinks: the `stdout` sink printing all metrics to `stdout` and the `http` sink sending them to the given `host`, `port`, `database` and `password`.
If `meta_as_tags` is set, all meta information attached to a CCMetric is printed out as tags.
## Type `stdout`
```json
{
"type" : "stdout",
"meta_as_tags" : <true|false>
}
```
The `stdout` sink dumps all metrics to `stdout`.
## Type `http`
```json
{
"type" : "http",
"host" : "<hostname>",
"port" : "<portnumber>",
"database" : "<database name>",
"password" : "<jwt token>",
"meta_as_tags" : <true|false>
}
```
The sink uses POST requests to send metrics to `http://<host>:<port>/<database>`, passing the JWT token as a *Bearer* token in the 'Authorization' header.
## Type `nats`
```json
{
"type" : "nats",
"host" : "<hostname>",
"port" : "<portnumber>",
"user" : "<username>",
"password" : "<password>",
"database" : "<database name>"
"meta_as_tags" : <true|false>
}
```
This sink publishes the CCMetrics in a NATS environment using `host`, `port`, `user` and `password` for connecting. The metrics are published under the subject given by `database`.
## Type `influxdb`
```json
{
"type" : "influxdb",
"host" : "<hostname>",
"port" : "<portnumber>",
"user" : "<username>",
"password" : "<password or API key>",
"database" : "<database name>"
"organization": "<InfluxDB v2 organization>",
"ssl" : <true|false>,
"meta_as_tags" : <true|false>
}
```
This sink submits the CCMetrics to an InfluxDB time-series database. It uses `host`, `port` and `ssl` for connecting. For authentication, it uses `user:password` if `user` is set, otherwise only `password` as an API key. The `organization` and `database` are used for writing to the correct database.
# Contributing own sinks
A sink contains five functions and is derived from the type `sink`:
* `Init(name string, config json.RawMessage) error`
A sink contains three functions and is derived from the type `Sink`:
* `Init(config SinkConfig) error`
* `Write(point CCMetric) error`
* `Flush() error`
* `Close()`
* `New<Typename>(name string, config json.RawMessage) (Sink, error)` (calls the `Init()` function)
The data structures should be set up in `Init()`, e.g. by opening a file or a server connection. The `Write()` function writes/sends the data. For non-blocking sinks, the `Flush()` method tells the sink to drain its internal buffers. The `Close()` function should tear down anything created in `Init()`.
Finally, the sink needs to be registered in `sinkManager.go`: the map `AvailableSinks` (`sink_type_string` -> `pointer to sink interface`) lists all available sinks, so add a new entry with a descriptive name and the new sink.
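A minimal, self-contained sketch of that registry pattern (the `Sink` interface and sink type below are stand-ins; the real map and types live in `sinkManager.go` and `metricSink.go`):
```go
package main

import "fmt"

// Stand-in for the real Sink interface from metricSink.go
type Sink interface {
	Name() string
}

type StdoutSink struct{}

func (s *StdoutSink) Name() string { return "StdoutSink" }

// Map from sink type string to a pointer to the sink implementation
var AvailableSinks = map[string]Sink{
	"stdout": new(StdoutSink),
	// "sample": new(SampleSink), // a new sink would be added here
}

func main() {
	if s, ok := AvailableSinks["stdout"]; ok {
		fmt.Println("initializing", s.Name())
	}
}
```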
## Sample sink
```go
package sinks
import (
"encoding/json"
"fmt"
"log"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
type SampleSinkConfig struct {
defaultSinkConfig // defines JSON tags for 'type' and 'meta_as_tags'
}
type SampleSink struct {
sink // declares 'name' and 'meta_as_tags'
config SampleSinkConfig // entry point to the SampleSinkConfig
}
// Initialize the sink by giving it a name and reading in the config JSON
func (s *SampleSink) Init(name string, config json.RawMessage) error {
s.name = fmt.Sprintf("SampleSink(%s)", name) // Always specify a name here
// Read in the config JSON
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return err
}
}
return nil
}
// Code to submit a single CCMetric to the sink
func (s *SampleSink) Write(point lp.CCMetric) error {
log.Print(point)
return nil
}
// If the sink uses batched sends internally, you can tell it to flush its buffers
func (s *SampleSink) Flush() error {
return nil
}
// Close sink: close network connection, close files, close libraries, ...
func (s *SampleSink) Close() {}
// New function to create a new instance of the sink
func NewSampleSink(name string, config json.RawMessage) (Sink, error) {
s := new(SampleSink)
err := s.Init(name, config)
return s, err
}
```
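The defaults-then-`json.Unmarshal` pattern used in `Init()` above can be exercised in isolation. In this self-contained sketch the boolean `meta_as_tags` field is purely illustrative (the real `defaultSinkConfig` uses a string list):
```go
package main

import (
	"encoding/json"
	"fmt"
)

type sampleConfig struct {
	MetaAsTags bool `json:"meta_as_tags,omitempty"` // hypothetical field for this sketch
}

func main() {
	cfg := sampleConfig{MetaAsTags: false} // defaults, set before unmarshalling
	raw := json.RawMessage(`{"meta_as_tags": true}`)
	if len(raw) > 0 {
		// Unmarshal overrides only the fields present in the raw config
		if err := json.Unmarshal(raw, &cfg); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%+v\n", cfg) // {MetaAsTags:true}
}
```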

View File

@@ -1,269 +0,0 @@
package sinks
import (
"fmt"
"strings"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
func GangliaMetricName(point lp.CCMetric) string {
name := point.Name()
metricType, typeOK := point.GetTag("type")
metricTid, tidOk := point.GetTag("type-id")
gangliaType := metricType + metricTid
if strings.Contains(name, metricType) && tidOk {
name = strings.Replace(name, metricType, gangliaType, -1)
} else if typeOK && tidOk {
name = metricType + metricTid + "_" + name
} else if point.HasTag("device") {
device, _ := point.GetTag("device")
name = name + "_" + device
}
return name
}
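// Illustrative examples (not from the source): "cpu_user" with tags
// type="cpu" and type-id="3" becomes "cpu3_user"; a metric without type
// tags but with device="nvidia0" gets the device appended, e.g.
// "nv_util" becomes "nv_util_nvidia0".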
func GangliaMetricRename(name string) string {
if name == "net_bytes_in" {
return "bytes_in"
} else if name == "net_bytes_out" {
return "bytes_out"
} else if name == "net_pkts_in" {
return "pkts_in"
} else if name == "net_pkts_out" {
return "pkts_out"
} else if name == "cpu_iowait" {
return "cpu_wio"
}
return name
}
func GangliaSlopeType(point lp.CCMetric) uint {
name := point.Name()
if name == "mem_total" || name == "swap_total" {
return 0
}
return 3
}
const DEFAULT_GANGLIA_METRIC_TMAX = 300
const DEFAULT_GANGLIA_METRIC_SLOPE = "both"
type GangliaMetric struct {
Name string
Type string
Slope string
Tmax int
Unit string
}
type GangliaMetricGroup struct {
Name string
Metrics []GangliaMetric
}
var CommonGangliaMetrics = []GangliaMetricGroup{
{
Name: "memory",
Metrics: []GangliaMetric{
{"mem_total", "float", "zero", 1200, "KB"},
{"swap_total", "float", "zero", 1200, "KB"},
{"mem_free", "float", "both", 180, "KB"},
{"mem_shared", "float", "both", 180, "KB"},
{"mem_buffers", "float", "both", 180, "KB"},
{"mem_cached", "float", "both", 180, "KB"},
{"swap_free", "float", "both", 180, "KB"},
{"mem_sreclaimable", "float", "both", 180, "KB"},
{"mem_slab", "float", "both", 180, "KB"},
},
},
{
Name: "cpu",
Metrics: []GangliaMetric{
{"cpu_num", "uint32", "zero", 1200, "CPUs"},
{"cpu_speed", "uint32", "zero", 1200, "MHz"},
{"cpu_user", "float", "both", 90, "%"},
{"cpu_nice", "float", "both", 90, "%"},
{"cpu_system", "float", "both", 90, "%"},
{"cpu_idle", "float", "both", 3800, "%"},
{"cpu_aidle", "float", "both", 90, "%"},
{"cpu_wio", "float", "both", 90, "%"},
{"cpu_intr", "float", "both", 90, "%"},
{"cpu_sintr", "float", "both", 90, "%"},
{"cpu_steal", "float", "both", 90, "%"},
{"cpu_guest", "float", "both", 90, "%"},
{"cpu_gnice", "float", "both", 90, "%"},
},
},
{
Name: "load",
Metrics: []GangliaMetric{
{"load_one", "float", "both", 70, ""},
{"load_five", "float", "both", 325, ""},
{"load_fifteen", "float", "both", 950, ""},
},
},
{
Name: "disk",
Metrics: []GangliaMetric{
{"disk_total", "double", "both", 1200, "GB"},
{"disk_free", "double", "both", 180, "GB"},
{"part_max_used", "float", "both", 180, "%"},
},
},
{
Name: "network",
Metrics: []GangliaMetric{
{"bytes_out", "float", "both", 300, "bytes/sec"},
{"bytes_in", "float", "both", 300, "bytes/sec"},
{"pkts_in", "float", "both", 300, "packets/sec"},
{"pkts_out", "float", "both", 300, "packets/sec"},
},
},
{
Name: "process",
Metrics: []GangliaMetric{
{"proc_run", "uint32", "both", 950, ""},
{"proc_total", "uint32", "both", 950, ""},
},
},
{
Name: "system",
Metrics: []GangliaMetric{
{"boottime", "uint32", "zero", 1200, "s"},
{"sys_clock", "uint32", "zero", 1200, "s"},
{"machine_type", "string", "zero", 1200, ""},
{"os_name", "string", "zero", 1200, ""},
{"os_release", "string", "zero", 1200, ""},
{"mtu", "uint32", "both", 1200, ""},
},
},
}
type GangliaMetricConfig struct {
Type string
Slope string
Tmax int
Unit string
Group string
Value string
Name string
}
func GetCommonGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
mname := GangliaMetricRename(point.Name())
if oldname, ok := point.GetMeta("oldname"); ok {
mname = GangliaMetricRename(oldname)
}
for _, group := range CommonGangliaMetrics {
for _, metric := range group.Metrics {
if metric.Name == mname {
valueStr := ""
value, ok := point.GetField("value")
if ok {
switch real := value.(type) {
case float64:
valueStr = fmt.Sprintf("%f", real)
case float32:
valueStr = fmt.Sprintf("%f", real)
case int64:
valueStr = fmt.Sprintf("%d", real)
case int32:
valueStr = fmt.Sprintf("%d", real)
case int:
valueStr = fmt.Sprintf("%d", real)
case uint64:
valueStr = fmt.Sprintf("%d", real)
case uint32:
valueStr = fmt.Sprintf("%d", real)
case uint:
valueStr = fmt.Sprintf("%d", real)
case string:
valueStr = real
default:
}
}
return GangliaMetricConfig{
Group: group.Name,
Type: metric.Type,
Slope: metric.Slope,
Tmax: metric.Tmax,
Unit: metric.Unit,
Value: valueStr,
Name: GangliaMetricRename(mname),
}
}
}
}
return GangliaMetricConfig{
Group: "",
Type: "",
Slope: "",
Tmax: 0,
Unit: "",
Value: "",
Name: "",
}
}
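// Callers treat an empty Type in the returned config as "metric not found
// in CommonGangliaMetrics" and fall back to GetGangliaConfig below, which
// derives group and unit from the metric's meta information instead.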
func GetGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
mname := GangliaMetricRename(point.Name())
if oldname, ok := point.GetMeta("oldname"); ok {
mname = GangliaMetricRename(oldname)
}
group := ""
if g, ok := point.GetMeta("group"); ok {
group = g
}
unit := ""
if u, ok := point.GetMeta("unit"); ok {
unit = u
}
valueType := "double"
valueStr := ""
value, ok := point.GetField("value")
if ok {
switch real := value.(type) {
case float64:
valueStr = fmt.Sprintf("%f", real)
valueType = "double"
case float32:
valueStr = fmt.Sprintf("%f", real)
valueType = "float"
case int64:
valueStr = fmt.Sprintf("%d", real)
valueType = "int32"
case int32:
valueStr = fmt.Sprintf("%d", real)
valueType = "int32"
case int:
valueStr = fmt.Sprintf("%d", real)
valueType = "int32"
case uint64:
valueStr = fmt.Sprintf("%d", real)
valueType = "uint32"
case uint32:
valueStr = fmt.Sprintf("%d", real)
valueType = "uint32"
case uint:
valueStr = fmt.Sprintf("%d", real)
valueType = "uint32"
case string:
valueStr = real
valueType = "string"
default:
valueType = "invalid"
}
}
return GangliaMetricConfig{
Group: group,
Type: valueType,
Slope: DEFAULT_GANGLIA_METRIC_SLOPE,
Tmax: DEFAULT_GANGLIA_METRIC_TMAX,
Unit: unit,
Value: valueStr,
Name: GangliaMetricRename(mname),
}
}

View File

@@ -1,83 +1,76 @@
package sinks
import (
"encoding/json"
"errors"
"fmt"
"log"
"strings"
// "time"
"os/exec"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const GMETRIC_EXEC = `gmetric`
const GMETRIC_CONFIG = `/etc/ganglia/gmond.conf`
type GangliaSinkConfig struct {
defaultSinkConfig
GmetricPath string `json:"gmetric_path,omitempty"`
GmetricConfig string `json:"gmetric_config,omitempty"`
AddGangliaGroup bool `json:"add_ganglia_group,omitempty"`
AddTagsAsDesc bool `json:"add_tags_as_desc,omitempty"`
ClusterName string `json:"cluster_name,omitempty"`
AddTypeToName bool `json:"add_type_to_name,omitempty"`
AddUnits bool `json:"add_units,omitempty"`
}
type GangliaSink struct {
sink
Sink
gmetric_path string
gmetric_config string
config GangliaSinkConfig
}
func (s *GangliaSink) Init(config sinkConfig) error {
p, err := exec.LookPath(string(GMETRIC_EXEC))
if err == nil {
s.gmetric_path = p
}
return err
}
func (s *GangliaSink) Write(point lp.CCMetric) error {
var err error = nil
//var tagsstr []string
var tagsstr []string
var argstr []string
// Get metric config (type, value, ... in suitable format)
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
for _, t := range point.TagList() {
switch t.Key {
case "cluster":
argstr = append(argstr, fmt.Sprintf("--cluster=%s", t.Value))
case "unit":
argstr = append(argstr, fmt.Sprintf("--units=%s", t.Value))
case "group":
argstr = append(argstr, fmt.Sprintf("--group=%s", t.Value))
default:
tagsstr = append(tagsstr, fmt.Sprintf("%s=%s", t.Key, t.Value))
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}
if s.config.AddGangliaGroup {
argstr = append(argstr, fmt.Sprintf("--group=%s", conf.Group))
if len(tagsstr) > 0 {
argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
}
if s.config.AddUnits && len(conf.Unit) > 0 {
argstr = append(argstr, fmt.Sprintf("--units=%s", conf.Unit))
argstr = append(argstr, fmt.Sprintf("--name=%s", point.Name()))
for _, f := range point.FieldList() {
if f.Key == "value" {
switch f.Value.(type) {
case float64:
argstr = append(argstr, fmt.Sprintf("--value=%v", f.Value.(float64)))
argstr = append(argstr, "--type=double")
case float32:
argstr = append(argstr, fmt.Sprintf("--value=%v", f.Value.(float32)))
argstr = append(argstr, "--type=float")
case int:
argstr = append(argstr, fmt.Sprintf("--value=%d", f.Value.(int)))
argstr = append(argstr, "--type=int32")
case int64:
argstr = append(argstr, fmt.Sprintf("--value=%d", f.Value.(int64)))
argstr = append(argstr, "--type=int32")
case string:
argstr = append(argstr, fmt.Sprintf("--value=%q", f.Value.(string)))
argstr = append(argstr, "--type=string")
}
if len(s.config.ClusterName) > 0 {
argstr = append(argstr, fmt.Sprintf("--cluster=%s", s.config.ClusterName))
}
// if s.config.AddTagsAsDesc && len(tagsstr) > 0 {
// argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
// }
if len(s.gmetric_config) > 0 {
argstr = append(argstr, fmt.Sprintf("--conf=%s", s.gmetric_config))
}
if s.config.AddTypeToName {
argstr = append(argstr, fmt.Sprintf("--name=%s", GangliaMetricName(point)))
} else {
argstr = append(argstr, fmt.Sprintf("--name=%s", conf.Name))
}
argstr = append(argstr, fmt.Sprintf("--slope=%s", conf.Slope))
argstr = append(argstr, fmt.Sprintf("--value=%s", conf.Value))
argstr = append(argstr, fmt.Sprintf("--type=%s", conf.Type))
argstr = append(argstr, fmt.Sprintf("--tmax=%d", conf.Tmax))
cclog.ComponentDebug(s.name, s.gmetric_path, strings.Join(argstr, " "))
command := exec.Command(s.gmetric_path, argstr...)
command.Wait()
_, err = command.Output()
log.Print(s.gmetric_path, " ", strings.Join(argstr, " "))
// command := exec.Command(string(GMETRIC_EXEC), strings.Join(argstr, " "))
// command.Wait()
// _, err := command.Output()
return err
}
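// Illustrative example (hypothetical values): for the common metric
// "mem_free" with value 1024.0 the assembled command looks roughly like
//   gmetric --group=memory --name=mem_free --slope=both --value=1024.000000 --type=float --tmax=180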
@@ -87,38 +80,3 @@ func (s *GangliaSink) Flush() error {
func (s *GangliaSink) Close() {
}
func NewGangliaSink(name string, config json.RawMessage) (Sink, error) {
s := new(GangliaSink)
s.name = fmt.Sprintf("GangliaSink(%s)", name)
s.config.AddTagsAsDesc = false
s.config.AddGangliaGroup = false
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return nil, err
}
}
s.gmetric_path = ""
s.gmetric_config = ""
if len(s.config.GmetricPath) > 0 {
p, err := exec.LookPath(s.config.GmetricPath)
if err == nil {
s.gmetric_path = p
}
}
if len(s.gmetric_path) == 0 {
p, err := exec.LookPath(string(GMETRIC_EXEC))
if err == nil {
s.gmetric_path = p
}
}
if len(s.gmetric_path) == 0 {
return nil, errors.New("cannot find executable 'gmetric'")
}
if len(s.config.GmetricConfig) > 0 {
s.gmetric_config = s.config.GmetricConfig
}
return s, nil
}

View File

@@ -1,21 +0,0 @@
## `ganglia` sink
The `ganglia` sink uses the `gmetric` tool of the [Ganglia Monitoring System](http://ganglia.info/) to submit the metrics
### Configuration structure
```json
{
"<name>": {
"type": "ganglia",
"meta_as_tags" : true,
"gmetric_path" : "/path/to/gmetric",
"add_ganglia_group" : true
}
}
```
- `type`: makes the sink a `ganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmetric_path`: Path to `gmetric` executable (optional). If not given, the sink searches in `$PATH` for `gmetric`.
- `add_ganglia_group`: Add `--group=X` based on meta information to the `gmetric` call. Some old versions of `gmetric` do not support the `--group` option.

View File

@@ -2,109 +2,63 @@ package sinks
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
)
type HttpSinkConfig struct {
defaultSinkConfig
URL string `json:"url,omitempty"`
JWT string `json:"jwt,omitempty"`
Timeout string `json:"timeout,omitempty"`
MaxIdleConns int `json:"max_idle_connections,omitempty"`
IdleConnTimeout string `json:"idle_connection_timeout,omitempty"`
FlushDelay string `json:"flush_delay,omitempty"`
}
type HttpSink struct {
sink
client *http.Client
url, jwt string
encoder *influx.Encoder
lock sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
buffer *bytes.Buffer
flushTimer *time.Timer
config HttpSinkConfig
maxIdleConns int
idleConnTimeout time.Duration
timeout time.Duration
flushDelay time.Duration
}
func (s *HttpSink) Write(m lp.CCMetric) error {
if s.buffer.Len() == 0 && s.flushDelay != 0 {
// This is the first write since the last flush, start the flushTimer!
if s.flushTimer != nil && s.flushTimer.Stop() {
cclog.ComponentDebug("HttpSink", "unexpected: the flushTimer was already running?")
func (s *HttpSink) Init(config sinkConfig) error {
s.name = "HttpSink"
if len(config.Host) == 0 || len(config.Port) == 0 || len(config.Database) == 0 {
return errors.New("`host`, `port` and `database` config options required for TCP sink")
}
// Run a batched flush for all lines that have arrived in the last second
s.flushTimer = time.AfterFunc(s.flushDelay, func() {
if err := s.Flush(); err != nil {
cclog.ComponentError("HttpSink", "flush failed:", err.Error())
}
})
}
s.client = &http.Client{}
s.url = fmt.Sprintf("http://%s:%s/%s", config.Host, config.Port, config.Database)
s.port = config.Port
s.jwt = config.Password
s.buffer = &bytes.Buffer{}
s.encoder = influx.NewEncoder(s.buffer)
s.encoder.SetPrecision(time.Second)
p := m.ToPoint(s.meta_as_tags)
s.lock.Lock()
_, err := s.encoder.Encode(p)
s.lock.Unlock() // defer does not work here as Flush() takes the lock as well
if err != nil {
return err
}
// Flush synchronously if "flush_delay" is zero
if s.flushDelay == 0 {
return s.Flush()
}
return nil
}
func (s *HttpSink) Write(point lp.CCMetric) error {
_, err := s.encoder.Encode(point)
return err
}
func (s *HttpSink) Flush() error {
// buffer is read by client.Do, prevent concurrent modifications
s.lock.Lock()
defer s.lock.Unlock()
// Do not flush empty buffer
if s.buffer.Len() == 0 {
return nil
}
// Create new request to send buffer
req, err := http.NewRequest(http.MethodPost, s.config.URL, s.buffer)
req, err := http.NewRequest(http.MethodPost, s.url, s.buffer)
if err != nil {
return err
}
// Set authorization header
if len(s.config.JWT) != 0 {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.config.JWT))
if len(s.jwt) != 0 {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.jwt))
}
// Send
res, err := s.client.Do(req)
// Clear buffer
s.buffer.Reset()
// Handle transport/tcp errors
if err != nil {
return err
}
// Handle application errors
if res.StatusCode != http.StatusOK {
if res.StatusCode != 200 {
return errors.New(res.Status)
}
@@ -112,65 +66,5 @@ func (s *HttpSink) Flush() error {
}
func (s *HttpSink) Close() {
s.flushTimer.Stop()
if err := s.Flush(); err != nil {
cclog.ComponentError("HttpSink", "flush failed:", err.Error())
}
s.client.CloseIdleConnections()
}
func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
s := new(HttpSink)
// Set default values
s.name = fmt.Sprintf("HttpSink(%s)", name)
s.config.MaxIdleConns = 10
s.config.IdleConnTimeout = "5s"
s.config.Timeout = "5s"
s.config.FlushDelay = "1s"
// Read config
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
if len(s.config.URL) == 0 {
return nil, errors.New("`url` config option is required for HTTP sink")
}
if s.config.MaxIdleConns > 0 {
s.maxIdleConns = s.config.MaxIdleConns
}
if len(s.config.IdleConnTimeout) > 0 {
t, err := time.ParseDuration(s.config.IdleConnTimeout)
if err == nil {
s.idleConnTimeout = t
}
}
if len(s.config.Timeout) > 0 {
t, err := time.ParseDuration(s.config.Timeout)
if err == nil {
s.timeout = t
}
}
if len(s.config.FlushDelay) > 0 {
t, err := time.ParseDuration(s.config.FlushDelay)
if err == nil {
s.flushDelay = t
}
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.meta_as_tags[k] = true
}
tr := &http.Transport{
MaxIdleConns: s.maxIdleConns,
IdleConnTimeout: s.idleConnTimeout,
}
s.client = &http.Client{Transport: tr, Timeout: s.timeout}
s.buffer = &bytes.Buffer{}
s.encoder = influx.NewEncoder(s.buffer)
s.encoder.SetPrecision(time.Second)
return s, nil
}

View File

@@ -1,29 +0,0 @@
## `http` sink
The `http` sink uses POST requests to an HTTP server to submit the metrics in the InfluxDB line-protocol format. It uses JSON web tokens for authentication. The sink creates batches of metrics before sending, to reduce the HTTP traffic.
### Configuration structure
```json
{
"<name>": {
"type": "http",
"meta_as_tags" : true,
"url" : "https://my-monitoring.example.com:1234/api/write",
"jwt" : "blabla.blabla.blabla",
"timeout": "5s",
"max_idle_connections" : 10,
"idle_connection_timeout" : "5s",
"flush_delay": "2s",
}
}
```
- `type`: makes the sink an `http` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `url`: The full URL of the endpoint
- `jwt`: JSON web token for authentication (using the *Bearer* scheme)
- `timeout`: General timeout for the HTTP client (default '5s')
- `max_idle_connections`: Maximum number of idle connections (default 10)
- `idle_connection_timeout`: Timeout for idle connections (default '5s')
- `flush_delay`: Batch all writes arriving during this duration (default '1s'; batching can be disabled by setting it to 0), as sketched below
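A minimal, self-contained sketch of this flush-delay batching (all names and the printed output are made up; the real sink posts the buffer via HTTP and additionally guards the timer against re-arming):
```go
package main

import (
	"bytes"
	"fmt"
	"sync"
	"time"
)

type batcher struct {
	lock       sync.Mutex
	buf        bytes.Buffer
	timer      *time.Timer
	flushDelay time.Duration
}

func (b *batcher) Write(line string) {
	b.lock.Lock()
	defer b.lock.Unlock()
	if b.buf.Len() == 0 && b.flushDelay != 0 {
		// First write since the last flush: arm the flush timer
		b.timer = time.AfterFunc(b.flushDelay, func() { b.Flush() })
	}
	b.buf.WriteString(line + "\n")
}

func (b *batcher) Flush() {
	b.lock.Lock()
	defer b.lock.Unlock()
	if b.buf.Len() == 0 {
		return
	}
	fmt.Print("POST batch:\n" + b.buf.String()) // stand-in for the real HTTP request
	b.buf.Reset()
}

func main() {
	b := &batcher{flushDelay: time.Second}
	b.Write("m1,hostname=n1 value=1")
	b.Write("m2,hostname=n1 value=2")
	time.Sleep(2 * time.Second) // let the timer fire once; both lines go out together
}
```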

View File

@@ -1,185 +0,0 @@
package sinks
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
)
type InfluxAsyncSinkConfig struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
Organization string `json:"organization,omitempty"`
SSL bool `json:"ssl,omitempty"`
RetentionPol string `json:"retention_policy,omitempty"`
// Maximum number of points sent to server in single request. Default 5000
BatchSize uint `json:"batch_size,omitempty"`
// Interval, in ms, at which the buffer is flushed if it has not already been written (by reaching batch size). Default 1000ms
FlushInterval uint `json:"flush_interval,omitempty"`
InfluxRetryInterval string `json:"retry_interval"`
InfluxExponentialBase uint `json:"retry_exponential_base"`
InfluxMaxRetries uint `json:"max_retries"`
InfluxMaxRetryTime string `json:"max_retry_time"`
}
type InfluxAsyncSink struct {
sink
client influxdb2.Client
writeApi influxdb2Api.WriteAPI
errors <-chan error
config InfluxAsyncSinkConfig
influxRetryInterval uint
influxMaxRetryTime uint
}
func (s *InfluxAsyncSink) connect() error {
var auth string
var uri string
if s.config.SSL {
uri = fmt.Sprintf("https://%s:%s", s.config.Host, s.config.Port)
} else {
uri = fmt.Sprintf("http://%s:%s", s.config.Host, s.config.Port)
}
if len(s.config.User) == 0 {
auth = s.config.Password
} else {
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
}
cclog.ComponentDebug(s.name, "Using URI", uri, "Org", s.config.Organization, "Bucket", s.config.Database)
clientOptions := influxdb2.DefaultOptions()
if s.config.BatchSize != 0 {
clientOptions.SetBatchSize(s.config.BatchSize)
}
if s.config.FlushInterval != 0 {
clientOptions.SetFlushInterval(s.config.FlushInterval)
}
clientOptions.SetTLSConfig(
&tls.Config{
InsecureSkipVerify: true,
},
)
clientOptions.SetMaxRetryInterval(s.influxRetryInterval)
clientOptions.SetMaxRetryTime(s.influxMaxRetryTime)
clientOptions.SetExponentialBase(s.config.InfluxExponentialBase)
clientOptions.SetMaxRetries(s.config.InfluxMaxRetries)
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
s.writeApi = s.client.WriteAPI(s.config.Organization, s.config.Database)
ok, err := s.client.Ping(context.Background())
if err != nil {
return err
}
if !ok {
return fmt.Errorf("connection to %s not healthy", uri)
}
return nil
}
func (s *InfluxAsyncSink) Write(m lp.CCMetric) error {
s.writeApi.WritePoint(
m.ToPoint(s.meta_as_tags),
)
return nil
}
func (s *InfluxAsyncSink) Flush() error {
s.writeApi.Flush()
return nil
}
func (s *InfluxAsyncSink) Close() {
cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
s.writeApi.Flush()
s.client.Close()
}
func NewInfluxAsyncSink(name string, config json.RawMessage) (Sink, error) {
s := new(InfluxAsyncSink)
s.name = fmt.Sprintf("InfluxSink(%s)", name)
// Set default for maximum number of points sent to server in single request.
s.config.BatchSize = 100
s.influxRetryInterval = uint(time.Duration(1) * time.Second)
s.config.InfluxRetryInterval = "1s"
s.influxMaxRetryTime = uint(7 * time.Duration(24) * time.Hour)
s.config.InfluxMaxRetryTime = "168h"
s.config.InfluxMaxRetries = 20
s.config.InfluxExponentialBase = 2
// Default retry intervals (in seconds)
// 1 2
// 2 4
// 4 8
// 8 16
// 16 32
// 32 64
// 64 128
// 128 256
// 256 512
// 512 1024
// 1024 2048
// 2048 4096
// 4096 8192
// 8192 16384
// 16384 32768
// 32768 65536
// 65536 131072
// 131072 262144
// 262144 524288
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
if len(s.config.Host) == 0 ||
len(s.config.Port) == 0 ||
len(s.config.Database) == 0 ||
len(s.config.Organization) == 0 ||
len(s.config.Password) == 0 {
return nil, errors.New("not all configuration variables set required by InfluxAsyncSink")
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.meta_as_tags[k] = true
}
toUint := func(duration string, def uint) uint {
t, err := time.ParseDuration(duration)
if err == nil {
return uint(t.Milliseconds())
}
return def
}
s.influxRetryInterval = toUint(s.config.InfluxRetryInterval, s.influxRetryInterval)
s.influxMaxRetryTime = toUint(s.config.InfluxMaxRetryTime, s.influxMaxRetryTime)
// Connect to InfluxDB server
if err := s.connect(); err != nil {
return nil, fmt.Errorf("unable to connect: %v", err)
}
// Start background: Read from error channel
s.errors = s.writeApi.Errors()
go func() {
for err := range s.errors {
cclog.ComponentError(s.name, err.Error())
}
}()
return s, nil
}

View File

@@ -1,44 +0,0 @@
## `influxasync` sink
The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go.dev/github.com/influxdata/influxdb-client-go/v2) to write the metrics to an InfluxDB database in a **non-blocking** fashion. It only supports V2 write endpoints (InfluxDB 1.8.0 or later).
### Configuration structure
```json
{
"<name>": {
"type": "influxasync",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
"user": "exampleuser",
"password" : "examplepw",
"organization": "myorg",
"ssl": true,
"batch_size": 200,
"retry_interval" : "1s",
"retry_exponential_base" : 2,
"max_retries": 20,
"max_retry_time" : "168h"
}
}
```
- `type`: makes the sink an `influxasync` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are written to this bucket
- `host`: Hostname of the InfluxDB database server
- `port`: Portnumber (as string) of the InfluxDB database server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
- `organization`: Organization in the InfluxDB
- `ssl`: Use SSL connection
- `batch_size`: batch up metrics internally, default 100
- `retry_interval`: Base retry interval for failed write requests, default 1s
- `retry_exponential_base`: The retry interval is exponentially increased with this base, default 2
- `max_retries`: Maximum number of retry attempts
- `max_retry_time`: Maximum time to retry failed writes, default 168h (one week)
For information about the calculation of the retry interval settings, see the [official influxdb-client-go documentation](https://github.com/influxdata/influxdb-client-go#handling-of-failed-async-writes); a small, self-contained sketch of the interval arithmetic follows.
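A rough sketch of that arithmetic, under the assumption that retry *n* waits `retry_interval * retry_exponential_base^(n-1)` until `max_retries` or `max_retry_time` is exhausted (this mirrors the retry table commented in the deleted `influxAsyncSink.go` above, not the library's actual scheduler):
```go
package main

import (
	"fmt"
	"time"
)

func main() {
	interval := time.Second         // retry_interval
	base := time.Duration(2)        // retry_exponential_base
	maxRetries := 20                // max_retries
	maxRetryTime := 168 * time.Hour // max_retry_time

	elapsed, wait := time.Duration(0), interval
	for n := 1; n <= maxRetries && elapsed+wait <= maxRetryTime; n++ {
		elapsed += wait
		fmt.Printf("retry %2d after %v (total wait %v)\n", n, wait, elapsed)
		wait *= base // exponential back-off
	}
}
```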

View File

@@ -3,88 +3,77 @@ package sinks
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
"log"
)
type InfluxSinkConfig struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
Organization string `json:"organization,omitempty"`
SSL bool `json:"ssl,omitempty"`
RetentionPol string `json:"retention_policy,omitempty"`
InfluxRetryInterval string `json:"retry_interval"`
InfluxExponentialBase uint `json:"retry_exponential_base"`
InfluxMaxRetries uint `json:"max_retries"`
InfluxMaxRetryTime string `json:"max_retry_time"`
//InfluxMaxRetryDelay string `json:"max_retry_delay"` // It is mentioned in the docs but there is no way to set it
}
type InfluxSink struct {
sink
client influxdb2.Client
writeApi influxdb2Api.WriteAPIBlocking
config InfluxSinkConfig
influxRetryInterval uint
influxMaxRetryTime uint
//influxMaxRetryDelay uint
retPolicy string
}
func (s *InfluxSink) connect() error {
var auth string
var uri string
if s.config.SSL {
uri = fmt.Sprintf("https://%s:%s", s.config.Host, s.config.Port)
if s.ssl {
uri = fmt.Sprintf("https://%s:%s", s.host, s.port)
} else {
uri = fmt.Sprintf("http://%s:%s", s.config.Host, s.config.Port)
uri = fmt.Sprintf("http://%s:%s", s.host, s.port)
}
if len(s.config.User) == 0 {
auth = s.config.Password
if len(s.user) == 0 {
auth = s.password
} else {
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
}
cclog.ComponentDebug(s.name, "Using URI", uri, "Org", s.config.Organization, "Bucket", s.config.Database)
clientOptions := influxdb2.DefaultOptions()
clientOptions.SetTLSConfig(
&tls.Config{
InsecureSkipVerify: true,
},
)
clientOptions.SetMaxRetryInterval(s.influxRetryInterval)
clientOptions.SetMaxRetryTime(s.influxMaxRetryTime)
clientOptions.SetExponentialBase(s.config.InfluxExponentialBase)
clientOptions.SetMaxRetries(s.config.InfluxMaxRetries)
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
s.writeApi = s.client.WriteAPIBlocking(s.config.Organization, s.config.Database)
ok, err := s.client.Ping(context.Background())
if err != nil {
return err
}
if !ok {
return fmt.Errorf("connection to %s not healthy", uri)
auth = fmt.Sprintf("%s:%s", s.user, s.password)
}
log.Print("Using URI ", uri, " Org ", s.organization, " Bucket ", s.database)
s.client = influxdb2.NewClientWithOptions(uri, auth,
influxdb2.DefaultOptions().SetTLSConfig(&tls.Config{InsecureSkipVerify: true}))
s.writeApi = s.client.WriteAPIBlocking(s.organization, s.database)
return nil
}
func (s *InfluxSink) Write(m lp.CCMetric) error {
err :=
s.writeApi.WritePoint(
context.Background(),
m.ToPoint(s.meta_as_tags),
)
func (s *InfluxSink) Init(config sinkConfig) error {
s.name = "InfluxSink"
if len(config.Host) == 0 ||
len(config.Port) == 0 ||
len(config.Database) == 0 ||
len(config.Organization) == 0 ||
len(config.Password) == 0 {
return errors.New("Not all configuration variables set required by InfluxSink")
}
s.host = config.Host
s.port = config.Port
s.database = config.Database
s.organization = config.Organization
s.user = config.User
s.password = config.Password
s.ssl = config.SSL
s.meta_as_tags = config.MetaAsTags
return s.connect()
}
func (s *InfluxSink) Write(point lp.CCMetric) error {
tags := map[string]string{}
fields := map[string]interface{}{}
for _, t := range point.TagList() {
tags[t.Key] = t.Value
}
if s.meta_as_tags {
for _, m := range point.MetaList() {
tags[m.Key] = m.Value
}
}
for _, f := range point.FieldList() {
fields[f.Key] = f.Value
}
p := influxdb2.NewPoint(point.Name(), tags, fields, point.Time())
err := s.writeApi.WritePoint(context.Background(), p)
return err
}
@@ -93,52 +82,6 @@ func (s *InfluxSink) Flush() error {
}
func (s *InfluxSink) Close() {
cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
log.Print("Closing InfluxDB connection")
s.client.Close()
}
func NewInfluxSink(name string, config json.RawMessage) (Sink, error) {
s := new(InfluxSink)
s.name = fmt.Sprintf("InfluxSink(%s)", name)
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
s.influxRetryInterval = uint(time.Duration(1) * time.Second)
s.config.InfluxRetryInterval = "1s"
s.influxMaxRetryTime = uint(7 * time.Duration(24) * time.Hour)
s.config.InfluxMaxRetryTime = "168h"
s.config.InfluxMaxRetries = 20
s.config.InfluxExponentialBase = 2
if len(s.config.Host) == 0 ||
len(s.config.Port) == 0 ||
len(s.config.Database) == 0 ||
len(s.config.Organization) == 0 ||
len(s.config.Password) == 0 {
return nil, errors.New("not all configuration variables set required by InfluxSink")
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.meta_as_tags[k] = true
}
toUint := func(duration string, def uint) uint {
t, err := time.ParseDuration(duration)
if err == nil {
return uint(t.Milliseconds())
}
return def
}
s.influxRetryInterval = toUint(s.config.InfluxRetryInterval, s.influxRetryInterval)
s.influxMaxRetryTime = toUint(s.config.InfluxMaxRetryTime, s.influxMaxRetryTime)
// Connect to InfluxDB server
if err := s.connect(); err != nil {
return nil, fmt.Errorf("unable to connect: %v", err)
}
return s, nil
}

View File

@@ -1,42 +0,0 @@
## `influxdb` sink
The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.dev/github.com/influxdata/influxdb-client-go/v2) to write the metrics to an InfluxDB database in a **blocking** fashion. It only supports V2 write endpoints (InfluxDB 1.8.0 or later).
### Configuration structure
```json
{
"<name>": {
"type": "influxdb",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
"user": "exampleuser",
"password" : "examplepw",
"organization": "myorg",
"ssl": true,
"retry_interval" : "1s",
"retry_exponential_base" : 2,
"max_retries": 20,
"max_retry_time" : "168h"
}
}
```
- `type`: makes the sink an `influxdb` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are written to this bucket
- `host`: Hostname of the InfluxDB database server
- `port`: Portnumber (as string) of the InfluxDB database server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
- `organization`: Organization in the InfluxDB
- `ssl`: Use SSL connection
- `retry_interval`: Base retry interval for failed write requests, default 1s
- `retry_exponential_base`: The retry interval is exponentially increased with this base, default 2
- `max_retries`: Maximum number of retry attempts
- `max_retry_time`: Maximum time to retry failed writes, default 168h (one week)
For information about the calculation of the retry interval settings, see the [official influxdb-client-go documentation](https://github.com/influxdata/influxdb-client-go#handling-of-failed-async-writes)

View File

@@ -1,283 +0,0 @@
package sinks
/*
#cgo CFLAGS: -DGM_PROTOCOL_GUARD
#cgo LDFLAGS: -L. -Wl,--unresolved-symbols=ignore-in-object-files
#include <stdlib.h>
// This is a copy&paste snippet of ganglia.h (BSD-3 license)
// See https://github.com/ganglia/monitor-core
// for further information
enum ganglia_slope {
GANGLIA_SLOPE_ZERO = 0,
GANGLIA_SLOPE_POSITIVE,
GANGLIA_SLOPE_NEGATIVE,
GANGLIA_SLOPE_BOTH,
GANGLIA_SLOPE_UNSPECIFIED,
GANGLIA_SLOPE_DERIVATIVE,
GANGLIA_SLOPE_LAST_LEGAL_VALUE=GANGLIA_SLOPE_DERIVATIVE
};
typedef enum ganglia_slope ganglia_slope_t;
typedef struct Ganglia_pool* Ganglia_pool;
typedef struct Ganglia_gmond_config* Ganglia_gmond_config;
typedef struct Ganglia_udp_send_channels* Ganglia_udp_send_channels;
struct Ganglia_metric {
Ganglia_pool pool;
struct Ganglia_metadata_message *msg;
char *value;
void *extra;
};
typedef struct Ganglia_metric * Ganglia_metric;
#ifdef __cplusplus
extern "C" {
#endif
Ganglia_gmond_config Ganglia_gmond_config_create(char *path, int fallback_to_default);
//void Ganglia_gmond_config_destroy(Ganglia_gmond_config config);
Ganglia_udp_send_channels Ganglia_udp_send_channels_create(Ganglia_pool p, Ganglia_gmond_config config);
void Ganglia_udp_send_channels_destroy(Ganglia_udp_send_channels channels);
int Ganglia_udp_send_message(Ganglia_udp_send_channels channels, char *buf, int len );
Ganglia_metric Ganglia_metric_create( Ganglia_pool parent_pool );
int Ganglia_metric_set( Ganglia_metric gmetric, char *name, char *value, char *type, char *units, unsigned int slope, unsigned int tmax, unsigned int dmax);
int Ganglia_metric_send( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels );
//int Ganglia_metadata_send( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels );
//int Ganglia_metadata_send_real( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels, char *override_string );
void Ganglia_metadata_add( Ganglia_metric gmetric, char *name, char *value );
//int Ganglia_value_send( Ganglia_metric gmetric, Ganglia_udp_send_channels send_channels );
void Ganglia_metric_destroy( Ganglia_metric gmetric );
Ganglia_pool Ganglia_pool_create( Ganglia_pool parent );
void Ganglia_pool_destroy( Ganglia_pool pool );
//ganglia_slope_t cstr_to_slope(const char* str);
//const char* slope_to_cstr(unsigned int slope);
#ifdef __cplusplus
}
#endif
*/
import "C"
import (
"encoding/json"
"errors"
"fmt"
"unsafe"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/dl"
)
const (
GANGLIA_LIB_NAME = "libganglia.so"
GANGLIA_LIB_DL_FLAGS = dl.RTLD_LAZY | dl.RTLD_GLOBAL
GMOND_CONFIG_FILE = `/etc/ganglia/gmond.conf`
)
// type LibgangliaSinkSpecialMetric struct {
// MetricName string `json:"metric_name,omitempty"`
// NewName string `json:"new_name,omitempty"`
// Slope string `json:"slope,omitempty"`
// }
type LibgangliaSinkConfig struct {
defaultSinkConfig
GangliaLib string `json:"libganglia_path,omitempty"`
GmondConfig string `json:"gmond_config,omitempty"`
AddGangliaGroup bool `json:"add_ganglia_group,omitempty"`
AddTypeToName bool `json:"add_type_to_name,omitempty"`
AddUnits bool `json:"add_units,omitempty"`
ClusterName string `json:"cluster_name,omitempty"`
//SpecialMetrics map[string]LibgangliaSinkSpecialMetric `json:"rename_metrics,omitempty"` // Map to rename metric name from key to value
//AddTagsAsDesc bool `json:"add_tags_as_desc,omitempty"`
}
type LibgangliaSink struct {
sink
config LibgangliaSinkConfig
global_context C.Ganglia_pool
gmond_config C.Ganglia_gmond_config
send_channels C.Ganglia_udp_send_channels
cstrCache map[string]*C.char
}
func (s *LibgangliaSink) Write(point lp.CCMetric) error {
var err error = nil
var c_name *C.char
var c_value *C.char
var c_type *C.char
var c_unit *C.char
// helper function for looking up C strings in the cache
lookup := func(key string) *C.char {
if _, exist := s.cstrCache[key]; !exist {
s.cstrCache[key] = C.CString(key)
}
return s.cstrCache[key]
}
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}
if s.config.AddTypeToName {
conf.Name = GangliaMetricName(point)
}
c_value = C.CString(conf.Value)
c_type = lookup(conf.Type)
c_name = lookup(conf.Name)
// Add unit
unit := ""
if s.config.AddUnits {
unit = conf.Unit
}
c_unit = lookup(unit)
// Determine the slope of the metric. Ganglia's own collectors mostly use
// 'both', but the mem and swap totals use 'zero'.
slope_type := C.GANGLIA_SLOPE_BOTH
switch conf.Slope {
case "zero":
slope_type = C.GANGLIA_SLOPE_ZERO
case "both":
slope_type = C.GANGLIA_SLOPE_BOTH
}
// Create a new Ganglia metric
gmetric := C.Ganglia_metric_create(s.global_context)
// Set name, value, type and unit in the Ganglia metric
// The default slope_type is both directions, so up and down. Some metrics want 'zero' slope, probably constant.
// The 'tmax' value is by default 300.
rval := C.int(0)
rval = C.Ganglia_metric_set(gmetric, c_name, c_value, c_type, c_unit, C.uint(slope_type), C.uint(conf.Tmax), 0)
switch rval {
case 1:
C.free(unsafe.Pointer(c_value))
return errors.New("invalid parameters")
case 2:
C.free(unsafe.Pointer(c_value))
return errors.New("one of your parameters has an invalid character '\"'")
case 3:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the type parameter \"%s\" is not a valid type", conf.Type)
case 4:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the value parameter \"%s\" does not represent a number", conf.Value)
default:
}
// Set the cluster name, otherwise it takes it from the configuration file
if len(s.config.ClusterName) > 0 {
C.Ganglia_metadata_add(gmetric, lookup("CLUSTER"), lookup(s.config.ClusterName))
}
// Set the group metadata in the Ganglia metric if configured
if s.config.AddGangliaGroup {
c_group := lookup(conf.Group)
C.Ganglia_metadata_add(gmetric, lookup("GROUP"), c_group)
}
// Now we send the metric
// gmetric does provide some more options like description and other options
// but they are not provided by the collectors
rval = C.Ganglia_metric_send(gmetric, s.send_channels)
if rval != 0 {
err = fmt.Errorf("there was an error sending metric %s to %d of the send channels ", point.Name(), rval)
// fall through to use Ganglia_metric_destroy from common cleanup
}
// Cleanup Ganglia metric
C.Ganglia_metric_destroy(gmetric)
// Free the value C string, the only one not stored in the cache
C.free(unsafe.Pointer(c_value))
return err
}
func (s *LibgangliaSink) Flush() error {
return nil
}
func (s *LibgangliaSink) Close() {
// Destroy Ganglia configuration struct
// (not done by gmetric, I thought I am more clever but no...)
//C.Ganglia_gmond_config_destroy(s.gmond_config)
// Destroy Ganglia pool
C.Ganglia_pool_destroy(s.global_context)
// Cleanup C string cache
for _, cstr := range s.cstrCache {
C.free(unsafe.Pointer(cstr))
}
}
func NewLibgangliaSink(name string, config json.RawMessage) (Sink, error) {
s := new(LibgangliaSink)
var err error = nil
s.name = fmt.Sprintf("LibgangliaSink(%s)", name)
//s.config.AddTagsAsDesc = false
s.config.AddGangliaGroup = false
s.config.AddTypeToName = false
s.config.AddUnits = true
s.config.GmondConfig = string(GMOND_CONFIG_FILE)
s.config.GangliaLib = string(GANGLIA_LIB_NAME)
if len(config) > 0 {
err = json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
return nil, err
}
}
lib := dl.New(s.config.GangliaLib, GANGLIA_LIB_DL_FLAGS)
if lib == nil {
return nil, fmt.Errorf("error instantiating DynamicLibrary for %s", s.config.GangliaLib)
}
err = lib.Open()
if err != nil {
return nil, fmt.Errorf("error opening %s: %v", s.config.GangliaLib, err)
}
// Set up cache for the C strings
s.cstrCache = make(map[string]*C.char)
// s.cstrCache["globals"] = C.CString("globals")
// s.cstrCache["override_hostname"] = C.CString("override_hostname")
// s.cstrCache["override_ip"] = C.CString("override_ip")
// Add some constant strings
s.cstrCache["GROUP"] = C.CString("GROUP")
s.cstrCache["CLUSTER"] = C.CString("CLUSTER")
s.cstrCache[""] = C.CString("")
// Add cluster name for lookup in Write()
if len(s.config.ClusterName) > 0 {
s.cstrCache[s.config.ClusterName] = C.CString(s.config.ClusterName)
}
// Add supported types for later lookup in Write()
s.cstrCache["double"] = C.CString("double")
s.cstrCache["int32"] = C.CString("int32")
s.cstrCache["string"] = C.CString("string")
// Create Ganglia pool
s.global_context = C.Ganglia_pool_create(nil)
// Load Ganglia configuration
s.cstrCache[s.config.GmondConfig] = C.CString(s.config.GmondConfig)
s.gmond_config = C.Ganglia_gmond_config_create(s.cstrCache[s.config.GmondConfig], 0)
//globals := C.cfg_getsec(gmond_config, s.cstrCache["globals"])
//override_hostname := C.cfg_getstr(globals, s.cstrCache["override_hostname"])
//override_ip := C.cfg_getstr(globals, s.cstrCache["override_ip"])
s.send_channels = C.Ganglia_udp_send_channels_create(s.global_context, s.gmond_config)
return s, nil
}

View File

@@ -1,41 +0,0 @@
## `libganglia` sink
The `libganglia` sink interacts directly with the library of the [Ganglia Monitoring System](http://ganglia.info/) to submit the metrics. Consequently, it needs to be installed on all nodes. But this is commonly the case if you want to use Ganglia, because it requires at least a node daemon (`gmond` or `ganglia-monitor`) to work.
The `libganglia` sink probably has less overhead compared to the `ganglia` sink because it does not spawn a `gmetric` process per metric but initializes the environment and UDP connections only once.
### Configuration structure
```json
{
"<name>": {
"type": "libganglia",
"gmetric_config" : "/path/to/gmetric/config",
"cluster_name": "MyCluster",
"add_ganglia_group" : true,
"add_type_to_name": true,
"add_units" : true
}
}
```
- `type`: makes the sink a `libganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmond_config`: Path to the Ganglia configuration file `gmond.conf` (default: `/etc/ganglia/gmond.conf`)
- `cluster_name`: Set a cluster name for the metric. If not set, it is taken from `gmond_config`
- `add_ganglia_group`: Add a Ganglia metric group based on meta information. Some old versions of `gmetric` do not support the `--group` option
- `add_type_to_name`: Ganglia commonly uses only node-level metrics but with cc-metric-collector, there are metrics for cpus, memory domains, CPU sockets and the whole node. In order to get unique metric names, this option prefixes the metric name with `<type><type-id>_` or `device_` depending on the metric tags and meta information. For metrics of the whole node `type=node`, no prefix is added
- `add_units`: Add metric value unit if there is a `unit` entry in the metric tags or meta information
### Ganglia Installation
My development system is Ubuntu 20.04. To install the required libraries with `apt`:
```
$ sudo apt install libganglia1
```
The `libganglia.so` gets installed in `/usr/lib`. The Ganglia headers `libganglia1-dev` are **not** required.
I added a `Makefile` in the `sinks` subfolder that searches for the library in `/usr` and creates a symlink (`sinks/libganglia.so`) for running/building the cc-metric-collector. So just type `make` before running/building in the main folder or the `sinks` subfolder.

View File

@@ -1,27 +1,42 @@
package sinks
import (
// "time"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
type defaultSinkConfig struct {
MetaAsTags []string `json:"meta_as_tags,omitempty"`
type sinkConfig struct {
Type string `json:"type"`
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
Organization string `json:"organization,omitempty"`
SSL bool `json:"ssl,omitempty"`
MetaAsTags bool `json:"meta_as_tags,omitempty"`
}
type sink struct {
meta_as_tags map[string]bool // Use meta data tags as tags
name string // Name of the sink
host string
port string
user string
password string
database string
organization string
ssl bool
meta_as_tags bool
name string
}
type Sink interface {
Write(point lp.CCMetric) error // Write metric to the sink
Flush() error // Flush buffered metrics
Close() // Close / finish metric sink
Name() string // Name of the metric sink
Init(config sinkConfig) error
Write(point lp.CCMetric) error
Flush() error
Close()
Name() string
}
// Name returns the name of the metric sink
func (s *sink) Name() string {
return s.name
}

View File

@@ -2,62 +2,64 @@ package sinks
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
influx "github.com/influxdata/line-protocol"
nats "github.com/nats-io/nats.go"
"log"
"time"
)
type NatsSinkConfig struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Database string `json:"database,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
}
type NatsSink struct {
sink
client *nats.Conn
encoder *influx.Encoder
buffer *bytes.Buffer
config NatsSinkConfig
}
func (s *NatsSink) connect() error {
var err error
var uinfo nats.Option = nil
var nc *nats.Conn
if len(s.config.User) > 0 && len(s.config.Password) > 0 {
uinfo = nats.UserInfo(s.config.User, s.config.Password)
}
uri := fmt.Sprintf("nats://%s:%s", s.config.Host, s.config.Port)
cclog.ComponentDebug(s.name, "Connect to", uri)
uinfo := nats.UserInfo(s.user, s.password)
uri := fmt.Sprintf("nats://%s:%s", s.host, s.port)
log.Print("Using URI ", uri)
s.client = nil
if uinfo != nil {
nc, err = nats.Connect(uri, uinfo)
} else {
nc, err = nats.Connect(uri)
}
nc, err := nats.Connect(uri, uinfo)
if err != nil {
cclog.ComponentError(s.name, "Connect to", uri, "failed:", err.Error())
log.Fatal(err)
return err
}
s.client = nc
return nil
}
func (s *NatsSink) Write(m lp.CCMetric) error {
func (s *NatsSink) Init(config sinkConfig) error {
s.name = "NatsSink"
if len(config.Host) == 0 ||
len(config.Port) == 0 ||
len(config.Database) == 0 {
return errors.New("Not all configuration variables set required by NatsSink")
}
s.host = config.Host
s.port = config.Port
s.database = config.Database
s.organization = config.Organization
s.user = config.User
s.password = config.Password
// Setup Influx line protocol
s.buffer = &bytes.Buffer{}
s.buffer.Grow(1025)
s.encoder = influx.NewEncoder(s.buffer)
s.encoder.SetPrecision(time.Second)
s.encoder.SetMaxLineBytes(1024)
// Setup infos for connection
return s.connect()
}
func (s *NatsSink) Write(point lp.CCMetric) error {
if s.client != nil {
_, err := s.encoder.Encode(m.ToPoint(s.meta_as_tags))
_, err := s.encoder.Encode(point)
if err != nil {
cclog.ComponentError(s.name, "Write:", err.Error())
log.Print(err)
return err
}
}
@@ -66,8 +68,7 @@ func (s *NatsSink) Write(m lp.CCMetric) error {
func (s *NatsSink) Flush() error {
if s.client != nil {
if err := s.client.Publish(s.config.Database, s.buffer.Bytes()); err != nil {
cclog.ComponentError(s.name, "Flush:", err.Error())
if err := s.client.Publish(s.database, s.buffer.Bytes()); err != nil {
return err
}
s.buffer.Reset()
@@ -76,41 +77,8 @@ func (s *NatsSink) Flush() error {
}
func (s *NatsSink) Close() {
log.Print("Closing Nats connection")
if s.client != nil {
cclog.ComponentDebug(s.name, "Close")
s.client.Close()
}
}
func NewNatsSink(name string, config json.RawMessage) (Sink, error) {
s := new(NatsSink)
s.name = fmt.Sprintf("NatsSink(%s)", name)
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return nil, err
}
}
if len(s.config.Host) == 0 ||
len(s.config.Port) == 0 ||
len(s.config.Database) == 0 {
return nil, errors.New("not all configuration variables set required by NatsSink")
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.meta_as_tags[k] = true
}
// Setup Influx line protocol
s.buffer = &bytes.Buffer{}
s.buffer.Grow(1025)
s.encoder = influx.NewEncoder(s.buffer)
s.encoder.SetPrecision(time.Second)
s.encoder.SetMaxLineBytes(1024)
// Setup infos for connection
if err := s.connect(); err != nil {
return nil, fmt.Errorf("unable to connect: %v", err)
}
return s, nil
}

View File

@@ -1,28 +0,0 @@
## `nats` sink
The `nats` sink publishes all metrics into a NATS network. The publishing subject is the database name provided in the configuration file; a minimal client sketch follows the option list.
### Configuration structure
```json
{
"<name>": {
"type": "nats",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
"user": "exampleuser",
"password" : "examplepw"
}
}
```
- `type`: makes the sink a `nats` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are published with this subject
- `host`: Hostname of the NATS server
- `port`: Portnumber (as string) of the NATS server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
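Putting these options together, a minimal stand-alone publisher doing essentially what this sink does (host, credentials and subject are the placeholder values from the example above):
```go
package main

import (
	"log"

	nats "github.com/nats-io/nats.go"
)

func main() {
	// Connect with basic auth, mirroring the host/port/user/password options
	nc, err := nats.Connect("nats://dbhost.example.com:4222",
		nats.UserInfo("exampleuser", "examplepw"))
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// The sink batches metrics in InfluxDB line protocol and publishes the
	// buffer under the "database" subject when it flushes
	batch := []byte("cpu_user,hostname=n1,type=cpu,type-id=0 value=42.0\n")
	if err := nc.Publish("mymetrics", batch); err != nil {
		log.Fatal(err)
	}
	if err := nc.Flush(); err != nil { // make sure the message left the client
		log.Fatal(err)
	}
}
```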

View File

@@ -1,199 +0,0 @@
package sinks
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"sync"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
type PrometheusSinkConfig struct {
defaultSinkConfig
Host string `json:"host,omitempty"`
Port string `json:"port"`
Path string `json:"path,omitempty"`
GroupAsNameSpace bool `json:"group_as_namespace,omitempty"`
// User string `json:"user,omitempty"`
// Password string `json:"password,omitempty"`
// FlushDelay string `json:"flush_delay,omitempty"`
}
type PrometheusSink struct {
sink
config PrometheusSinkConfig
labelMetrics map[string]*prometheus.GaugeVec
nodeMetrics map[string]prometheus.Gauge
promWg sync.WaitGroup
promServer *http.Server
}
func intToFloat64(input interface{}) (float64, error) {
switch value := input.(type) {
case float64:
return value, nil
case float32:
return float64(value), nil
case int:
return float64(value), nil
case int32:
return float64(value), nil
case int64:
return float64(value), nil
}
return 0, errors.New("cannot cast value to float64")
}
func getLabelValue(metric lp.CCMetric) []string {
labelValues := []string{}
if tid, tidok := metric.GetTag("type-id"); tidok && metric.HasTag("type") {
labelValues = append(labelValues, tid)
}
if d, ok := metric.GetTag("device"); ok {
labelValues = append(labelValues, d)
} else if d, ok := metric.GetMeta("device"); ok {
labelValues = append(labelValues, d)
}
return labelValues
}
func getLabelNames(metric lp.CCMetric) []string {
labelNames := []string{}
if t, tok := metric.GetTag("type"); tok && metric.HasTag("type-id") {
labelNames = append(labelNames, t)
}
if _, ok := metric.GetTag("device"); ok {
labelNames = append(labelNames, "device")
} else if _, ok := metric.GetMeta("device"); ok {
labelNames = append(labelNames, "device")
}
return labelNames
}
func (s *PrometheusSink) newMetric(metric lp.CCMetric) error {
var value float64 = 0
name := metric.Name()
opts := prometheus.GaugeOpts{
Name: name,
}
labels := getLabelNames(metric)
labelValues := getLabelValue(metric)
if len(labels) > 0 && len(labels) != len(labelValues) {
return fmt.Errorf("cannot detect metric labels for metric %s", name)
}
if metricValue, ok := metric.GetField("value"); ok {
if floatValue, err := intToFloat64(metricValue); err == nil {
value = floatValue
} else {
return fmt.Errorf("metric %s with value '%v' cannot be casted to float64", name, metricValue)
}
}
if s.config.GroupAsNameSpace && metric.HasMeta("group") {
g, _ := metric.GetMeta("group")
opts.Namespace = strings.ToLower(g)
}
if len(labels) > 0 {
new := prometheus.NewGaugeVec(opts, labels)
new.WithLabelValues(labelValues...).Set(value)
s.labelMetrics[name] = new
prometheus.Register(new)
} else {
new := prometheus.NewGauge(opts)
new.Set(value)
s.nodeMetrics[name] = new
prometheus.Register(new)
}
return nil
}
func (s *PrometheusSink) updateMetric(metric lp.CCMetric) error {
var value float64 = 0.0
name := metric.Name()
labelValues := getLabelValue(metric)
if metricValue, ok := metric.GetField("value"); ok {
if floatValue, err := intToFloat64(metricValue); err == nil {
value = floatValue
} else {
return fmt.Errorf("metric %s with value '%v' cannot be casted to float64", name, metricValue)
}
}
if len(labelValues) > 0 {
if _, ok := s.labelMetrics[name]; !ok {
err := s.newMetric(metric)
if err != nil {
return err
}
}
s.labelMetrics[name].WithLabelValues(labelValues...).Set(value)
} else {
if _, ok := s.nodeMetrics[name]; !ok {
err := s.newMetric(metric)
if err != nil {
return err
}
}
s.nodeMetrics[name].Set(value)
}
return nil
}
func (s *PrometheusSink) Write(m lp.CCMetric) error {
return s.updateMetric(m)
}
func (s *PrometheusSink) Flush() error {
return nil
}
func (s *PrometheusSink) Close() {
cclog.ComponentDebug(s.name, "CLOSE")
s.promServer.Shutdown(context.Background())
s.promWg.Wait()
}
func NewPrometheusSink(name string, config json.RawMessage) (Sink, error) {
s := new(PrometheusSink)
s.name = "PrometheusSink"
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return nil, err
}
}
if len(s.config.Port) == 0 {
err := errors.New("not all configuration variables set required by PrometheusSink")
cclog.ComponentError(s.name, err.Error())
return nil, err
}
s.labelMetrics = make(map[string]*prometheus.GaugeVec)
s.nodeMetrics = make(map[string]prometheus.Gauge)
s.promWg.Add(1)
go func() {
router := mux.NewRouter()
// Prometheus endpoint
router.Path("/" + s.config.Path).Handler(promhttp.Handler())
url := fmt.Sprintf("%s:%s", s.config.Host, s.config.Port)
cclog.ComponentDebug(s.name, "Serving Prometheus metrics at", fmt.Sprintf("%s:%s/%s", s.config.Host, s.config.Port, s.config.Path))
s.promServer = &http.Server{Addr: url, Handler: router}
err := s.promServer.ListenAndServe()
if err != nil && err.Error() != "http: Server closed" {
cclog.ComponentError(s.name, err.Error())
}
s.promWg.Done()
}()
return s, nil
}
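For reference, the gauge-vector pattern used above can be reproduced in a few lines. The following is a minimal, self-contained sketch; the metric name, label value, and listen address are invented for illustration and are not taken from the collector:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Gauge vector keyed by a "device" label, like the sink's labelMetrics map.
	vec := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_temperature"},
		[]string{"device"},
	)
	prometheus.MustRegister(vec)

	// Setting a value for a label combination creates or reuses the child
	// gauge, which is why the sink only creates each metric once and then
	// updates it on subsequent writes.
	vec.WithLabelValues("gpu0").Set(42.0)

	// Serve the default registry, as the sink does via gorilla/mux.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```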

View File

@@ -1,23 +0,0 @@
## `prometheus` sink
The `prometheus` sink publishes all metrics via an HTTP server ready to be scraped by a [Prometheus](https://prometheus.io) server. It creates gauge metrics for all node metrics and gauge vectors for all metrics with a subtype like 'device', 'cpu' or 'socket'.
### Configuration structure
```json
{
"<name>": {
"type": "prometheus",
"host": "localhost",
"port": "8080",
"path": "metrics"
}
}
```
- `type`: makes the sink a `prometheus` sink
- `host`: IP address or hostname the HTTP server binds to
- `port`: Port number (as string) for the HTTP server
- `path`: Path under which the metrics are served. The metrics are published at `host`:`port`/`path`
- `group_as_namespace`: Most metrics carry a group like 'memory' or 'load' as meta information. With this option enabled, metric names are extended to `group`_`name` where possible (see the example below).
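For illustration, with the configuration sketched below (the sink name `my_prom_sink` is made up), a hypothetical metric `mem_used` carrying the meta information `group=memory` is registered under the Prometheus namespace `memory` and therefore exported as `memory_mem_used`:

```json
{
  "my_prom_sink": {
    "type": "prometheus",
    "host": "localhost",
    "port": "8080",
    "path": "metrics",
    "group_as_namespace": true
  }
}
```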

View File

@@ -1,80 +0,0 @@
package sinks
import (
"encoding/json"
"fmt"
"log"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
type SampleSinkConfig struct {
// defines JSON tags for 'type' and 'meta_as_tags' (string list)
// See: metricSink.go
defaultSinkConfig
// Additional config options, for SampleSink
}
type SampleSink struct {
// declares elements 'name' and 'meta_as_tags' (string to bool map!)
sink
config SampleSinkConfig // entry point to the SampleSinkConfig
}
// Implement functions required for Sink interface
// Write(...), Flush(), Close()
// See: metricSink.go
// Code to submit a single CCMetric to the sink
func (s *SampleSink) Write(point lp.CCMetric) error {
// Based on s.meta_as_tags, promote meta information to tags (see the sketch after this file)
log.Print(point)
return nil
}
// If the sink uses batched sends internally, tell it to flush its buffers
func (s *SampleSink) Flush() error {
return nil
}
// Close sink: close network connection, close files, close libraries, ...
func (s *SampleSink) Close() {
cclog.ComponentDebug(s.name, "CLOSE")
}
// New function to create a new instance of the sink
// Initialize the sink by giving it a name and reading in the config JSON
func NewSampleSink(name string, config json.RawMessage) (Sink, error) {
s := new(SampleSink)
// Set name of sampleSink
// The name should be chosen in such a way that different instances of SampleSink can be distinguished
s.name = fmt.Sprintf("SampleSink(%s)", name) // Always specify a name here
// Set defaults in s.config
// Allow overwriting these defaults by reading config JSON
// Read in the config JSON
if len(config) > 0 {
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.meta_as_tags[k] = true
}
// Check if all required fields in the config are set
// E.g. use 'len(s.config.Option) > 0' for string settings
// Establish connection to the server, library, ...
// Check required files exist and lookup path(s) of executable(s)
// Return (nil, meaningful error message) in case of errors
return s, nil
}
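The `Write` stub above only logs the metric; the comment about `meta_as_tags` is not acted upon. Below is a hedged sketch of what that could look like in a concrete sink, assuming the `lp.CCMetric` interface provides `Meta()` and `AddTag()` accessors; check the actual interface in ccMetric.go before relying on this:

```go
// Sketch only: promote selected meta information to tags before output.
func (s *SampleSink) writeWithMetaAsTags(point lp.CCMetric) error {
	for key, value := range point.Meta() { // assumed accessor
		if s.meta_as_tags[key] {
			point.AddTag(key, value) // assumed mutator
		}
	}
	log.Print(point)
	return nil
}
```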

View File

@@ -2,7 +2,6 @@ package sinks
import (
"encoding/json"
"fmt"
"os"
"sync"
@@ -10,90 +9,76 @@ import (
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
const SINK_MAX_FORWARD = 50
// Map of all available sinks
var AvailableSinks = map[string]func(name string, config json.RawMessage) (Sink, error){
"ganglia": NewGangliaSink,
"libganglia": NewLibgangliaSink,
"stdout": NewStdoutSink,
"nats": NewNatsSink,
"influxdb": NewInfluxSink,
"influxasync": NewInfluxAsyncSink,
"http": NewHttpSink,
var AvailableSinks = map[string]Sink{
"influxdb": new(InfluxSink),
"stdout": new(StdoutSink),
"nats": new(NatsSink),
"http": new(HttpSink),
"ganglia": new(GangliaSink),
}
// Metric collector manager data structure
type sinkManager struct {
input chan lp.CCMetric // input channel
outputs []Sink // List of sinks to use
done chan bool // channel to finish / stop metric sink manager
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
sinks map[string]Sink // Mapping sink name to sink
maxForward int // number of metrics to write maximally in one iteration
config []sinkConfig // json encoded config for sink manager
}
// Sink manager access functions
type SinkManager interface {
Init(wg *sync.WaitGroup, sinkConfigFile string) error
AddInput(input chan lp.CCMetric)
AddOutput(name string, config json.RawMessage) error
AddOutput(config json.RawMessage) error
Start()
Close()
}
// Init initializes the sink manager by:
// * Reading its configuration file
// * Adding the configured sinks and providing them with the corresponding config
func (sm *sinkManager) Init(wg *sync.WaitGroup, sinkConfigFile string) error {
sm.input = nil
sm.outputs = make([]Sink, 0)
sm.done = make(chan bool)
sm.wg = wg
sm.sinks = make(map[string]Sink, 0)
sm.maxForward = SINK_MAX_FORWARD
if len(sinkConfigFile) == 0 {
return nil
}
sm.config = make([]sinkConfig, 0)
// Read sink config file
if len(sinkConfigFile) > 0 {
configFile, err := os.Open(sinkConfigFile)
if err != nil {
cclog.ComponentError("SinkManager", err.Error())
return err
}
defer configFile.Close()
// Parse config
jsonParser := json.NewDecoder(configFile)
var rawConfigs map[string]json.RawMessage
var rawConfigs []json.RawMessage
err = jsonParser.Decode(&rawConfigs)
if err != nil {
cclog.ComponentError("SinkManager", err.Error())
return err
}
// Start sinks
for name, raw := range rawConfigs {
err = sm.AddOutput(name, raw)
for _, raw := range rawConfigs {
err = sm.AddOutput(raw)
if err != nil {
cclog.ComponentError("SinkManager", err.Error())
continue
}
}
}
return nil
}
// Start starts the sink manager's background task, which
// distributes received metrics to the sinks
func (sm *sinkManager) Start() {
batchcount := 20
sm.wg.Add(1)
go func() {
defer sm.wg.Done()
// Sink manager is done
done := func() {
for _, s := range sm.sinks {
for _, s := range sm.outputs {
s.Flush()
s.Close()
}
@@ -101,16 +86,6 @@ func (sm *sinkManager) Start() {
cclog.ComponentDebug("SinkManager", "DONE")
}
toTheSinks := func(p lp.CCMetric) {
// Send received metric to all outputs
cclog.ComponentDebug("SinkManager", "WRITE", p)
for _, s := range sm.sinks {
if err := s.Write(p); err != nil {
cclog.ComponentError("SinkManager", "WRITE", s.Name(), "write failed:", err.Error())
}
}
}
for {
select {
case <-sm.done:
@@ -118,11 +93,21 @@ func (sm *sinkManager) Start() {
return
case p := <-sm.input:
toTheSinks(p)
for i := 0; len(sm.input) > 0 && i < sm.maxForward; i++ {
p := <-sm.input
toTheSinks(p)
// Send received metric to all outputs
cclog.ComponentDebug("SinkManager", "WRITE", p)
for _, s := range sm.outputs {
s.Write(p)
}
// Flush all outputs
if batchcount == 0 {
cclog.ComponentDebug("SinkManager", "FLUSH")
for _, s := range sm.outputs {
s.Flush()
}
batchcount = 20
}
batchcount--
}
}
}()
@@ -136,26 +121,29 @@ func (sm *sinkManager) AddInput(input chan lp.CCMetric) {
sm.input = input
}
func (sm *sinkManager) AddOutput(name string, rawConfig json.RawMessage) error {
func (sm *sinkManager) AddOutput(rawConfig json.RawMessage) error {
var err error
var sinkConfig defaultSinkConfig
if len(rawConfig) > 0 {
err := json.Unmarshal(rawConfig, &sinkConfig)
var config sinkConfig
if len(rawConfig) > 3 {
err = json.Unmarshal(rawConfig, &config)
if err != nil {
cclog.ComponentError("SinkManager", "SKIP", config.Type, "JSON config error:", err.Error())
return err
}
}
if _, found := AvailableSinks[sinkConfig.Type]; !found {
cclog.ComponentError("SinkManager", "SKIP", name, "unknown sink:", sinkConfig.Type)
if _, found := AvailableSinks[config.Type]; !found {
err = fmt.Errorf("unknown sink type '%s'", config.Type)
cclog.ComponentError("SinkManager", "SKIP", err.Error())
return err
}
s, err := AvailableSinks[sinkConfig.Type](name, rawConfig)
s := AvailableSinks[config.Type]
err = s.Init(config)
if err != nil {
cclog.ComponentError("SinkManager", "SKIP", s.Name(), "initialization failed:", err.Error())
return err
}
sm.sinks[name] = s
cclog.ComponentDebug("SinkManager", "ADD SINK", s.Name(), "with name", fmt.Sprintf("'%s'", name))
sm.outputs = append(sm.outputs, s)
sm.config = append(sm.config, config)
cclog.ComponentDebug("SinkManager", "ADD SINK", s.Name())
return nil
}
@@ -169,7 +157,7 @@ func (sm *sinkManager) Close() {
// New creates a new initialized sink manager
func New(wg *sync.WaitGroup, sinkConfigFile string) (SinkManager, error) {
sm := new(sinkManager)
sm := &sinkManager{}
err := sm.Init(wg, sinkConfigFile)
if err != nil {
return nil, err

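To summarize the batching behavior introduced in `Start` above (write every received metric to each sink, flush all sinks after every 20 writes), here is a condensed, runnable sketch; the types and the channel-close shutdown are simplifications for illustration, not the manager's actual code:

```go
package main

import (
	"fmt"
	"sync"
)

type sink interface {
	Write(v float64) error
	Flush() error
}

type stdoutSink struct{}

func (stdoutSink) Write(v float64) error { fmt.Println("write", v); return nil }
func (stdoutSink) Flush() error          { fmt.Println("flush"); return nil }

// drain forwards values to all sinks and flushes after every batchSize
// writes, mirroring the batchcount logic in sinkManager.Start.
func drain(input <-chan float64, sinks []sink, batchSize int) {
	count := batchSize
	for v := range input {
		for _, s := range sinks {
			s.Write(v)
		}
		count--
		if count == 0 {
			for _, s := range sinks {
				s.Flush()
			}
			count = batchSize
		}
	}
	// Final flush on shutdown, like the manager's done() handler.
	for _, s := range sinks {
		s.Flush()
	}
}

func main() {
	input := make(chan float64, 8)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		drain(input, []sink{stdoutSink{}}, 2)
	}()
	for i := 0; i < 5; i++ {
		input <- float64(i)
	}
	close(input)
	wg.Wait()
}
```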
Some files were not shown because too many files have changed in this diff.