Compare commits


5 Commits

Author        SHA1        Message                                                                      Date
Thomas Roehl  9dd6ff1a76  Add StatsAPI to README                                                       2022-04-02 16:07:13 +02:00
Thomas Roehl  257b4a64b5  Add missing main API file                                                    2022-04-02 16:06:51 +02:00
Thomas Roehl  5eeb097136  Add stats counters to sinks                                                  2022-04-02 16:06:03 +02:00
Thomas Roehl  4a4992877c  Add stats counters to collectors                                             2022-04-02 16:05:52 +02:00
Thomas Roehl  9447685a69  Add StatsApi. Started if a configuration file is set in global config.json  2022-04-02 16:05:27 +02:00
126 changed files with 3628 additions and 11698 deletions

View File

@@ -3,6 +3,6 @@
"collectors" : ".github/ci-collectors.json", "collectors" : ".github/ci-collectors.json",
"receivers" : ".github/ci-receivers.json", "receivers" : ".github/ci-receivers.json",
"router" : ".github/ci-router.json", "router" : ".github/ci-router.json",
"interval": "5s", "interval": 5,
"duration": "1s" "duration": 1
} }
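
This hunk swaps `interval` and `duration` between Go duration strings ("5s", "1s") on one side and plain integer seconds on the other. A minimal sketch of parsing the string form, with illustrative type and field names (not the exact repository code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// ciConfig mirrors the duration-string form; the other side of the diff
// declares these fields as int (seconds) instead.
type ciConfig struct {
	Interval string `json:"interval"` // e.g. "5s"
	Duration string `json:"duration"` // e.g. "1s"
}

func main() {
	var cfg ciConfig
	if err := json.Unmarshal([]byte(`{"interval": "5s", "duration": "1s"}`), &cfg); err != nil {
		panic(err)
	}
	interval, err := time.ParseDuration(cfg.Interval)
	if err != nil {
		panic(err)
	}
	fmt.Println(interval) // 5s
}
```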

View File

@@ -1,8 +1,6 @@
{ {
"testoutput" : { "testoutput" : {
"type" : "stdout", "type" : "stdout",
"meta_as_tags" : [ "meta_as_tags" : true
"unit"
]
} }
} }
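
Similarly, `meta_as_tags` changes between a list of meta keys to promote to tags and a single boolean switch. A hedged sketch of decoding the list form (field names assumed):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// sinkConfig mirrors the list form; on the other side of the diff
// MetaAsTags is a bool (all-or-nothing) instead of a list of meta keys.
type sinkConfig struct {
	Type       string   `json:"type"`
	MetaAsTags []string `json:"meta_as_tags"`
}

func main() {
	var c sinkConfig
	if err := json.Unmarshal([]byte(`{"type": "stdout", "meta_as_tags": ["unit"]}`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.MetaAsTags) // [unit]
}
```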

View File

@@ -8,17 +8,16 @@ on:
push: push:
tags: tags:
- '**' - '**'
workflow_dispatch:
jobs: jobs:
# #
# Build on AlmaLinux 8 using go-toolset # Build on AlmaLinux 8.5 using go-toolset
# #
AlmaLinux8-RPM-build: AlmaLinux-RPM-build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux # See: https://hub.docker.com/_/almalinux
container: almalinux:8 container: almalinux:8.5
# The job outputs link to the outputs of the 'rpmrename' step # The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs # Only job outputs can be used in child jobs
outputs: outputs:
@@ -28,154 +27,61 @@ jobs:
# Use dnf to install development packages # Use dnf to install development packages
- name: Install development packages - name: Install development packages
run: | run: dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which
# Checkout git repository and submodules # Checkout git repository and submodules
# fetch-depth must be 0 to use git describe # fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout # See: https://github.com/marketplace/actions/checkout
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v2
with: with:
submodules: recursive submodules: recursive
fetch-depth: 0 fetch-depth: 0
# - name: Setup Golang # Use dnf to install build dependencies
# uses: actions/setup-go@v5 - name: Install build dependencies
# with: run: dnf --assumeyes builddep scripts/cc-metric-collector.spec
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
- name: RPM build MetricCollector - name: RPM build MetricCollector
id: rpmbuild id: rpmbuild
run: | run: make RPM
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM # AlmaLinux 8.5 is a derivative of RedHat Enterprise Linux 8 (UBI8),
# AlmaLinux 8 is a derivative of RedHat Enterprise Linux 8 (UBI8),
# so the created RPMs both contain the substring 'el8' in the RPM file names # so the created RPMs both contain the substring 'el8' in the RPM file names
# This step replaces the substring 'el8' with 'alma8'. It uses the move operation # This step replaces the substring 'el8' with 'alma85'. It uses the move operation
# because it is unclear whether the default AlmaLinux 8 container contains the # because it is unclear whether the default AlmaLinux 8.5 container contains the
# 'rename' command. This way we also get the new names for output. # 'rename' command. This way we also get the new names for output.
- name: Rename RPMs (s/el8/alma8/) - name: Rename RPMs (s/el8/alma85/)
id: rpmrename id: rpmrename
run: | run: |
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}" OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}" OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
NEW_RPM="${OLD_RPM/el8/alma8}" NEW_RPM="${OLD_RPM/el8/alma85}"
NEW_SRPM=${OLD_SRPM/el8/alma8} NEW_SRPM=${OLD_SRPM/el8/alma85}
mv "${OLD_RPM}" "${NEW_RPM}" mv "${OLD_RPM}" "${NEW_RPM}"
mv "${OLD_SRPM}" "${NEW_SRPM}" mv "${OLD_SRPM}" "${NEW_SRPM}"
echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT echo "::set-output name=SRPM::${NEW_SRPM}"
echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT echo "::set-output name=RPM::${NEW_RPM}"
# See: https://github.com/actions/upload-artifact # See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact - name: Save RPM as artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v2
with: with:
name: cc-metric-collector RPM for AlmaLinux 8 name: cc-metric-collector RPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.RPM }} path: ${{ steps.rpmrename.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact - name: Save SRPM as artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v2
with: with:
name: cc-metric-collector SRPM for AlmaLinux 8 name: cc-metric-collector SRPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.SRPM }} path: ${{ steps.rpmrename.outputs.SRPM }}
overwrite: true
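
This hunk also migrates the deprecated `::set-output` workflow command to appending `KEY=value` lines to the file named by `$GITHUB_OUTPUT`. The workflow does this in shell; a minimal Go sketch of the same mechanism (the filename in `main` is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// setOutput appends a KEY=value line to the file named by $GITHUB_OUTPUT,
// the mechanism that replaced the deprecated `::set-output` command.
func setOutput(key, value string) error {
	path := os.Getenv("GITHUB_OUTPUT")
	if path == "" {
		return fmt.Errorf("GITHUB_OUTPUT not set; not running under GitHub Actions")
	}
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	// One KEY=value line per output, same format the shell steps append.
	_, err = fmt.Fprintf(f, "%s=%s\n", key, value)
	return err
}

func main() {
	if err := setOutput("RPM", "cc-metric-collector-0.1-1.alma8.x86_64.rpm"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```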
#
# Build on AlmaLinux 9 using go-toolset
#
AlmaLinux9-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:9
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
outputs:
rpm : ${{steps.rpmrename.outputs.RPM}}
srpm : ${{steps.rpmrename.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: |
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
# AlmaLinux 9 is a derivative of RedHat Enterprise Linux 9 (UBI9),
# so the created RPMs both contain the substring 'el9' in the RPM file names
# This step replaces the substring 'el9' with 'alma9'. It uses the move operation
# because it is unclear whether the default AlmaLinux 9 container contains the
# 'rename' command. This way we also get the new names for output.
- name: Rename RPMs (s/el9/alma9/)
id: rpmrename
run: |
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
NEW_RPM="${OLD_RPM/el9/alma9}"
NEW_SRPM=${OLD_SRPM/el9/alma9}
mv "${OLD_RPM}" "${NEW_RPM}"
mv "${OLD_SRPM}" "${NEW_SRPM}"
echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT
echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector RPM for AlmaLinux 9
path: ${{ steps.rpmrename.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector SRPM for AlmaLinux 9
path: ${{ steps.rpmrename.outputs.SRPM }}
overwrite: true
# #
# Build on UBI 8 using go-toolset # Build on UBI 8 using go-toolset
# #
UBI-8-RPM-build: UBI-8-RPM-build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c35984d70cc534b3a3784e?container-tabs=gti # See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: registry.access.redhat.com/ubi8/ubi:8.8-1032.1692772289 container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065
# The job outputs link to the outputs of the 'rpmbuild' step # The job outputs link to the outputs of the 'rpmbuild' step
outputs: outputs:
rpm : ${{steps.rpmbuild.outputs.RPM}} rpm : ${{steps.rpmbuild.outputs.RPM}}
@@ -184,210 +90,36 @@ jobs:
# Use dnf to install development packages # Use dnf to install development packages
- name: Install development packages - name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git
# Checkout git repository and submodules # Checkout git repository and submodules
# fetch-depth must be 0 to use git describe # fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout # See: https://github.com/marketplace/actions/checkout
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v2
with: with:
submodules: recursive submodules: recursive
fetch-depth: 0 fetch-depth: 0
# - name: Setup Golang # Use dnf to install build dependencies
# uses: actions/setup-go@v5 - name: Install build dependencies
# with: run: dnf --assumeyes --disableplugin=subscription-manager builddep scripts/cc-metric-collector.spec
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
- name: RPM build MetricCollector - name: RPM build MetricCollector
id: rpmbuild id: rpmbuild
run: | run: make RPM
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
# See: https://github.com/actions/upload-artifact # See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact - name: Save RPM as artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v2
with: with:
name: cc-metric-collector RPM for UBI 8 name: cc-metric-collector RPM for UBI 8
path: ${{ steps.rpmbuild.outputs.RPM }} path: ${{ steps.rpmbuild.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact - name: Save SRPM as artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v2
with: with:
name: cc-metric-collector SRPM for UBI 8 name: cc-metric-collector SRPM for UBI 8
path: ${{ steps.rpmbuild.outputs.SRPM }} path: ${{ steps.rpmbuild.outputs.SRPM }}
overwrite: true
#
# Build on UBI 9 using go-toolset
#
UBI-9-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: redhat/ubi9
# The job outputs link to the outputs of the 'rpmbuild' step
outputs:
rpm : ${{steps.rpmbuild.outputs.RPM}}
srpm : ${{steps.rpmbuild.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector RPM for UBI 9
path: ${{ steps.rpmbuild.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector SRPM for UBI 9
path: ${{ steps.rpmbuild.outputs.SRPM }}
overwrite: true
#
# Build on Ubuntu 22.04 using official go package
#
Ubuntu-jammy-build:
runs-on: ubuntu-latest
container: ubuntu:22.04
# The job outputs link to the outputs of the 'debrename' step
# Only job outputs can be used in child jobs
outputs:
deb : ${{steps.debrename.outputs.DEB}}
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: DEB build MetricCollector
id: dpkg-build
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make DEB
- name: Rename DEB (add '_ubuntu22.04')
id: debrename
run: |
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb"
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
# See: https://github.com/actions/upload-artifact
- name: Save DEB as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector DEB for Ubuntu 22.04
path: ${{ steps.debrename.outputs.DEB }}
overwrite: true
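
The `rev | cut -d '.' -f 2- | rev` pipeline in the rename step above strips the trailing `.deb` before appending the distro tag. The same intent, sketched in Go with an illustrative filename:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	oldDeb := "cc-metric-collector_0.1_amd64.deb" // illustrative name
	// Drop the ".deb" suffix, append the distro tag, re-add the suffix.
	newDeb := strings.TrimSuffix(oldDeb, ".deb") + "_ubuntu22.04.deb"
	fmt.Println(newDeb) // cc-metric-collector_0.1_amd64_ubuntu22.04.deb
}
```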
#
# Build on Ubuntu 24.04 using official go package
#
Ubuntu-noblenumbat-build:
runs-on: ubuntu-latest
container: ubuntu:24.04
# The job outputs link to the outputs of the 'debrename' step
# Only job outputs can be used in child jobs
outputs:
deb : ${{steps.debrename.outputs.DEB}}
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: DEB build MetricCollector
id: dpkg-build
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make DEB
- name: Rename DEB (add '_ubuntu24.04')
id: debrename
run: |
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu24.04.deb"
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
# See: https://github.com/actions/upload-artifact
- name: Save DEB as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector DEB for Ubuntu 24.04
path: ${{ steps.debrename.outputs.DEB }}
overwrite: true
# #
# Create release with fresh RPMs # Create release with fresh RPMs
@@ -395,56 +127,28 @@ jobs:
Release: Release:
runs-on: ubuntu-latest runs-on: ubuntu-latest
# We need the RPMs, so add dependency # We need the RPMs, so add dependency
needs: [AlmaLinux8-RPM-build, AlmaLinux9-RPM-build, UBI-8-RPM-build, UBI-9-RPM-build, Ubuntu-jammy-build, Ubuntu-noblenumbat-build] needs: [AlmaLinux-RPM-build, UBI-8-RPM-build]
steps: steps:
# See: https://github.com/actions/download-artifact # See: https://github.com/actions/download-artifact
- name: Download AlmaLinux 8 RPM - name: Download AlmaLinux 8.5 RPM
uses: actions/download-artifact@v4 uses: actions/download-artifact@v2
with: with:
name: cc-metric-collector RPM for AlmaLinux 8 name: cc-metric-collector RPM for AlmaLinux 8.5
- name: Download AlmaLinux 8 SRPM - name: Download AlmaLinux 8.5 SRPM
uses: actions/download-artifact@v4 uses: actions/download-artifact@v2
with: with:
name: cc-metric-collector SRPM for AlmaLinux 8 name: cc-metric-collector SRPM for AlmaLinux 8.5
- name: Download AlmaLinux 9 RPM
uses: actions/download-artifact@v4
with:
name: cc-metric-collector RPM for AlmaLinux 9
- name: Download AlmaLinux 9 SRPM
uses: actions/download-artifact@v4
with:
name: cc-metric-collector SRPM for AlmaLinux 9
- name: Download UBI 8 RPM - name: Download UBI 8 RPM
uses: actions/download-artifact@v4 uses: actions/download-artifact@v2
with: with:
name: cc-metric-collector RPM for UBI 8 name: cc-metric-collector RPM for UBI 8
- name: Download UBI 8 SRPM - name: Download UBI 8 SRPM
uses: actions/download-artifact@v4 uses: actions/download-artifact@v2
with: with:
name: cc-metric-collector SRPM for UBI 8 name: cc-metric-collector SRPM for UBI 8
- name: Download UBI 9 RPM
uses: actions/download-artifact@v4
with:
name: cc-metric-collector RPM for UBI 9
- name: Download UBI 9 SRPM
uses: actions/download-artifact@v4
with:
name: cc-metric-collector SRPM for UBI 9
- name: Download Ubuntu 22.04 DEB
uses: actions/download-artifact@v4
with:
name: cc-metric-collector DEB for Ubuntu 22.04
- name: Download Ubuntu 24.04 DEB
uses: actions/download-artifact@v4
with:
name: cc-metric-collector DEB for Ubuntu 24.04
# The download actions do not publish the name of the downloaded file, # The download actions do not publish the name of the downloaded file,
# so we re-use the job outputs of the parent jobs. The files are all # so we re-use the job outputs of the parent jobs. The files are all
# downloaded to the current folder. # downloaded to the current folder.
@@ -454,51 +158,27 @@ jobs:
- name: Set RPM variables - name: Set RPM variables
id: files id: files
run: | run: |
ALMA_8_RPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.rpm}}") ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}")
ALMA_8_SRPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.srpm}}") ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}")
ALMA_9_RPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.rpm}}")
ALMA_9_SRPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.srpm}}")
UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}") UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}") UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
UBI_9_RPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.rpm}}") echo "ALMA_85_RPM::${ALMA_85_RPM}"
UBI_9_SRPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.srpm}}") echo "ALMA_85_SRPM::${ALMA_85_SRPM}"
U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}")
U_2404_DEB=$(basename "${{ needs.Ubuntu-noblenumbat-build.outputs.deb}}")
echo "ALMA_8_RPM::${ALMA_8_RPM}"
echo "ALMA_8_SRPM::${ALMA_8_SRPM}"
echo "ALMA_9_RPM::${ALMA_9_RPM}"
echo "ALMA_9_SRPM::${ALMA_9_SRPM}"
echo "UBI_8_RPM::${UBI_8_RPM}" echo "UBI_8_RPM::${UBI_8_RPM}"
echo "UBI_8_SRPM::${UBI_8_SRPM}" echo "UBI_8_SRPM::${UBI_8_SRPM}"
echo "UBI_9_RPM::${UBI_9_RPM}" echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}"
echo "UBI_9_SRPM::${UBI_9_SRPM}" echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}"
echo "U_2204_DEB::${U_2204_DEB}" echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}"
echo "U_2404_DEB::${U_2404_DEB}" echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}"
echo "ALMA_8_RPM=${ALMA_8_RPM}" >> $GITHUB_OUTPUT
echo "ALMA_8_SRPM=${ALMA_8_SRPM}" >> $GITHUB_OUTPUT
echo "ALMA_9_RPM=${ALMA_9_RPM}" >> $GITHUB_OUTPUT
echo "ALMA_9_SRPM=${ALMA_9_SRPM}" >> $GITHUB_OUTPUT
echo "UBI_8_RPM=${UBI_8_RPM}" >> $GITHUB_OUTPUT
echo "UBI_8_SRPM=${UBI_8_SRPM}" >> $GITHUB_OUTPUT
echo "UBI_9_RPM=${UBI_9_RPM}" >> $GITHUB_OUTPUT
echo "UBI_9_SRPM=${UBI_9_SRPM}" >> $GITHUB_OUTPUT
echo "U_2204_DEB=${U_2204_DEB}" >> $GITHUB_OUTPUT
echo "U_2404_DEB=${U_2404_DEB}" >> $GITHUB_OUTPUT
# See: https://github.com/softprops/action-gh-release # See: https://github.com/softprops/action-gh-release
- name: Release - name: Release
uses: softprops/action-gh-release@v2 uses: softprops/action-gh-release@v1
if: startsWith(github.ref, 'refs/tags/') if: startsWith(github.ref, 'refs/tags/')
with: with:
name: cc-metric-collector-${{github.ref_name}} name: cc-metric-collector-${{github.ref_name}}
files: | files: |
${{ steps.files.outputs.ALMA_8_RPM }} ${{ steps.files.outputs.ALMA_85_RPM }}
${{ steps.files.outputs.ALMA_8_SRPM }} ${{ steps.files.outputs.ALMA_85_SRPM }}
${{ steps.files.outputs.ALMA_9_RPM }}
${{ steps.files.outputs.ALMA_9_SRPM }}
${{ steps.files.outputs.UBI_8_RPM }} ${{ steps.files.outputs.UBI_8_RPM }}
${{ steps.files.outputs.UBI_8_SRPM }} ${{ steps.files.outputs.UBI_8_SRPM }}
${{ steps.files.outputs.UBI_9_RPM }}
${{ steps.files.outputs.UBI_9_SRPM }}
${{ steps.files.outputs.U_2204_DEB }}
${{ steps.files.outputs.U_2404_DEB }}

View File

@@ -4,31 +4,32 @@
name: Run Test name: Run Test
# Run on event push # Run on event push
on: on: push
push:
workflow_dispatch:
jobs: jobs:
# #
# Job build-1-21 # Job build-1-17
# Build on latest Ubuntu using golang version 1.21 # Build on latest Ubuntu using golang version 1.17
# #
build-1-21: build-1-17:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
# See: https://github.com/marketplace/actions/checkout # See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules # Checkout git repository and submodules
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v2
with: with:
submodules: recursive submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment # See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang - name: Setup Golang
uses: actions/setup-go@v5 uses: actions/setup-go@v2
with: with:
go-version: '1.21' go-version: '^1.17.7'
# Install libganglia
- name: Setup Ganglia
run: sudo apt install ganglia-monitor libganglia1
- name: Build MetricCollector - name: Build MetricCollector
run: make run: make
@@ -37,298 +38,31 @@ jobs:
run: ./cc-metric-collector --once --config .github/ci-config.json run: ./cc-metric-collector --once --config .github/ci-config.json
# #
# Job build-1-22 # Job build-1-16
# Build on latest Ubuntu using golang version 1.22 # Build on latest Ubuntu using golang version 1.16
# #
build-1-22: build-1-16:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
# See: https://github.com/marketplace/actions/checkout # See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules # Checkout git repository and submodules
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v2
with: with:
submodules: recursive submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment # See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang - name: Setup Golang
uses: actions/setup-go@v5 uses: actions/setup-go@v2
with: with:
go-version: '1.22' go-version: '^1.16.7' # The version AlmaLinux 8.5 uses
# Install libganglia
- name: Setup Ganglia
run: sudo apt install ganglia-monitor libganglia1
- name: Build MetricCollector - name: Build MetricCollector
run: make run: make
- name: Run MetricCollector once - name: Run MetricCollector once
run: ./cc-metric-collector --once --config .github/ci-config.json run: ./cc-metric-collector --once --config .github/ci-config.json
#
# Job build-1-23
# Build on latest Ubuntu using golang version 1.23
#
build-1-23:
runs-on: ubuntu-latest
steps:
# See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: '1.23'
- name: Build MetricCollector
run: make
- name: Run MetricCollector once
run: ./cc-metric-collector --once --config .github/ci-config.json
#
# Build on AlmaLinux 8
#
AlmaLinux8-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:8
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
steps:
# Use dnf to install development packages
- name: Install development packages
run: |
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on AlmaLinux 9
#
AlmaLinux9-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:9
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
steps:
# Use dnf to install development packages
- name: Install development packages
run: |
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on UBI 8 using go-toolset
#
UBI-8-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: redhat/ubi8
# The job outputs link to the outputs of the 'rpmbuild' step
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on UBI 9 using go-toolset
#
UBI-9-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: redhat/ubi9
# The job outputs link to the outputs of the 'rpmbuild' step
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on Ubuntu 22.04 using official go package
#
Ubuntu-jammy-build:
runs-on: ubuntu-latest
container: ubuntu:22.04
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# Use official golang package
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: DEB build MetricCollector
id: dpkg-build
run: |
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
make DEB
#
# Build on Ubuntu 24.04 using official go package
#
Ubuntu-noblenumbat-build:
runs-on: ubuntu-latest
container: ubuntu:24.04
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# Use official golang package
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: DEB build MetricCollector
id: dpkg-build
run: |
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
make DEB

View File

@@ -1,29 +0,0 @@
{
"title": "cc-metric-collector",
"description": "Monitoring agent for ClusterCockpit.",
"creators": [
{
"affiliation": "Regionales Rechenzentrum Erlangen, Friedrich-Alexander-Universität Erlangen-Nürnberg",
"name": "Thomas Gruber",
"orcid": "0000-0001-5560-6964"
},
{
"affiliation": "Steinbuch Centre for Computing, Karlsruher Institut für Technologie",
"name": "Holger Obermaier",
"orcid": "0000-0002-6830-6626"
}
],
"upload_type": "software",
"license": "MIT",
"access_right": "open",
"keywords": [
"performance-monitoring",
"cluster-monitoring",
"open-source"
],
"communities": [
{
"identifier": "clustercockpit"
}
]
}

View File

@@ -16,16 +16,15 @@ COMPONENT_DIRS := collectors \
internal/multiChanTicker internal/multiChanTicker
BINDIR = bin BINDIR = bin
GOBIN = $(shell which go)
.PHONY: all .PHONY: all
all: $(APP) all: $(APP)
$(APP): $(GOSRC) go.mod $(APP): $(GOSRC)
make -C collectors make -C collectors
$(GOBIN) get go get
$(GOBIN) build -o $(APP) $(GOSRC_APP) go build -o $(APP) $(GOSRC_APP)
install: $(APP) install: $(APP)
@WORKSPACE=$(PREFIX) @WORKSPACE=$(PREFIX)
@@ -52,25 +51,25 @@ clean:
.PHONY: fmt .PHONY: fmt
fmt: fmt:
$(GOBIN) fmt $(GOSRC_COLLECTORS) go fmt $(GOSRC_COLLECTORS)
$(GOBIN) fmt $(GOSRC_SINKS) go fmt $(GOSRC_SINKS)
$(GOBIN) fmt $(GOSRC_RECEIVERS) go fmt $(GOSRC_RECEIVERS)
$(GOBIN) fmt $(GOSRC_APP) go fmt $(GOSRC_APP)
@for F in $(GOSRC_INTERNAL); do $(GOBIN) fmt $$F; done @for F in $(GOSRC_INTERNAL); do go fmt $$F; done
# Examine Go source code and reports suspicious constructs # Examine Go source code and reports suspicious constructs
.PHONY: vet .PHONY: vet
vet: vet:
$(GOBIN) vet ./... go vet ./...
# Run linter for the Go programming language. # Run linter for the Go programming language.
# Using static analysis, it finds bugs and performance issues, offers simplifications, and enforces style rules # Using static analysis, it finds bugs and performance issues, offers simplifications, and enforces style rules
.PHONY: staticcheck .PHONY: staticcheck
staticcheck: staticcheck:
$(GOBIN) install honnef.co/go/tools/cmd/staticcheck@latest go install honnef.co/go/tools/cmd/staticcheck@latest
$$($(GOBIN) env GOPATH)/bin/staticcheck ./... $$(go env GOPATH)/bin/staticcheck ./...
.ONESHELL: .ONESHELL:
.PHONY: RPM .PHONY: RPM
@@ -84,7 +83,7 @@ RPM: scripts/cc-metric-collector.spec
@COMMITISH="HEAD" @COMMITISH="HEAD"
@VERS=$$(git describe --tags $${COMMITISH}) @VERS=$$(git describe --tags $${COMMITISH})
@VERS=$${VERS#v} @VERS=$${VERS#v}
@VERS=$$(echo $${VERS} | sed -e s+'-'+'_'+g) @VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
@eval $$(rpmspec --query --queryformat "NAME='%{name}' VERSION='%{version}' RELEASE='%{release}' NVR='%{NVR}' NVRA='%{NVRA}'" --define="VERS $${VERS}" "$${SPECFILE}") @eval $$(rpmspec --query --queryformat "NAME='%{name}' VERSION='%{version}' RELEASE='%{release}' NVR='%{NVR}' NVRA='%{NVRA}'" --define="VERS $${VERS}" "$${SPECFILE}")
@PREFIX="$${NAME}-$${VERSION}" @PREFIX="$${NAME}-$${VERSION}"
@FORMAT="tar.gz" @FORMAT="tar.gz"
@@ -96,8 +95,10 @@ RPM: scripts/cc-metric-collector.spec
@if [[ "$${GITHUB_ACTIONS}" == true ]]; then @if [[ "$${GITHUB_ACTIONS}" == true ]]; then
@ RPMFILE="$${RPMDIR}/$${ARCH}/$${NVRA}.rpm" @ RPMFILE="$${RPMDIR}/$${ARCH}/$${NVRA}.rpm"
@ SRPMFILE="$${SRPMDIR}/$${NVR}.src.rpm" @ SRPMFILE="$${SRPMDIR}/$${NVR}.src.rpm"
@ echo "SRPM=$${SRPMFILE}" >> $${GITHUB_OUTPUT} @ echo "RPM: $${RPMFILE}"
@ echo "RPM=$${RPMFILE}" >> $${GITHUB_OUTPUT} @ echo "SRPM: $${SRPMFILE}"
@ echo "::set-output name=SRPM::$${SRPMFILE}"
@ echo "::set-output name=RPM::$${RPMFILE}"
@fi @fi
.PHONY: DEB .PHONY: DEB
@@ -106,24 +107,21 @@ DEB: scripts/cc-metric-collector.deb.control $(APP)
@WORKSPACE=$${PWD}/.dpkgbuild @WORKSPACE=$${PWD}/.dpkgbuild
@DEBIANDIR=$${WORKSPACE}/debian @DEBIANDIR=$${WORKSPACE}/debian
@DEBIANBINDIR=$${WORKSPACE}/DEBIAN @DEBIANBINDIR=$${WORKSPACE}/DEBIAN
@mkdir --parents --verbose $${WORKSPACE} $${DEBIANBINDIR} @mkdir --parents --verbose $$WORKSPACE $$DEBIANBINDIR
#@mkdir --parents --verbose $$DEBIANDIR #@mkdir --parents --verbose $$DEBIANDIR
@CONTROLFILE="$${BASEDIR}/scripts/cc-metric-collector.deb.control" @CONTROLFILE="$${BASEDIR}/scripts/cc-metric-collector.deb.control"
@COMMITISH="HEAD" @COMMITISH="HEAD"
@VERS=$$(git describe --tags --abbrev=0 $${COMMITISH}) @VERS=$$(git describe --tags --abbrev=0 $${COMMITISH})
@if [ -z "$${VERS}" ]; then VERS=${GITHUB_REF_NAME}; fi
@VERS=$${VERS#v} @VERS=$${VERS#v}
@VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
@ARCH=$$(uname -m) @ARCH=$$(uname -m)
@ARCH=$$(echo $${ARCH} | sed -e s+'_'+'-'+g) @ARCH=$$(echo $$ARCH | sed -e s+'_'+'-'+g)
@if [ "$${ARCH}" = "x86-64" ]; then ARCH=amd64; fi
@PREFIX="$${NAME}-$${VERSION}_$${ARCH}" @PREFIX="$${NAME}-$${VERSION}_$${ARCH}"
@SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$${WORKSPACE}"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//') @SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$$WORKSPACE"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//')
@SIZE="$$(awk -v size="$${SIZE_BYTES}" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')" @SIZE="$$(awk -v size="$$SIZE_BYTES" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')"
@sed -e s+"{VERSION}"+"$${VERS}"+g -e s+"{INSTALLED_SIZE}"+"$${SIZE}"+g -e s+"{ARCH}"+"$${ARCH}"+g $${CONTROLFILE} > $${DEBIANBINDIR}/control #@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANDIR}/control
@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANBINDIR}/control
@make PREFIX=$${WORKSPACE} install @make PREFIX=$${WORKSPACE} install
@DEB_FILE="cc-metric-collector_$${VERS}_$${ARCH}.deb" @DEB_FILE="cc-metric-collector_$${VERS}_$${ARCH}.deb"
@dpkg-deb -b $${WORKSPACE} "$${DEB_FILE}" @dpkg-deb -b $${WORKSPACE} "$$DEB_FILE"
@if [ "$${GITHUB_ACTIONS}" = "true" ]; then
@ echo "DEB=$${DEB_FILE}" >> $${GITHUB_OUTPUT}
@fi
@rm -r "$${WORKSPACE}" @rm -r "$${WORKSPACE}"
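
The DEB target renders the control file by substituting `{VERSION}`, `{INSTALLED_SIZE}` (in KiB, rounded up) and `{ARCH}` with sed. A hedged Go sketch of the same templating (names illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// renderControl fills the control-file placeholders; sizeKiB mimics the
// awk expression int(bytes/1024 + 1) used in the Makefile.
func renderControl(template, vers, arch string, sizeBytes int64) string {
	sizeKiB := int(sizeBytes/1024 + 1)
	return strings.NewReplacer(
		"{VERSION}", vers,
		"{INSTALLED_SIZE}", strconv.Itoa(sizeKiB),
		"{ARCH}", arch,
	).Replace(template)
}

func main() {
	tmpl := "Version: {VERSION}\nInstalled-Size: {INSTALLED_SIZE}\nArchitecture: {ARCH}\n"
	fmt.Print(renderControl(tmpl, "0.1", "amd64", 12345678))
}
```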

View File

@@ -1,6 +1,5 @@
# cc-metric-collector # cc-metric-collector
A node agent for measuring, processing and forwarding node level metrics. It is part of the ClusterCockpit ecosystem.
A node agent for measuring, processing and forwarding node level metrics. It is part of the [ClusterCockpit ecosystem](./docs/introduction.md).
The metric collector sends (and receives) metrics in the [InfluxDB line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/) as it provides flexibility and a separation between tags (like index columns in relational databases) and fields (like data columns). The metric collector sends (and receives) metrics in the [InfluxDB line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/) as it provides flexibility and a separation between tags (like index columns in relational databases) and fields (like data columns).
@@ -8,14 +7,10 @@ There is a single timer loop that triggers all collectors serially, collects the
The receiver runs as a go routine side-by-side with the timer loop and asynchronously forwards received metrics to the sink. The receiver runs as a go routine side-by-side with the timer loop and asynchronously forwards received metrics to the sink.
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7438287.svg)](https://doi.org/10.5281/zenodo.7438287)
# Configuration # Configuration
Configuration is implemented using a single json document that is distributed over network and may be persisted as file. Configuration is implemented using a single json document that is distributed over network and may be persisted as file.
Supported metrics are documented [here](https://github.com/ClusterCockpit/cc-specifications/blob/master/interfaces/lineprotocol/README.md). Supported metrics are documented [here](https://github.com/ClusterCockpit/cc-specifications/blob/master/metrics/lineprotocol_alternative.md).
There is a main configuration file with basic settings that point to the other configuration files for the different components. There is a main configuration file with basic settings that point to the other configuration files for the different components.
@@ -25,19 +20,21 @@ There is a main configuration file with basic settings that point to the other c
"collectors" : "collectors.json", "collectors" : "collectors.json",
"receivers" : "receivers.json", "receivers" : "receivers.json",
"router" : "router.json", "router" : "router.json",
"interval": "10s", "stats_api" : "api.json",
"duration": "1s" "interval": 10,
"duration": 1
} }
``` ```
The `interval` defines how often the metrics should be read and sent to the sink. The `duration` tells collectors how long one measurement has to take. This is important for some collectors, like the `likwid` collector. For more information, see [here](./docs/configuration.md). The `interval` defines how often the metrics should be read and sent to the sink. The `duration` tells collectors how long one measurement has to take. This is important for some collectors, like the `likwid` collector.
See the component READMEs for their configuration: See the component READMEs for their configuration:
* [`collectors`](./collectors/README.md) * [`collectors`](./collectors/README.md)
* [`sinks`](./sinks/README.md) * [`sinks`](./sinks/README.md)
* [`receivers`](./receivers/README.md) * [`receivers`](./receivers/README.md)
* [`router`](./internal/metricRouter/README.md) * [`router`](./internal/metricRouter/README.md)
* [`stats_api`](./internal/metricRouter/StatsApi.md)
# Installation # Installation
@@ -48,7 +45,6 @@ $ go get (requires at least golang 1.16)
$ make $ make
``` ```
For more information, see [here](./docs/building.md).
# Running # Running
@@ -62,7 +58,6 @@ Usage of metric-collector:
-once -once
Run all collectors only once Run all collectors only once
``` ```
# Scenarios # Scenarios
The metric collector was designed with flexibility in mind, so it can be used in many scenarios. Here are a few: The metric collector was designed with flexibility in mind, so it can be used in many scenarios. Here are a few:
@@ -100,12 +95,11 @@ flowchart TD
``` ```
# Contributing # Contributing
The ClusterCockpit ecosystem is designed to be used by different HPC computing centers. Since configurations and setups differ between the centers, the centers likely have to put some work into the cc-metric-collector to gather all desired metrics. The ClusterCockpit ecosystem is designed to be used by different HPC computing centers. Since configurations and setups differ between the centers, the centers likely have to put some work into the cc-metric-collector to gather all desired metrics.
You are free to open an issue to request a collector but we would also be happy about PRs. You are free to open an issue to request a collector but we would also be happy about PRs.
# Contact # Contact
* [Matrix.org ClusterCockpit General chat](https://matrix.to/#/#clustercockpit-dev:matrix.org) * [Matrix.org ClusterCockpit General chat](https://matrix.to/#/#clustercockpit-dev:matrix.org)
* [Matrix.org ClusterCockpit Development chat](https://matrix.to/#/#clustercockpit:matrix.org) * [Matrix.org ClusterCockpit Development chat](https://matrix.to/#/#clustercockpit:matrix.org)

View File

@@ -15,19 +15,20 @@ import (
"sync" "sync"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter" mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
) )
type CentralConfigFile struct { type CentralConfigFile struct {
Interval string `json:"interval"` Interval int `json:"interval"`
Duration string `json:"duration"` Duration int `json:"duration"`
CollectorConfigFile string `json:"collectors"` CollectorConfigFile string `json:"collectors"`
RouterConfigFile string `json:"router"` RouterConfigFile string `json:"router"`
SinkConfigFile string `json:"sinks"` SinkConfigFile string `json:"sinks"`
ReceiverConfigFile string `json:"receivers,omitempty"` ReceiverConfigFile string `json:"receivers,omitempty"`
StatsApiConfigFile string `json:"stats_api,omitempty"`
} }
func LoadCentralConfiguration(file string, config *CentralConfigFile) error { func LoadCentralConfiguration(file string, config *CentralConfigFile) error {
@@ -52,9 +53,10 @@ type RuntimeConfig struct {
CollectManager collectors.CollectorManager CollectManager collectors.CollectorManager
SinkManager sinks.SinkManager SinkManager sinks.SinkManager
ReceiveManager receivers.ReceiveManager ReceiveManager receivers.ReceiveManager
StatsApi mr.StatsApi
MultiChanTicker mct.MultiChanTicker MultiChanTicker mct.MultiChanTicker
Channels []chan lp.CCMessage Channels []chan lp.CCMetric
Sync sync.WaitGroup Sync sync.WaitGroup
} }
@@ -152,11 +154,16 @@ func shutdownHandler(config *RuntimeConfig, shutdownSignal chan os.Signal) {
cclog.Debug("Shutdown SinkManager...") cclog.Debug("Shutdown SinkManager...")
config.SinkManager.Close() config.SinkManager.Close()
} }
if config.StatsApi != nil {
cclog.Debug("Shutdown StatsApi...")
config.StatsApi.Close()
}
} }
func mainFunc() int { func mainFunc() int {
var err error var err error
use_recv := false use_recv := false
use_api := false
// Initialize runtime configuration // Initialize runtime configuration
rcfg := RuntimeConfig{ rcfg := RuntimeConfig{
@@ -164,6 +171,7 @@ func mainFunc() int {
CollectManager: nil, CollectManager: nil,
SinkManager: nil, SinkManager: nil,
ReceiveManager: nil, ReceiveManager: nil,
StatsApi: nil,
CliArgs: ReadCli(), CliArgs: ReadCli(),
} }
@@ -173,36 +181,16 @@ func mainFunc() int {
cclog.Error("Error reading configuration file ", rcfg.CliArgs["configfile"], ": ", err.Error()) cclog.Error("Error reading configuration file ", rcfg.CliArgs["configfile"], ": ", err.Error())
return 1 return 1
} }
if rcfg.ConfigFile.Interval <= 0 || time.Duration(rcfg.ConfigFile.Interval)*time.Second <= 0 {
// Properly use duration parser with inputs like '60s', '5m' or similar cclog.Error("Configuration value 'interval' must be greater than zero")
if len(rcfg.ConfigFile.Interval) > 0 {
t, err := time.ParseDuration(rcfg.ConfigFile.Interval)
if err != nil {
cclog.Error("Configuration value 'interval' no valid duration")
}
rcfg.Interval = t
if rcfg.Interval == 0 {
cclog.Error("Configuration value 'interval' must be greater than zero")
return 1
}
}
// Properly use duration parser with inputs like '60s', '5m' or similar
if len(rcfg.ConfigFile.Duration) > 0 {
t, err := time.ParseDuration(rcfg.ConfigFile.Duration)
if err != nil {
cclog.Error("Configuration value 'duration' no valid duration")
}
rcfg.Duration = t
if rcfg.Duration == 0 {
cclog.Error("Configuration value 'duration' must be greater than zero")
return 1
}
}
if rcfg.Duration > rcfg.Interval {
cclog.Error("The interval should be greater than duration")
return 1 return 1
} }
rcfg.Interval = time.Duration(rcfg.ConfigFile.Interval) * time.Second
if rcfg.ConfigFile.Duration <= 0 || time.Duration(rcfg.ConfigFile.Duration)*time.Second <= 0 {
cclog.Error("Configuration value 'duration' must be greater than zero")
return 1
}
rcfg.Duration = time.Duration(rcfg.ConfigFile.Duration) * time.Second
if len(rcfg.ConfigFile.RouterConfigFile) == 0 { if len(rcfg.ConfigFile.RouterConfigFile) == 0 {
cclog.Error("Metric router configuration file must be set") cclog.Error("Metric router configuration file must be set")
@@ -242,7 +230,7 @@ func mainFunc() int {
} }
// Connect metric router to sink manager // Connect metric router to sink manager
RouterToSinksChannel := make(chan lp.CCMessage, 200) RouterToSinksChannel := make(chan lp.CCMetric, 200)
rcfg.SinkManager.AddInput(RouterToSinksChannel) rcfg.SinkManager.AddInput(RouterToSinksChannel)
rcfg.MetricRouter.AddOutput(RouterToSinksChannel) rcfg.MetricRouter.AddOutput(RouterToSinksChannel)
@@ -254,7 +242,7 @@ func mainFunc() int {
} }
// Connect collector manager to metric router // Connect collector manager to metric router
CollectToRouterChannel := make(chan lp.CCMessage, 200) CollectToRouterChannel := make(chan lp.CCMetric, 200)
rcfg.CollectManager.AddOutput(CollectToRouterChannel) rcfg.CollectManager.AddOutput(CollectToRouterChannel)
rcfg.MetricRouter.AddCollectorInput(CollectToRouterChannel) rcfg.MetricRouter.AddCollectorInput(CollectToRouterChannel)
@@ -267,12 +255,22 @@ func mainFunc() int {
} }
// Connect receive manager to metric router // Connect receive manager to metric router
ReceiveToRouterChannel := make(chan lp.CCMessage, 200) ReceiveToRouterChannel := make(chan lp.CCMetric, 200)
rcfg.ReceiveManager.AddOutput(ReceiveToRouterChannel) rcfg.ReceiveManager.AddOutput(ReceiveToRouterChannel)
rcfg.MetricRouter.AddReceiverInput(ReceiveToRouterChannel) rcfg.MetricRouter.AddReceiverInput(ReceiveToRouterChannel)
use_recv = true use_recv = true
} }
// Create new statistics API manager
if len(rcfg.ConfigFile.StatsApiConfigFile) > 0 {
rcfg.StatsApi, err = mr.NewStatsApi(rcfg.MultiChanTicker, &rcfg.Sync, rcfg.ConfigFile.StatsApiConfigFile)
if err != nil {
cclog.Error(err.Error())
return 1
}
use_api = true
}
// Create shutdown handler // Create shutdown handler
shutdownSignal := make(chan os.Signal, 1) shutdownSignal := make(chan os.Signal, 1)
signal.Notify(shutdownSignal, os.Interrupt) signal.Notify(shutdownSignal, os.Interrupt)
@@ -280,6 +278,11 @@ func mainFunc() int {
rcfg.Sync.Add(1) rcfg.Sync.Add(1)
go shutdownHandler(&rcfg, shutdownSignal) go shutdownHandler(&rcfg, shutdownSignal)
// Start the stats api early to be prepared for init settings
if use_api {
rcfg.StatsApi.Start()
}
// Start the managers // Start the managers
rcfg.MetricRouter.Start() rcfg.MetricRouter.Start()
rcfg.SinkManager.Start() rcfg.SinkManager.Start()
@@ -291,7 +294,7 @@ func mainFunc() int {
// Wait until one tick has passed. This is a workaround // Wait until one tick has passed. This is a workaround
if rcfg.CliArgs["once"] == "true" { if rcfg.CliArgs["once"] == "true" {
x := 1.2 * float64(rcfg.Interval.Seconds()) x := 1.2 * float64(rcfg.ConfigFile.Interval)
time.Sleep(time.Duration(int(x)) * time.Second) time.Sleep(time.Duration(int(x)) * time.Second)
shutdownSignal <- os.Interrupt shutdownSignal <- os.Interrupt
} }

View File

@@ -12,7 +12,6 @@
"proc_total" "proc_total"
] ]
}, },
"memstat": {},
"netstat": { "netstat": {
"include_devices": [ "include_devices": [
"enp5s0" "enp5s0"
@@ -34,8 +33,5 @@
"type-id": "1" "type-id": "1"
} }
} }
},
"topprocs": {
"num_procs": 5
} }
} }
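
The collectors file above is a JSON object mapping each collector name to its raw, collector-specific configuration, which is later handed to that collector's `Init(json.RawMessage)`. A hedged sketch of decoding such a file (example content abridged from the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"netstat": {"include_devices": ["enp5s0"]}, "cpufreq": {}}`)
	// Decode the top level only; each value stays raw for the collector's Init.
	var byName map[string]json.RawMessage
	if err := json.Unmarshal(raw, &byName); err != nil {
		panic(err)
	}
	for name, cfg := range byName {
		fmt.Printf("collector %s gets config %s\n", name, cfg)
	}
}
```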

View File

@@ -1,33 +1,25 @@
# LIKWID version
LIKWID_VERSION := 5.4.1
LIKWID_INSTALLED_FOLDER := $(shell dirname $$(which likwid-topology 2>/dev/null) 2>/dev/null)
LIKWID_FOLDER := $(CURDIR)/likwid
all: likwid all: likwid
# LIKWID version
LIKWID_VERSION = 5.2.1
.ONESHELL: .ONESHELL:
.PHONY: likwid .PHONY: likwid
likwid: likwid:
if [ -n "$(LIKWID_INSTALLED_FOLDER)" ]; then INSTALL_FOLDER="$${PWD}/likwid"
# Using likwid include files from system installation BUILD_FOLDER="$${PWD}/likwidbuild"
INCLUDE_DIR="$(LIKWID_INSTALLED_FOLDER)/../include" if [ -d $${INSTALL_FOLDER} ]; then rm -r $${INSTALL_FOLDER}; fi
mkdir --parents --verbose "$(LIKWID_FOLDER)" mkdir --parents --verbose $${INSTALL_FOLDER} $${BUILD_FOLDER}
cp "$${INCLUDE_DIR}"/*.h "$(LIKWID_FOLDER)" wget -P "$${BUILD_FOLDER}" ftp://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz
else tar -C $${BUILD_FOLDER} -xf $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION).tar.gz
# Using likwid include files from downloaded tar archive install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/likwid*.h $${INSTALL_FOLDER}/
if [ -d "$(LIKWID_FOLDER)" ]; then install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/bstrlib.h $${INSTALL_FOLDER}/
rm --recursive "$(LIKWID_FOLDER)" rm -r $${BUILD_FOLDER}
fi
BUILD_FOLDER="$${PWD}/likwidbuild"
mkdir --parents --verbose "$${BUILD_FOLDER}"
wget --output-document=- http://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz |
tar --directory="$${BUILD_FOLDER}" --extract --gz
install -D --verbose --preserve-timestamps --mode=0644 --target-directory="$(LIKWID_FOLDER)" "$${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes"/likwid*.h
rm --recursive "$${BUILD_FOLDER}"
fi
.PHONY: clean
clean: clean:
rm -rf likwid rm -rf likwid
.PHONY: clean

View File

@@ -35,11 +35,10 @@ In contrast to the configuration files for sinks and receivers, the collectors c
* [`nfs4stat`](./nfs4Metric.md) * [`nfs4stat`](./nfs4Metric.md)
* [`cpufreq`](./cpufreqMetric.md) * [`cpufreq`](./cpufreqMetric.md)
* [`cpufreq_cpuinfo`](./cpufreqCpuinfoMetric.md) * [`cpufreq_cpuinfo`](./cpufreqCpuinfoMetric.md)
* [`numastats`](./numastatsMetric.md) * [`numastat`](./numastatMetric.md)
* [`gpfs`](./gpfsMetric.md) * [`gpfs`](./gpfsMetric.md)
* [`beegfs_meta`](./beegfsmetaMetric.md) * [`beegfs_meta`](./beegfsmetaMetric.md)
* [`beegfs_storage`](./beegfsstorageMetric.md) * [`beegfs_storage`](./beegfsstorageMetric.md)
* [`rocm_smi`](./rocmsmiMetric.md)
## Todos ## Todos
@@ -51,7 +50,7 @@ A collector reads data from any source, parses it to metrics and submits these m
* `Name() string`: Return the name of the collector * `Name() string`: Return the name of the collector
* `Init(config json.RawMessage) error`: Initializes the collector using the given collector-specific config in JSON. Check if needed files/commands exist, ... * `Init(config json.RawMessage) error`: Initializes the collector using the given collector-specific config in JSON. Check if needed files/commands exist, ...
* `Initialized() bool`: Check if a collector is successfully initialized * `Initialized() bool`: Check if a collector is successfully initialized
* `Read(duration time.Duration, output chan ccMetric.CCMetric)`: Read, parse and submit data to the `output` channel as [`CCMetric`](../internal/ccMetric/README.md). If the collector has to measure anything for some duration, use the provided function argument `duration`. * `Read(duration time.Duration, output chan ccMetric.CCMetric)`: Read, parse and submit data to the `output` channel as [`CCMetric`](../internal/ccMetric/README.md). If the collector has to measure anything for some duration, use the provided function argument `duration`.
* `Close()`: Closes down the collector. * `Close()`: Closes down the collector.
It is recommended to call `setup()` in the `Init()` function. It is recommended to call `setup()` in the `Init()` function.
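
For reference, the interface implied by the list above, written out as a sketch (assumed shape; the authoritative definition lives in the collectors package):

```go
package collectors

import (
	"encoding/json"
	"time"

	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)

// MetricCollector is a sketch of the collector contract described above.
type MetricCollector interface {
	Name() string                                         // return the collector's name
	Init(config json.RawMessage) error                    // parse config, check prerequisites
	Initialized() bool                                    // true after a successful Init
	Read(duration time.Duration, output chan lp.CCMetric) // measure and submit metrics
	Close()                                               // shut the collector down
}
```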

View File

@@ -5,7 +5,7 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"os" "os"
"os/exec" "os/exec"
"os/user" "os/user"
@@ -14,8 +14,9 @@ import (
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
const DEFAULT_BEEGFS_CMD = "beegfs-ctl" const DEFAULT_BEEGFS_CMD = "beegfs-ctl"
@@ -29,10 +30,11 @@ type BeegfsMetaCollectorConfig struct {
type BeegfsMetaCollector struct { type BeegfsMetaCollector struct {
metricCollector metricCollector
tags map[string]string tags map[string]string
matches map[string]string matches map[string]string
config BeegfsMetaCollectorConfig config BeegfsMetaCollectorConfig
skipFS map[string]struct{} skipFS map[string]struct{}
statsProcessedMetrics int64
} }
func (m *BeegfsMetaCollector) Init(config json.RawMessage) error { func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
@@ -55,7 +57,6 @@ func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
m.name = "BeegfsMetaCollector" m.name = "BeegfsMetaCollector"
m.setup() m.setup()
m.parallel = true
// Set default beegfs-ctl binary // Set default beegfs-ctl binary
m.config.Beegfs = DEFAULT_BEEGFS_CMD m.config.Beegfs = DEFAULT_BEEGFS_CMD
@@ -106,16 +107,17 @@ func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
if err != nil { if err != nil {
return fmt.Errorf("BeegfsMetaCollector.Init(): Failed to find beegfs-ctl binary '%s': %v", m.config.Beegfs, err) return fmt.Errorf("BeegfsMetaCollector.Init(): Failed to find beegfs-ctl binary '%s': %v", m.config.Beegfs, err)
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
// get mountpoint // get mountpoint
buffer, _ := os.ReadFile(string("/proc/mounts")) buffer, _ := ioutil.ReadFile(string("/proc/mounts"))
mounts := strings.Split(string(buffer), "\n") mounts := strings.Split(string(buffer), "\n")
var mountpoints []string var mountpoints []string
for _, line := range mounts { for _, line := range mounts {
@@ -157,9 +159,9 @@ func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMess
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error()) fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error())
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode()) fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode())
data, _ := io.ReadAll(cmdStderr) data, _ := ioutil.ReadAll(cmdStderr)
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command stderr: \"%s\"\n", string(data)) fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command stderr: \"%s\"\n", string(data))
data, _ = io.ReadAll(cmdStdout) data, _ = ioutil.ReadAll(cmdStdout)
fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command stdout: \"%s\"\n", string(data)) fmt.Fprintf(os.Stderr, "BeegfsMetaCollector.Read(): command stdout: \"%s\"\n", string(data))
return return
} }
@@ -216,13 +218,15 @@ func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMess
for key, data := range m.matches { for key, data := range m.matches {
value, _ := strconv.ParseFloat(data, 32) value, _ := strconv.ParseFloat(data, 32)
y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now()) y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil { if err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
} }
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *BeegfsMetaCollector) Close() { func (m *BeegfsMetaCollector) Close() {
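
Both BeeGFS collectors begin `Read()` by discovering mountpoints from `/proc/mounts`; a standalone sketch of that discovery step, assuming the usual field layout (`device mountpoint fstype ...`):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Collect mountpoints whose filesystem type mentions "beegfs",
	// mirroring the discovery loop in the collectors above.
	buffer, err := os.ReadFile("/proc/mounts")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	var mountpoints []string
	for _, line := range strings.Split(string(buffer), "\n") {
		f := strings.Fields(line)
		if len(f) >= 3 && strings.Contains(f[2], "beegfs") {
			mountpoints = append(mountpoints, f[1])
		}
	}
	fmt.Println(mountpoints)
}
```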

View File

@@ -5,7 +5,7 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"os" "os"
"os/exec" "os/exec"
"os/user" "os/user"
@@ -14,8 +14,9 @@ import (
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
// Struct for the collector-specific JSON config // Struct for the collector-specific JSON config
@@ -27,10 +28,11 @@ type BeegfsStorageCollectorConfig struct {
type BeegfsStorageCollector struct { type BeegfsStorageCollector struct {
metricCollector metricCollector
tags map[string]string tags map[string]string
matches map[string]string matches map[string]string
config BeegfsStorageCollectorConfig config BeegfsStorageCollectorConfig
skipFS map[string]struct{} skipFS map[string]struct{}
statsProcessedMetrics int64
} }
func (m *BeegfsStorageCollector) Init(config json.RawMessage) error { func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
@@ -48,7 +50,6 @@ func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
m.name = "BeegfsStorageCollector" m.name = "BeegfsStorageCollector"
m.setup() m.setup()
m.parallel = true
// Set default beegfs-ctl binary // Set default beegfs-ctl binary
m.config.Beegfs = DEFAULT_BEEGFS_CMD m.config.Beegfs = DEFAULT_BEEGFS_CMD
@@ -99,16 +100,17 @@ func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
if err != nil { if err != nil {
return fmt.Errorf("BeegfsStorageCollector.Init(): Failed to find beegfs-ctl binary '%s': %v", m.config.Beegfs, err) return fmt.Errorf("BeegfsStorageCollector.Init(): Failed to find beegfs-ctl binary '%s': %v", m.config.Beegfs, err)
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
// get mountpoint // get mountpoint
buffer, _ := os.ReadFile(string("/proc/mounts")) buffer, _ := ioutil.ReadFile(string("/proc/mounts"))
mounts := strings.Split(string(buffer), "\n") mounts := strings.Split(string(buffer), "\n")
var mountpoints []string var mountpoints []string
for _, line := range mounts { for _, line := range mounts {
@@ -149,9 +151,9 @@ func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCM
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error()) fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): Failed to execute command \"%s\": %s\n", cmd.String(), err.Error())
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode()) fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command exit code: \"%d\"\n", cmd.ProcessState.ExitCode())
data, _ := io.ReadAll(cmdStderr) data, _ := ioutil.ReadAll(cmdStderr)
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command stderr: \"%s\"\n", string(data)) fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command stderr: \"%s\"\n", string(data))
data, _ = io.ReadAll(cmdStdout) data, _ = ioutil.ReadAll(cmdStdout)
fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command stdout: \"%s\"\n", string(data)) fmt.Fprintf(os.Stderr, "BeegfsStorageCollector.Read(): command stdout: \"%s\"\n", string(data))
return return
} }
@@ -208,13 +210,15 @@ func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCM
for key, data := range m.matches { for key, data := range m.matches {
value, _ := strconv.ParseFloat(data, 32) value, _ := strconv.ParseFloat(data, 32)
y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now()) y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil { if err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
} }
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *BeegfsStorageCollector) Close() { func (m *BeegfsStorageCollector) Close() {
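
The same counting pattern recurs in every collector touched by this change: reset the counter in `Init()`, increment it per submitted metric in `Read()`, publish it once at the end of `Read()`. A runnable sketch (`componentStatInt` is a hypothetical stand-in for `stats.ComponentStatInt` from `internal/metricRouter`):

```go
package main

import "fmt"

// componentStatInt stands in for the repository's stats.ComponentStatInt,
// which publishes one named integer statistic per component.
func componentStatInt(component, stat string, value int64) {
	fmt.Printf("stat %s/%s = %d\n", component, stat, value)
}

func main() {
	var statsProcessedMetrics int64 // reset in Init()
	for range [5]struct{}{} {       // stands in for the metric-submission loop
		statsProcessedMetrics++ // one increment per metric sent to output
	}
	componentStatInt("BeegfsStorageCollector", "processed_metrics", statsProcessedMetrics)
}
```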

View File

@@ -6,61 +6,53 @@ import (
"sync" "sync"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker" mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
) )
// Map of all available metric collectors // Map of all available metric collectors
var AvailableCollectors = map[string]MetricCollector{ var AvailableCollectors = map[string]MetricCollector{
"likwid": new(LikwidCollector), "likwid": new(LikwidCollector),
"loadavg": new(LoadavgCollector), "loadavg": new(LoadavgCollector),
"memstat": new(MemstatCollector), "memstat": new(MemstatCollector),
"netstat": new(NetstatCollector), "netstat": new(NetstatCollector),
"ibstat": new(InfinibandCollector), "ibstat": new(InfinibandCollector),
"lustrestat": new(LustreCollector), "lustrestat": new(LustreCollector),
"cpustat": new(CpustatCollector), "cpustat": new(CpustatCollector),
"topprocs": new(TopProcsCollector), "topprocs": new(TopProcsCollector),
"nvidia": new(NvidiaCollector), "nvidia": new(NvidiaCollector),
"customcmd": new(CustomCmdCollector), "customcmd": new(CustomCmdCollector),
"iostat": new(IOstatCollector), "iostat": new(IOstatCollector),
"diskstat": new(DiskstatCollector), "diskstat": new(DiskstatCollector),
"tempstat": new(TempCollector), "tempstat": new(TempCollector),
"ipmistat": new(IpmiCollector), "ipmistat": new(IpmiCollector),
"gpfs": new(GpfsCollector), "gpfs": new(GpfsCollector),
"cpufreq": new(CPUFreqCollector), "cpufreq": new(CPUFreqCollector),
"cpufreq_cpuinfo": new(CPUFreqCpuInfoCollector), "cpufreq_cpuinfo": new(CPUFreqCpuInfoCollector),
"nfs3stat": new(Nfs3Collector), "nfs3stat": new(Nfs3Collector),
"nfs4stat": new(Nfs4Collector), "nfs4stat": new(Nfs4Collector),
"numastats": new(NUMAStatsCollector), "numastats": new(NUMAStatsCollector),
"beegfs_meta": new(BeegfsMetaCollector), "beegfs_meta": new(BeegfsMetaCollector),
"beegfs_storage": new(BeegfsStorageCollector), "beegfs_storage": new(BeegfsStorageCollector),
"rapl": new(RAPLCollector),
"rocm_smi": new(RocmSmiCollector),
"self": new(SelfCollector),
"schedstat": new(SchedstatCollector),
"nfsiostat": new(NfsIOStatCollector),
} }
// Metric collector manager data structure // Metric collector manager data structure
type collectorManager struct { type collectorManager struct {
collectors []MetricCollector // List of metric collectors to read in parallel collectors []MetricCollector // List of metric collectors to use
serial []MetricCollector // List of metric collectors to read serially output chan lp.CCMetric // Output channels
output chan lp.CCMessage // Output channels done chan bool // channel to finish / stop metric collector manager
done chan bool // channel to finish / stop metric collector manager ticker mct.MultiChanTicker // periodically ticking once each interval
ticker mct.MultiChanTicker // periodically ticking once each interval duration time.Duration // duration (for metrics that measure over a given duration)
duration time.Duration // duration (for metrics that measure over a given duration) wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector config map[string]json.RawMessage // json encoded config for collector manager
config map[string]json.RawMessage // json encoded config for collector manager
collector_wg sync.WaitGroup // internally used wait group for the parallel reading of collector
parallel_run bool // Flag whether the collectors are currently read in parallel
} }
// Metric collector manager access functions // Metric collector manager access functions
type CollectorManager interface { type CollectorManager interface {
Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error
AddOutput(output chan lp.CCMessage) AddOutput(output chan lp.CCMetric)
Start() Start()
Close() Close()
} }
@@ -74,7 +66,6 @@ type CollectorManager interface {
// Initialization is done for all configured collectors // Initialization is done for all configured collectors
func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error { func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error {
cm.collectors = make([]MetricCollector, 0) cm.collectors = make([]MetricCollector, 0)
cm.serial = make([]MetricCollector, 0)
cm.output = nil cm.output = nil
cm.done = make(chan bool) cm.done = make(chan bool)
cm.wg = wg cm.wg = wg
@@ -109,11 +100,7 @@ func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Durat
continue continue
} }
cclog.ComponentDebug("CollectorManager", "ADD COLLECTOR", collector.Name()) cclog.ComponentDebug("CollectorManager", "ADD COLLECTOR", collector.Name())
if collector.Parallel() { cm.collectors = append(cm.collectors, collector)
cm.collectors = append(cm.collectors, collector)
} else {
cm.serial = append(cm.serial, collector)
}
} }
return nil return nil
} }
@@ -129,10 +116,6 @@ func (cm *collectorManager) Start() {
// Collector manager is done // Collector manager is done
done := func() { done := func() {
// close all metric collectors // close all metric collectors
if cm.parallel_run {
cm.collector_wg.Wait()
cm.parallel_run = false
}
for _, c := range cm.collectors { for _, c := range cm.collectors {
c.Close() c.Close()
} }
@@ -147,26 +130,7 @@ func (cm *collectorManager) Start() {
done() done()
return return
case t := <-tick: case t := <-tick:
cm.parallel_run = true
for _, c := range cm.collectors { for _, c := range cm.collectors {
// Wait for done signal or execute the collector
select {
case <-cm.done:
done()
return
default:
// Read metrics from collector c via goroutine
cclog.ComponentDebug("CollectorManager", c.Name(), t)
cm.collector_wg.Add(1)
go func(myc MetricCollector) {
myc.Read(cm.duration, cm.output)
cm.collector_wg.Done()
}(c)
}
}
cm.collector_wg.Wait()
cm.parallel_run = false
for _, c := range cm.serial {
// Wait for done signal or execute the collector // Wait for done signal or execute the collector
select { select {
case <-cm.done: case <-cm.done:
@@ -187,7 +151,7 @@ func (cm *collectorManager) Start() {
} }
// AddOutput adds the output channel to the metric collector manager // AddOutput adds the output channel to the metric collector manager
func (cm *collectorManager) AddOutput(output chan lp.CCMessage) { func (cm *collectorManager) AddOutput(output chan lp.CCMetric) {
cm.output = output cm.output = output
} }

View File

@@ -10,22 +10,35 @@ import (
"strings" "strings"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
//
// CPUFreqCpuInfoCollector // CPUFreqCpuInfoCollector
// a metric collector to measure the current frequency of the CPUs // a metric collector to measure the current frequency of the CPUs
// as obtained from /proc/cpuinfo // as obtained from /proc/cpuinfo
// Only measure on the first hyperthread // Only measure on the first hyperthread
//
type CPUFreqCpuInfoCollectorTopology struct { type CPUFreqCpuInfoCollectorTopology struct {
isHT bool processor string // logical processor number (continuous, starting at 0)
tagSet map[string]string coreID string // socket local core ID
coreID_int int64
physicalPackageID string // socket / package ID
physicalPackageID_int int64
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
isHT bool
numNonHT string // number of non hyperthreading processors
numNonHT_int int64
tagSet map[string]string
} }
type CPUFreqCpuInfoCollector struct { type CPUFreqCpuInfoCollector struct {
metricCollector metricCollector
topology []CPUFreqCpuInfoCollectorTopology topology []*CPUFreqCpuInfoCollectorTopology
statsProcessedMetrics int64
} }
func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error { func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
@@ -37,7 +50,6 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
m.setup() m.setup()
m.name = "CPUFreqCpuInfoCollector" m.name = "CPUFreqCpuInfoCollector"
m.parallel = true
m.meta = map[string]string{ m.meta = map[string]string{
"source": m.name, "source": m.name,
"group": "CPU", "group": "CPU",
@@ -54,9 +66,11 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
// Collect topology information from file cpuinfo // Collect topology information from file cpuinfo
foundFreq := false foundFreq := false
processor := "" processor := ""
var numNonHT_int int64 = 0
coreID := "" coreID := ""
physicalPackageID := "" physicalPackageID := ""
m.topology = make([]CPUFreqCpuInfoCollectorTopology, 0) var maxPhysicalPackageID int64 = 0
m.topology = make([]*CPUFreqCpuInfoCollectorTopology, 0)
coreSeenBefore := make(map[string]bool) coreSeenBefore := make(map[string]bool)
// Read cpuinfo file, line by line // Read cpuinfo file, line by line
@@ -85,22 +99,41 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
len(coreID) > 0 && len(coreID) > 0 &&
len(physicalPackageID) > 0 { len(physicalPackageID) > 0 {
topology := new(CPUFreqCpuInfoCollectorTopology)
// Processor
topology.processor = processor
// Core ID
topology.coreID = coreID
topology.coreID_int, err = strconv.ParseInt(coreID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
}
// Physical package ID
topology.physicalPackageID = physicalPackageID
topology.physicalPackageID_int, err = strconv.ParseInt(physicalPackageID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert physicalPackageID '%s' to int64: %v", physicalPackageID, err)
}
// increase maximum socket / package ID, when required
if topology.physicalPackageID_int > maxPhysicalPackageID {
maxPhysicalPackageID = topology.physicalPackageID_int
}
// is hyperthread?
globalID := physicalPackageID + ":" + coreID globalID := physicalPackageID + ":" + coreID
topology.isHT = coreSeenBefore[globalID]
coreSeenBefore[globalID] = true
if !topology.isHT {
// increase number of non hyper-thread cores
numNonHT_int++
}
// store collected topology information // store collected topology information
m.topology = append(m.topology, m.topology = append(m.topology, topology)
CPUFreqCpuInfoCollectorTopology{
isHT: coreSeenBefore[globalID],
tagSet: map[string]string{
"type": "hwthread",
"type-id": processor,
"package_id": physicalPackageID,
},
},
)
// mark core as seen before
coreSeenBefore[globalID] = true
// reset topology information // reset topology information
foundFreq = false foundFreq = false
@@ -110,16 +143,26 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
} }
} }
// Check if at least one CPU with frequency information was detected numPhysicalPackageID_int := maxPhysicalPackageID + 1
if len(m.topology) == 0 { numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
return fmt.Errorf("no CPU frequency info found in %s", cpuInfoFile) numNonHT := fmt.Sprint(numNonHT_int)
for _, t := range m.topology {
t.numPhysicalPackages = numPhysicalPackageID
t.numPhysicalPackages_int = numPhysicalPackageID_int
t.numNonHT = numNonHT
t.numNonHT_int = numNonHT_int
t.tagSet = map[string]string{
"type": "cpu",
"type-id": t.processor,
"package_id": t.physicalPackageID,
}
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized // Check if already initialized
if !m.init { if !m.init {
return return
@@ -154,7 +197,8 @@ func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CC
fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err)) fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err))
return return
} }
if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil { if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
m.statsProcessedMetrics++
output <- y output <- y
} }
} }
@@ -162,6 +206,7 @@ func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CC
} }
} }
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *CPUFreqCpuInfoCollector) Close() { func (m *CPUFreqCpuInfoCollector) Close() {
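
The `coreSeenBefore` map above marks every logical CPU after the first one seen for a given (package, core) pair as a hyperthread; a self-contained illustration with made-up topology values:

```go
package main

import "fmt"

func main() {
	type cpu struct{ processor, coreID, packageID string }
	// Two cores on one package, each with two hardware threads.
	cpus := []cpu{
		{"0", "0", "0"}, {"1", "1", "0"},
		{"2", "0", "0"}, {"3", "1", "0"}, // second siblings -> hyperthreads
	}
	coreSeenBefore := make(map[string]bool)
	for _, c := range cpus {
		globalID := c.packageID + ":" + c.coreID
		isHT := coreSeenBefore[globalID] // first sibling: false, later ones: true
		coreSeenBefore[globalID] = true
		fmt.Printf("cpu%s isHT=%v\n", c.processor, isHT)
	}
}
```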

View File

@@ -1,11 +1,10 @@
## `cpufreq_cpuinfo` collector
## `cpufreq_cpuinfo` collector
```json ```json
"cpufreq_cpuinfo": {} "cpufreq_cpuinfo": {}
``` ```
The `cpufreq_cpuinfo` collector reads the clock frequency from `/proc/cpuinfo` and outputs a handful of **hwthread** metrics. The `cpufreq_cpuinfo` collector reads the clock frequency from `/proc/cpuinfo` and outputs a handful of **cpu** metrics.
Metrics: Metrics:
* `cpufreq` * `cpufreq`

View File

@@ -3,33 +3,46 @@ package collectors
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os" "io/ioutil"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
"github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology" stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
type CPUFreqCollectorTopology struct { type CPUFreqCollectorTopology struct {
scalingCurFreqFile string processor string // logical processor number (continuous, starting at 0)
tagSet map[string]string coreID string // socket local core ID
coreID_int int64
physicalPackageID string // socket / package ID
physicalPackageID_int int64
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
isHT bool
numNonHT string // number of non hyperthreading processors
numNonHT_int int64
scalingCurFreqFile string
tagSet map[string]string
} }
//
// CPUFreqCollector // CPUFreqCollector
// a metric collector to measure the current frequency of the CPUs // a metric collector to measure the current frequency of the CPUs
// as obtained from the hardware (in KHz) // as obtained from the hardware (in KHz)
// Only measure on the first hyper-thread // Only measure on the first hyper thread
// //
// See: https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html // See: https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html
//
type CPUFreqCollector struct { type CPUFreqCollector struct {
metricCollector metricCollector
topology []CPUFreqCollectorTopology topology []CPUFreqCollectorTopology
config struct { statsProcessedMetrics int64
config struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"` ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
} }
} }
@@ -42,7 +55,6 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
m.name = "CPUFreqCollector" m.name = "CPUFreqCollector"
m.setup() m.setup()
m.parallel = true
if len(config) > 0 { if len(config) > 0 {
err := json.Unmarshal(config, &m.config) err := json.Unmarshal(config, &m.config)
if err != nil { if err != nil {
@@ -52,46 +64,116 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
m.meta = map[string]string{ m.meta = map[string]string{
"source": m.name, "source": m.name,
"group": "CPU", "group": "CPU",
"unit": "Hz", "unit": "MHz",
} }
m.topology = make([]CPUFreqCollectorTopology, 0) // Loop for all CPU directories
for _, c := range ccTopology.CpuData() { baseDir := "/sys/devices/system/cpu"
globPattern := filepath.Join(baseDir, "cpu[0-9]*")
cpuDirs, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("unable to glob files with pattern '%s': %v", globPattern, err)
}
if cpuDirs == nil {
return fmt.Errorf("unable to find any files with pattern '%s'", globPattern)
}
// Skip hyper threading CPUs // Initialize CPU topology
if c.CpuID != c.CoreCPUsList[0] { m.topology = make([]CPUFreqCollectorTopology, len(cpuDirs))
continue for _, cpuDir := range cpuDirs {
processor := strings.TrimPrefix(cpuDir, "/sys/devices/system/cpu/cpu")
processor_int, err := strconv.ParseInt(processor, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert cpuID '%s' to int64: %v", processor, err)
}
// Read package ID
physicalPackageIDFile := filepath.Join(cpuDir, "topology", "physical_package_id")
line, err := ioutil.ReadFile(physicalPackageIDFile)
if err != nil {
return fmt.Errorf("unable to read physical package ID from file '%s': %v", physicalPackageIDFile, err)
}
physicalPackageID := strings.TrimSpace(string(line))
physicalPackageID_int, err := strconv.ParseInt(physicalPackageID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert packageID '%s' to int64: %v", physicalPackageID, err)
}
// Read core ID
coreIDFile := filepath.Join(cpuDir, "topology", "core_id")
line, err = ioutil.ReadFile(coreIDFile)
if err != nil {
return fmt.Errorf("unable to read core ID from file '%s': %v", coreIDFile, err)
}
coreID := strings.TrimSpace(string(line))
coreID_int, err := strconv.ParseInt(coreID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
} }
// Check access to current frequency file // Check access to current frequency file
scalingCurFreqFile := filepath.Join("/sys/devices/system/cpu", fmt.Sprintf("cpu%d", c.CpuID), "cpufreq/scaling_cur_freq") scalingCurFreqFile := filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq")
err := unix.Access(scalingCurFreqFile, unix.R_OK) err = unix.Access(scalingCurFreqFile, unix.R_OK)
if err != nil { if err != nil {
return fmt.Errorf("unable to access file '%s': %v", scalingCurFreqFile, err) return fmt.Errorf("unable to access file '%s': %v", scalingCurFreqFile, err)
} }
m.topology = append(m.topology, t := &m.topology[processor_int]
CPUFreqCollectorTopology{ t.processor = processor
tagSet: map[string]string{ t.physicalPackageID = physicalPackageID
"type": "hwthread", t.physicalPackageID_int = physicalPackageID_int
"type-id": fmt.Sprint(c.CpuID), t.coreID = coreID
"package_id": fmt.Sprint(c.Socket), t.coreID_int = coreID_int
}, t.scalingCurFreqFile = scalingCurFreqFile
scalingCurFreqFile: scalingCurFreqFile,
},
)
} }
// Initialized // is processor a hyperthread?
cclog.ComponentDebug( coreSeenBefore := make(map[string]bool)
m.name, for i := range m.topology {
"initialized", t := &m.topology[i]
len(m.topology), "non-hyper-threading CPUs")
globalID := t.physicalPackageID + ":" + t.coreID
t.isHT = coreSeenBefore[globalID]
coreSeenBefore[globalID] = true
}
// number of non hyper thread cores and packages / sockets
var numNonHT_int int64 = 0
var maxPhysicalPackageID int64 = 0
for i := range m.topology {
t := &m.topology[i]
// Update maxPackageID
if t.physicalPackageID_int > maxPhysicalPackageID {
maxPhysicalPackageID = t.physicalPackageID_int
}
if !t.isHT {
numNonHT_int++
}
}
numPhysicalPackageID_int := maxPhysicalPackageID + 1
numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
numNonHT := fmt.Sprint(numNonHT_int)
for i := range m.topology {
t := &m.topology[i]
t.numPhysicalPackages = numPhysicalPackageID
t.numPhysicalPackages_int = numPhysicalPackageID_int
t.numNonHT = numNonHT
t.numNonHT_int = numNonHT_int
t.tagSet = map[string]string{
"type": "cpu",
"type-id": t.processor,
"package_id": t.physicalPackageID,
}
}
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized // Check if already initialized
if !m.init { if !m.init {
return return
@@ -101,8 +183,13 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage
for i := range m.topology { for i := range m.topology {
t := &m.topology[i] t := &m.topology[i]
// skip hyperthreads
if t.isHT {
continue
}
// Read current frequency // Read current frequency
line, err := os.ReadFile(t.scalingCurFreqFile) line, err := ioutil.ReadFile(t.scalingCurFreqFile)
if err != nil { if err != nil {
cclog.ComponentError( cclog.ComponentError(
m.name, m.name,
@@ -117,10 +204,12 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage
continue continue
} }
if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil { if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
m.statsProcessedMetrics++
output <- y output <- y
} }
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *CPUFreqCollector) Close() { func (m *CPUFreqCollector) Close() {
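
A minimal sketch of the per-thread frequency read done in `Read()` above, assuming the usual sysfs layout (`scaling_cur_freq` reports kHz):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

func main() {
	// Read the current frequency of cpu0, as the collector does for each
	// hardware thread it kept in its topology list.
	f := filepath.Join("/sys/devices/system/cpu/cpu0", "cpufreq", "scaling_cur_freq")
	line, err := os.ReadFile(f)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	khz, err := strconv.ParseInt(strings.TrimSpace(string(line)), 10, 64)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("cpu0 frequency: %d kHz\n", khz)
}
```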

View File

@@ -1,13 +1,11 @@
## `cpufreq` collector ## `cpufreq` collector
```json ```json
"cpufreq": { "cpufreq": {
"exclude_metrics": [] "exclude_metrics": []
} }
``` ```
The `cpufreq` collector reads the clock frequency from `/sys/devices/system/cpu/cpu*/cpufreq` and outputs a handful of **hwthread** metrics. The `cpufreq` collector reads the clock frequency from `/sys/devices/system/cpu/cpu*/cpufreq` and outputs a handful of **cpu** metrics.
Metrics: Metrics:
* `cpufreq`
* `cpufreq`

View File

@@ -9,9 +9,9 @@ import (
"strings" "strings"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
sysconf "github.com/tklauser/go-sysconf" stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
const CPUSTATFILE = `/proc/stat` const CPUSTATFILE = `/proc/stat`
@@ -22,19 +22,17 @@ type CpustatCollectorConfig struct {
type CpustatCollector struct { type CpustatCollector struct {
metricCollector metricCollector
config CpustatCollectorConfig config CpustatCollectorConfig
lastTimestamp time.Time // Store time stamp of last tick to derive values matches map[string]int
matches map[string]int cputags map[string]map[string]string
cputags map[string]map[string]string nodetags map[string]string
nodetags map[string]string statsProcessedMetrics int64
olddata map[string]map[string]int64
} }
func (m *CpustatCollector) Init(config json.RawMessage) error { func (m *CpustatCollector) Init(config json.RawMessage) error {
m.name = "CpustatCollector" m.name = "CpustatCollector"
m.setup() m.setup()
m.parallel = true m.meta = map[string]string{"source": m.name, "group": "CPU", "unit": "Percent"}
m.meta = map[string]string{"source": m.name, "group": "CPU"}
m.nodetags = map[string]string{"type": "node"} m.nodetags = map[string]string{"type": "node"}
if len(config) > 0 { if len(config) > 0 {
err := json.Unmarshal(config, &m.config) err := json.Unmarshal(config, &m.config)
@@ -79,73 +77,49 @@ func (m *CpustatCollector) Init(config json.RawMessage) error {
// Pre-generate tags for all CPUs // Pre-generate tags for all CPUs
num_cpus := 0 num_cpus := 0
m.cputags = make(map[string]map[string]string) m.cputags = make(map[string]map[string]string)
m.olddata = make(map[string]map[string]int64)
scanner := bufio.NewScanner(file) scanner := bufio.NewScanner(file)
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
linefields := strings.Fields(line) linefields := strings.Fields(line)
if strings.Compare(linefields[0], "cpu") == 0 { if strings.HasPrefix(linefields[0], "cpu") && strings.Compare(linefields[0], "cpu") != 0 {
m.olddata["cpu"] = make(map[string]int64)
for k, v := range m.matches {
m.olddata["cpu"][k], _ = strconv.ParseInt(linefields[v], 0, 64)
}
} else if strings.HasPrefix(linefields[0], "cpu") && strings.Compare(linefields[0], "cpu") != 0 {
cpustr := strings.TrimLeft(linefields[0], "cpu") cpustr := strings.TrimLeft(linefields[0], "cpu")
cpu, _ := strconv.Atoi(cpustr) cpu, _ := strconv.Atoi(cpustr)
m.cputags[linefields[0]] = map[string]string{"type": "hwthread", "type-id": fmt.Sprintf("%d", cpu)} m.cputags[linefields[0]] = map[string]string{"type": "cpu", "type-id": fmt.Sprintf("%d", cpu)}
m.olddata[linefields[0]] = make(map[string]int64)
for k, v := range m.matches {
m.olddata[linefields[0]][k], _ = strconv.ParseInt(linefields[v], 0, 64)
}
num_cpus++ num_cpus++
} }
} }
m.lastTimestamp = time.Now() m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) { func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMetric) {
values := make(map[string]float64) values := make(map[string]float64)
clktck, _ := sysconf.Sysconf(sysconf.SC_CLK_TCK) total := 0.0
for match, index := range m.matches { for match, index := range m.matches {
if len(match) > 0 { if len(match) > 0 {
x, err := strconv.ParseInt(linefields[index], 0, 64) x, err := strconv.ParseInt(linefields[index], 0, 64)
if err == nil { if err == nil {
vdiff := x - m.olddata[linefields[0]][match] values[match] = float64(x)
m.olddata[linefields[0]][match] = x // Store new value for next run total += values[match]
values[match] = float64(vdiff) / float64(tsdelta.Seconds()) / float64(clktck)
} }
} }
} }
t := time.Now()
sum := float64(0)
for name, value := range values { for name, value := range values {
sum += value y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": (value * 100.0) / total}, t)
y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value * 100}, now)
if err == nil { if err == nil {
y.AddTag("unit", "Percent") m.statsProcessedMetrics++
output <- y
}
}
if v, ok := values["cpu_idle"]; ok {
sum -= v
y, err := lp.NewMessage("cpu_used", tags, m.meta, map[string]interface{}{"value": sum * 100}, now)
if err == nil {
y.AddTag("unit", "Percent")
output <- y output <- y
} }
} }
} }
func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
num_cpus := 0 num_cpus := 0
now := time.Now()
tsdelta := now.Sub(m.lastTimestamp)
file, err := os.Open(string(CPUSTATFILE)) file, err := os.Open(string(CPUSTATFILE))
if err != nil { if err != nil {
cclog.ComponentError(m.name, err.Error()) cclog.ComponentError(m.name, err.Error())
@@ -157,24 +131,24 @@ func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage
line := scanner.Text() line := scanner.Text()
linefields := strings.Fields(line) linefields := strings.Fields(line)
if strings.Compare(linefields[0], "cpu") == 0 { if strings.Compare(linefields[0], "cpu") == 0 {
m.parseStatLine(linefields, m.nodetags, output, now, tsdelta) m.parseStatLine(linefields, m.nodetags, output)
} else if strings.HasPrefix(linefields[0], "cpu") { } else if strings.HasPrefix(linefields[0], "cpu") {
m.parseStatLine(linefields, m.cputags[linefields[0]], output, now, tsdelta) m.parseStatLine(linefields, m.cputags[linefields[0]], output)
num_cpus++ num_cpus++
} }
} }
num_cpus_metric, err := lp.NewMessage("num_cpus", num_cpus_metric, err := lp.New("num_cpus",
m.nodetags, m.nodetags,
m.meta, m.meta,
map[string]interface{}{"value": int(num_cpus)}, map[string]interface{}{"value": int(num_cpus)},
now, time.Now(),
) )
if err == nil { if err == nil {
m.statsProcessedMetrics++
output <- num_cpus_metric output <- num_cpus_metric
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
m.lastTimestamp = now
} }
func (m *CpustatCollector) Close() { func (m *CpustatCollector) Close() {
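
The reworked `parseStatLine()` above turns raw `/proc/stat` counters into rates: the counters are in clock ticks, so a utilization percentage follows from the counter delta, the elapsed wall time, and `SC_CLK_TCK`. A worked sketch of that arithmetic:

```go
package main

import "fmt"

// cpuPercent mirrors the derivation in the new Read() path:
// (delta_ticks / seconds_elapsed) / ticks_per_second * 100.
func cpuPercent(oldTicks, newTicks int64, seconds float64, clktck int64) float64 {
	return float64(newTicks-oldTicks) / seconds / float64(clktck) * 100.0
}

func main() {
	// 250 additional user ticks over 5 s at 100 ticks/s -> 50 % cpu_user.
	fmt.Printf("cpu_user = %.1f %%\n", cpuPercent(1000, 1250, 5.0, 100))
}
```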

View File

@@ -1,6 +1,5 @@
## `cpustat` collector ## `cpustat` collector
```json ```json
"cpustat": { "cpustat": {
"exclude_metrics": [ "exclude_metrics": [
@@ -9,19 +8,16 @@
} }
``` ```
The `cpustat` collector reads data from `/proc/stat` and outputs a handful of **node** and **hwthread** metrics. If a metric is not required, it can be excluded from being forwarded to the sink. The `cpustat` collector reads data from `/proc/stats` and outputs a handful of **node** and **hwthread** metrics. If a metric is not required, it can be excluded from being forwarded to the sink.
Metrics: Metrics:
* `cpu_user`
* `cpu_user` with `unit=Percent` * `cpu_nice`
* `cpu_nice` with `unit=Percent` * `cpu_system`
* `cpu_system` with `unit=Percent` * `cpu_idle`
* `cpu_idle` with `unit=Percent` * `cpu_iowait`
* `cpu_iowait` with `unit=Percent` * `cpu_irq`
* `cpu_irq` with `unit=Percent` * `cpu_softirq`
* `cpu_softirq` with `unit=Percent` * `cpu_steal`
* `cpu_steal` with `unit=Percent` * `cpu_guest`
* `cpu_guest` with `unit=Percent` * `cpu_guest_nice`
* `cpu_guest_nice` with `unit=Percent`
* `cpu_used` = `cpu_* - cpu_idle` with `unit=Percent`
* `num_cpus`

View File

@@ -3,13 +3,14 @@ package collectors
import ( import (
"encoding/json" "encoding/json"
"errors" "errors"
"io/ioutil"
"log" "log"
"os"
"os/exec" "os/exec"
"strings" "strings"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
influx "github.com/influxdata/line-protocol" influx "github.com/influxdata/line-protocol"
) )
@@ -23,17 +24,19 @@ type CustomCmdCollectorConfig struct {
type CustomCmdCollector struct { type CustomCmdCollector struct {
metricCollector metricCollector
handler *influx.MetricHandler handler *influx.MetricHandler
parser *influx.Parser parser *influx.Parser
config CustomCmdCollectorConfig config CustomCmdCollectorConfig
commands []string commands []string
files []string files []string
statsProcessedMetrics int64
statsProcessedCommands int64
statsProcessedFiles int64
} }
func (m *CustomCmdCollector) Init(config json.RawMessage) error { func (m *CustomCmdCollector) Init(config json.RawMessage) error {
var err error var err error
m.name = "CustomCmdCollector" m.name = "CustomCmdCollector"
m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "Custom"} m.meta = map[string]string{"source": m.name, "group": "Custom"}
if len(config) > 0 { if len(config) > 0 {
err = json.Unmarshal(config, &m.config) err = json.Unmarshal(config, &m.config)
@@ -48,12 +51,12 @@ func (m *CustomCmdCollector) Init(config json.RawMessage) error {
command := exec.Command(cmdfields[0], strings.Join(cmdfields[1:], " ")) command := exec.Command(cmdfields[0], strings.Join(cmdfields[1:], " "))
command.Wait() command.Wait()
_, err = command.Output() _, err = command.Output()
if err == nil { if err != nil {
m.commands = append(m.commands, c) m.commands = append(m.commands, c)
} }
} }
for _, f := range m.config.Files { for _, f := range m.config.Files {
_, err = os.ReadFile(f) _, err = ioutil.ReadFile(f)
if err == nil { if err == nil {
m.files = append(m.files, f) m.files = append(m.files, f)
} else { } else {
@@ -67,6 +70,9 @@ func (m *CustomCmdCollector) Init(config json.RawMessage) error {
m.handler = influx.NewMetricHandler() m.handler = influx.NewMetricHandler()
m.parser = influx.NewParser(m.handler) m.parser = influx.NewParser(m.handler)
m.parser.SetTimeFunc(DefaultTime) m.parser.SetTimeFunc(DefaultTime)
m.statsProcessedMetrics = 0
m.statsProcessedFiles = 0
m.statsProcessedCommands = 0
m.init = true m.init = true
return nil return nil
} }
@@ -75,7 +81,7 @@ var DefaultTime = func() time.Time {
return time.Unix(42, 0) return time.Unix(42, 0)
} }
func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -99,11 +105,18 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessa
continue continue
} }
output <- lp.FromInfluxMetric(c) y := lp.FromInfluxMetric(c)
if err == nil {
m.statsProcessedMetrics++
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
output <- y
}
} }
m.statsProcessedCommands++
stats.ComponentStatInt(m.name, "processed_commands", m.statsProcessedCommands)
} }
for _, file := range m.files { for _, file := range m.files {
buffer, err := os.ReadFile(file) buffer, err := ioutil.ReadFile(file)
if err != nil { if err != nil {
log.Print(err) log.Print(err)
return return
@@ -118,8 +131,15 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessa
if skip { if skip {
continue continue
} }
output <- lp.FromInfluxMetric(f) y := lp.FromInfluxMetric(f)
if err == nil {
m.statsProcessedMetrics++
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
output <- y
}
} }
m.statsProcessedFiles++
stats.ComponentStatInt(m.name, "processed_files", m.statsProcessedFiles)
} }
} }
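
The collector feeds command output and file contents through the influx line-protocol parser; a minimal sketch of that parsing step, using the API names visible in the imports above (from `github.com/influxdata/line-protocol`):

```go
package main

import (
	"fmt"

	influx "github.com/influxdata/line-protocol"
)

func main() {
	handler := influx.NewMetricHandler()
	parser := influx.NewParser(handler)
	// One line of influx line protocol: measurement,tags fields timestamp.
	metrics, err := parser.Parse([]byte("example,type=node value=42 1649000000000000000\n"))
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Time())
	}
}
```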

View File

@@ -3,13 +3,15 @@ package collectors
import ( import (
"bufio" "bufio"
"encoding/json" "encoding/json"
"fmt"
"os" "os"
"strings" "strings"
"syscall" "syscall"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
// "log" // "log"
@@ -22,14 +24,12 @@ type DiskstatCollectorConfig struct {
type DiskstatCollector struct { type DiskstatCollector struct {
metricCollector metricCollector
//matches map[string]int config DiskstatCollectorConfig
config IOstatCollectorConfig statsProcessedMetrics int64
//devices map[string]IOstatCollectorEntry
} }
func (m *DiskstatCollector) Init(config json.RawMessage) error { func (m *DiskstatCollector) Init(config json.RawMessage) error {
m.name = "DiskstatCollector" m.name = "DiskstatCollector"
m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "Disk"} m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup() m.setup()
if len(config) > 0 { if len(config) > 0 {
@@ -44,11 +44,12 @@ func (m *DiskstatCollector) Init(config json.RawMessage) error {
return err return err
} }
defer file.Close() defer file.Close()
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -78,41 +79,39 @@ func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessag
continue continue
} }
path := strings.Replace(linefields[1], `\040`, " ", -1) path := strings.Replace(linefields[1], `\040`, " ", -1)
stat := syscall.Statfs_t{ stat := syscall.Statfs_t{}
Blocks: 0,
Bsize: 0,
Bfree: 0,
}
err := syscall.Statfs(path, &stat) err := syscall.Statfs(path, &stat)
if err != nil { if err != nil {
continue fmt.Println(err.Error())
} return
if stat.Blocks == 0 || stat.Bsize == 0 {
continue
} }
tags := map[string]string{"type": "node", "device": linefields[0]} tags := map[string]string{"type": "node", "device": linefields[0]}
total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000) total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000)
y, err := lp.NewMessage("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now()) y, err := lp.New("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
if err == nil { if err == nil {
y.AddMeta("unit", "GBytes") y.AddMeta("unit", "GBytes")
m.statsProcessedMetrics++
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
output <- y output <- y
} }
free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000) free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000)
y, err = lp.NewMessage("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now()) y, err = lp.New("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
if err == nil { if err == nil {
y.AddMeta("unit", "GBytes") y.AddMeta("unit", "GBytes")
m.statsProcessedMetrics++
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
output <- y output <- y
} }
if total > 0 { perc := (100 * (total - free)) / total
perc := (100 * (total - free)) / total if perc > part_max_used {
if perc > part_max_used { part_max_used = perc
part_max_used = perc
}
} }
} }
y, err := lp.NewMessage("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now()) y, err := lp.New("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
if err == nil { if err == nil {
y.AddMeta("unit", "percent") y.AddMeta("unit", "percent")
m.statsProcessedMetrics++
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
output <- y output <- y
} }
} }
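
The disk capacity metrics above come straight from `statfs(2)`; a standalone sketch of the same computation (values reported in GB, matching the collector's `GBytes` unit, including the new code's guard against a zero total):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	var stat syscall.Statfs_t
	if err := syscall.Statfs("/", &stat); err != nil {
		fmt.Println(err)
		return
	}
	// total = Blocks * Bsize, free = Bfree * Bsize, both scaled to GB.
	total := stat.Blocks * uint64(stat.Bsize) / 1000000000
	free := stat.Bfree * uint64(stat.Bsize) / 1000000000
	if total > 0 {
		fmt.Printf("disk_total=%d GBytes disk_free=%d GBytes used=%d %%\n",
			total, free, 100*(total-free)/total)
	}
}
```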

View File

@@ -5,7 +5,7 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"log" "log"
"os/exec" "os/exec"
"os/user" "os/user"
@@ -13,8 +13,9 @@ import (
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
const DEFAULT_GPFS_CMD = "mmpmon" const DEFAULT_GPFS_CMD = "mmpmon"
@@ -31,11 +32,11 @@ type GpfsCollector struct {
Mmpmon string `json:"mmpmon_path,omitempty"` Mmpmon string `json:"mmpmon_path,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"` ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
SendBandwidths bool `json:"send_bandwidths"` SendBandwidths bool `json:"send_bandwidths"`
SendTotalValues bool `json:"send_total_values"`
} }
skipFS map[string]struct{} skipFS map[string]struct{}
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
lastState map[string]GpfsCollectorLastState lastState map[string]GpfsCollectorLastState
statsProcessedMetrics int64
} }
func (m *GpfsCollector) Init(config json.RawMessage) error { func (m *GpfsCollector) Init(config json.RawMessage) error {
@@ -47,7 +48,6 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
var err error var err error
m.name = "GpfsCollector" m.name = "GpfsCollector"
m.setup() m.setup()
m.parallel = true
// Set default mmpmon binary // Set default mmpmon binary
m.config.Mmpmon = DEFAULT_GPFS_CMD m.config.Mmpmon = DEFAULT_GPFS_CMD
@@ -72,7 +72,6 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
for _, fs := range m.config.ExcludeFilesystem { for _, fs := range m.config.ExcludeFilesystem {
m.skipFS[fs] = struct{}{} m.skipFS[fs] = struct{}{}
} }
m.lastState = make(map[string]GpfsCollectorLastState)
// GPFS / IBM Spectrum Scale file system statistics can only be queried by user root // GPFS / IBM Spectrum Scale file system statistics can only be queried by user root
user, err := user.Current() user, err := user.Current()
@@ -89,12 +88,12 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
return fmt.Errorf("failed to find mmpmon binary '%s': %v", m.config.Mmpmon, err) return fmt.Errorf("failed to find mmpmon binary '%s': %v", m.config.Mmpmon, err)
} }
m.config.Mmpmon = p m.config.Mmpmon = p
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized // Check if already initialized
if !m.init { if !m.init {
return return
@@ -119,8 +118,8 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
cmd.Stderr = cmdStderr cmd.Stderr = cmdStderr
err := cmd.Run() err := cmd.Run()
if err != nil { if err != nil {
dataStdErr, _ := io.ReadAll(cmdStderr) dataStdErr, _ := ioutil.ReadAll(cmdStderr)
dataStdOut, _ := io.ReadAll(cmdStdout) dataStdOut, _ := ioutil.ReadAll(cmdStdout)
cclog.ComponentError( cclog.ComponentError(
m.name, m.name,
fmt.Sprintf("Read(): Failed to execute command \"%s\": %v\n", cmd.String(), err), fmt.Sprintf("Read(): Failed to execute command \"%s\": %v\n", cmd.String(), err),
@@ -165,16 +164,11 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
continue continue
} }
// Add filesystem tag
m.tags["filesystem"] = filesystem m.tags["filesystem"] = filesystem
if _, ok := m.lastState[filesystem]; !ok {
// Create initial last state m.lastState[filesystem] = GpfsCollectorLastState{
if m.config.SendBandwidths { bytesRead: -1,
if _, ok := m.lastState[filesystem]; !ok { bytesWritten: -1,
m.lastState[filesystem] = GpfsCollectorLastState{
bytesRead: -1,
bytesWritten: -1,
}
} }
} }
@@ -217,34 +211,16 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert bytes read '%s' to int64: %v", key_value["_br_"], err)) fmt.Sprintf("Read(): Failed to convert bytes read '%s' to int64: %v", key_value["_br_"], err))
continue continue
} }
if y, err := if y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp); err == nil {
lp.NewMessage(
"gpfs_bytes_read",
m.tags,
m.meta,
map[string]interface{}{
"value": bytesRead,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes")
output <- y output <- y
m.statsProcessedMetrics++
} }
if m.config.SendBandwidths { if m.config.SendBandwidths {
if lastBytesRead := m.lastState[filesystem].bytesRead; lastBytesRead >= 0 { if lastBytesRead := m.lastState[filesystem].bytesRead; lastBytesRead >= 0 {
bwRead := float64(bytesRead-lastBytesRead) / timeDiff bwRead := float64(bytesRead-lastBytesRead) / timeDiff
if y, err := if y, err := lp.New("gpfs_bw_read", m.tags, m.meta, map[string]interface{}{"value": bwRead}, timestamp); err == nil {
lp.NewMessage(
"gpfs_bw_read",
m.tags,
m.meta,
map[string]interface{}{
"value": bwRead,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes/sec")
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
} }
@@ -257,34 +233,16 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert bytes written '%s' to int64: %v", key_value["_bw_"], err)) fmt.Sprintf("Read(): Failed to convert bytes written '%s' to int64: %v", key_value["_bw_"], err))
continue continue
} }
if y, err := if y, err := lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp); err == nil {
lp.NewMessage(
"gpfs_bytes_written",
m.tags,
m.meta,
map[string]interface{}{
"value": bytesWritten,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes")
output <- y output <- y
m.statsProcessedMetrics++
} }
if m.config.SendBandwidths { if m.config.SendBandwidths {
if lastBytesWritten := m.lastState[filesystem].bytesWritten; lastBytesWritten >= 0 { if lastBytesWritten := m.lastState[filesystem].bytesWritten; lastBytesWritten >= 0 {
bwWrite := float64(bytesWritten-lastBytesWritten) / timeDiff bwWrite := float64(bytesWritten-lastBytesWritten) / timeDiff
if y, err := if y, err := lp.New("gpfs_bw_write", m.tags, m.meta, map[string]interface{}{"value": bwWrite}, timestamp); err == nil {
lp.NewMessage(
"gpfs_bw_write",
m.tags,
m.meta,
map[string]interface{}{
"value": bwWrite,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes/sec")
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
} }
@@ -304,8 +262,9 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err)) fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err))
continue continue
} }
if y, err := lp.NewMessage("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil { if y, err := lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
// number of closes // number of closes
@@ -316,8 +275,9 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err)) fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err))
continue continue
} }
if y, err := lp.NewMessage("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil { if y, err := lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
// number of reads // number of reads
@@ -328,8 +288,9 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err)) fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err))
continue continue
} }
if y, err := lp.NewMessage("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil { if y, err := lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
// number of writes // number of writes
@@ -340,8 +301,9 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err)) fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err))
continue continue
} }
if y, err := lp.NewMessage("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil { if y, err := lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
// number of read directories // number of read directories
@@ -352,8 +314,9 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err)) fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err))
continue continue
} }
if y, err := lp.NewMessage("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil { if y, err := lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
// Number of inode updates // Number of inode updates
@@ -364,51 +327,12 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err)) fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err))
continue continue
} }
if y, err := lp.NewMessage("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil { if y, err := lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
m.statsProcessedMetrics++
output <- y output <- y
} }
// Total values
if m.config.SendTotalValues {
bytesTotal := bytesRead + bytesWritten
if y, err :=
lp.NewMessage("gpfs_bytes_total",
m.tags,
m.meta,
map[string]interface{}{
"value": bytesTotal,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes")
output <- y
}
iops := numReads + numWrites
if y, err :=
lp.NewMessage("gpfs_iops",
m.tags,
m.meta,
map[string]interface{}{
"value": iops,
},
timestamp,
); err == nil {
output <- y
}
metaops := numInodeUpdates + numCloses + numOpens + numReaddirs
if y, err :=
lp.NewMessage("gpfs_metaops",
m.tags,
m.meta,
map[string]interface{}{
"value": metaops,
},
timestamp,
); err == nil {
output <- y
}
}
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *GpfsCollector) Close() { func (m *GpfsCollector) Close() {
@@ -6,8 +6,7 @@
"exclude_filesystem": [ "exclude_filesystem": [
"fs1" "fs1"
], ],
"send_bandwidths": true, "send_bandwidths" : true
"send_total_values": true
} }
``` ```
@@ -27,12 +26,8 @@ Metrics:
* `gpfs_num_opens` * `gpfs_num_opens`
* `gpfs_num_closes` * `gpfs_num_closes`
* `gpfs_num_reads` * `gpfs_num_reads`
* `gpfs_num_writes`
* `gpfs_num_readdirs` * `gpfs_num_readdirs`
* `gpfs_num_inode_updates` * `gpfs_num_inode_updates`
* `gpfs_bytes_total = gpfs_bytes_read + gpfs_bytes_written` (if `send_total_values == true`)
* `gpfs_iops = gpfs_num_reads + gpfs_num_writes` (if `send_total_values == true`)
* `gpfs_metaops = gpfs_num_inode_updates + gpfs_num_closes + gpfs_num_opens + gpfs_num_readdirs` (if `send_total_values == true`)
* `gpfs_bw_read` (if `send_bandwidths == true`) * `gpfs_bw_read` (if `send_bandwidths == true`)
* `gpfs_bw_write` (if `send_bandwidths == true`) * `gpfs_bw_write` (if `send_bandwidths == true`)
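The total and bandwidth metrics are simple post-processing of the absolute counters listed above. As a rough sketch (illustrative Go, not the collector's actual code; `deriveBandwidth` is a made-up helper), a `*_bw` value is the difference of two successive counter readings divided by the time between ticks:

```go
package main

import (
	"fmt"
	"time"
)

// deriveBandwidth computes a rate from two successive readings of a
// monotonically increasing byte counter. A negative 'last' value marks
// "no previous sample yet", mirroring a common initialization pattern.
func deriveBandwidth(last, current int64, lastTime, now time.Time) float64 {
	timeDiff := now.Sub(lastTime).Seconds()
	if last < 0 || timeDiff <= 0 {
		return 0
	}
	return float64(current-last) / timeDiff // bytes/sec
}

func main() {
	t0 := time.Now()
	t1 := t0.Add(10 * time.Second)
	// 4 MiB read between two ticks -> ~419430.4 bytes/sec
	fmt.Printf("gpfs_bw_read = %.1f bytes/sec\n",
		deriveBandwidth(0, 4*1024*1024, t0, t1))
}
```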
@@ -2,10 +2,12 @@ package collectors
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
"encoding/json" "encoding/json"
@@ -18,22 +20,17 @@ import (
const IB_BASEPATH = "/sys/class/infiniband/" const IB_BASEPATH = "/sys/class/infiniband/"
type InfinibandCollectorMetric struct { type InfinibandCollectorMetric struct {
name string path string
path string unit string
unit string
scale int64
addToIBTotal bool
addToIBTotalPkgs bool
currentState int64
lastState int64
} }
type InfinibandCollectorInfo struct { type InfinibandCollectorInfo struct {
LID string // IB local Identifier (LID) LID string // IB local Identifier (LID)
device string // IB device device string // IB device
port string // IB device port port string // IB device port
portCounterFiles []InfinibandCollectorMetric // counter metrics for this port portCounterFiles map[string]InfinibandCollectorMetric // mapping counter name -> InfinibandCollectorMetric
tagSet map[string]string // corresponding tag list tagSet map[string]string // corresponding tag list
lastState map[string]int64 // State from last measurement
} }
type InfinibandCollector struct { type InfinibandCollector struct {
@@ -41,11 +38,11 @@ type InfinibandCollector struct {
config struct { config struct {
ExcludeDevices []string `json:"exclude_devices,omitempty"` // IB device to exclude e.g. mlx5_0 ExcludeDevices []string `json:"exclude_devices,omitempty"` // IB device to exclude e.g. mlx5_0
SendAbsoluteValues bool `json:"send_abs_values"` // Send absolute values as read from sys filesystem SendAbsoluteValues bool `json:"send_abs_values"` // Send absolute values as read from sys filesystem
SendTotalValues bool `json:"send_total_values"` // Send computed total values
SendDerivedValues bool `json:"send_derived_values"` // Send derived values e.g. rates SendDerivedValues bool `json:"send_derived_values"` // Send derived values e.g. rates
} }
info []InfinibandCollectorInfo info []*InfinibandCollectorInfo
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
statsProcessedMetrics int64
} }
// Init initializes the Infiniband collector by walking through files below IB_BASEPATH // Init initializes the Infiniband collector by walking through files below IB_BASEPATH
@@ -59,7 +56,6 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
var err error var err error
m.name = "InfinibandCollector" m.name = "InfinibandCollector"
m.setup() m.setup()
m.parallel = true
m.meta = map[string]string{ m.meta = map[string]string{
"source": m.name, "source": m.name,
"group": "Network", "group": "Network",
@@ -89,7 +85,7 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
for _, path := range ibDirs { for _, path := range ibDirs {
// Skip, when no LID is assigned // Skip, when no LID is assigned
line, err := os.ReadFile(filepath.Join(path, "lid")) line, err := ioutil.ReadFile(filepath.Join(path, "lid"))
if err != nil { if err != nil {
continue continue
} }
@@ -117,39 +113,11 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
// Check access to counter files // Check access to counter files
countersDir := filepath.Join(path, "counters") countersDir := filepath.Join(path, "counters")
portCounterFiles := []InfinibandCollectorMetric{ portCounterFiles := map[string]InfinibandCollectorMetric{
{ "ib_recv": {path: filepath.Join(countersDir, "port_rcv_data"), unit: "bytes"},
name: "ib_recv", "ib_xmit": {path: filepath.Join(countersDir, "port_xmit_data"), unit: "bytes"},
path: filepath.Join(countersDir, "port_rcv_data"), "ib_recv_pkts": {path: filepath.Join(countersDir, "port_rcv_packets"), unit: "packets"},
unit: "bytes", "ib_xmit_pkts": {path: filepath.Join(countersDir, "port_xmit_packets"), unit: "packets"},
scale: 4,
addToIBTotal: true,
lastState: -1,
},
{
name: "ib_xmit",
path: filepath.Join(countersDir, "port_xmit_data"),
unit: "bytes",
scale: 4,
addToIBTotal: true,
lastState: -1,
},
{
name: "ib_recv_pkts",
path: filepath.Join(countersDir, "port_rcv_packets"),
unit: "packets",
scale: 1,
addToIBTotalPkgs: true,
lastState: -1,
},
{
name: "ib_xmit_pkts",
path: filepath.Join(countersDir, "port_xmit_packets"),
unit: "packets",
scale: 1,
addToIBTotalPkgs: true,
lastState: -1,
},
} }
for _, counter := range portCounterFiles { for _, counter := range portCounterFiles {
err := unix.Access(counter.path, unix.R_OK) err := unix.Access(counter.path, unix.R_OK)
@@ -158,8 +126,14 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
} }
} }
// Initialize last state
lastState := make(map[string]int64)
for counter := range portCounterFiles {
lastState[counter] = -1
}
m.info = append(m.info, m.info = append(m.info,
InfinibandCollectorInfo{ &InfinibandCollectorInfo{
LID: LID, LID: LID,
device: device, device: device,
port: port, port: port,
@@ -170,19 +144,20 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
"port": port, "port": port,
"lid": LID, "lid": LID,
}, },
lastState: lastState,
}) })
} }
if len(m.info) == 0 { if len(m.info) == 0 {
return fmt.Errorf("found no IB devices") return fmt.Errorf("found no IB devices")
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
// Read reads Infiniband counter files below IB_BASEPATH // Read reads Infiniband counter files below IB_BASEPATH
func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized // Check if already initialized
if !m.init { if !m.init {
@@ -196,15 +171,11 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
// Save current timestamp // Save current timestamp
m.lastTimestamp = now m.lastTimestamp = now
for i := range m.info { for _, info := range m.info {
info := &m.info[i] for counterName, counterDef := range info.portCounterFiles {
var ib_total, ib_total_pkts int64
for i := range info.portCounterFiles {
counterDef := &info.portCounterFiles[i]
// Read counter file // Read counter file
line, err := os.ReadFile(counterDef.path) line, err := ioutil.ReadFile(counterDef.path)
if err != nil { if err != nil {
cclog.ComponentError( cclog.ComponentError(
m.name, m.name,
@@ -218,92 +189,36 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
if err != nil { if err != nil {
cclog.ComponentError( cclog.ComponentError(
m.name, m.name,
fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterDef.name, data, err)) fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterName, data, err))
continue continue
} }
// Scale raw value
v *= counterDef.scale
// Save current state
counterDef.currentState = v
// Send absolute values // Send absolute values
if m.config.SendAbsoluteValues { if m.config.SendAbsoluteValues {
if y, err := if y, err := lp.New(counterName, info.tagSet, m.meta, map[string]interface{}{"value": v}, now); err == nil {
lp.NewMessage(
counterDef.name,
info.tagSet,
m.meta,
map[string]interface{}{
"value": counterDef.currentState,
},
now); err == nil {
y.AddMeta("unit", counterDef.unit) y.AddMeta("unit", counterDef.unit)
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
// Send derived values // Send derived values
if m.config.SendDerivedValues { if m.config.SendDerivedValues {
if counterDef.lastState >= 0 { if info.lastState[counterName] >= 0 {
rate := float64((counterDef.currentState - counterDef.lastState)) / timeDiff rate := float64((v - info.lastState[counterName])) / timeDiff
if y, err := if y, err := lp.New(counterName+"_bw", info.tagSet, m.meta, map[string]interface{}{"value": rate}, now); err == nil {
lp.NewMessage(
counterDef.name+"_bw",
info.tagSet,
m.meta,
map[string]interface{}{
"value": rate,
},
now); err == nil {
y.AddMeta("unit", counterDef.unit+"/sec") y.AddMeta("unit", counterDef.unit+"/sec")
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
counterDef.lastState = counterDef.currentState // Save current state
} info.lastState[counterName] = v
// Sum up total values
if m.config.SendTotalValues {
switch {
case counterDef.addToIBTotal:
ib_total += counterDef.currentState
case counterDef.addToIBTotalPkgs:
ib_total_pkts += counterDef.currentState
}
} }
} }
// Send total values
if m.config.SendTotalValues {
if y, err :=
lp.NewMessage(
"ib_total",
info.tagSet,
m.meta,
map[string]interface{}{
"value": ib_total,
},
now); err == nil {
y.AddMeta("unit", "bytes")
output <- y
}
if y, err :=
lp.NewMessage(
"ib_total_pkts",
info.tagSet,
m.meta,
map[string]interface{}{
"value": ib_total_pkts,
},
now); err == nil {
y.AddMeta("unit", "packets")
output <- y
}
}
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *InfinibandCollector) Close() { func (m *InfinibandCollector) Close() {
@@ -17,16 +17,13 @@ LID file (`/sys/class/infiniband/<dev>/ports/<port>/lid`)
The devices can be filtered with the `exclude_devices` option in the configuration. The devices can be filtered with the `exclude_devices` option in the configuration.
For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`. (See: <https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-class-infiniband>) For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`.
Metrics: Metrics:
* `ib_recv` * `ib_recv`
* `ib_xmit` * `ib_xmit`
* `ib_recv_pkts` * `ib_recv_pkts`
* `ib_xmit_pkts` * `ib_xmit_pkts`
* `ib_total = ib_recv + ib_xmit` (if `send_total_values == true`)
* `ib_total_pkts = ib_recv_pkts + ib_xmit_pkts` (if `send_total_values == true`)
* `ib_recv_bw` (if `send_derived_values == true`) * `ib_recv_bw` (if `send_derived_values == true`)
* `ib_xmit_bw` (if `send_derived_values == true`) * `ib_xmit_bw` (if `send_derived_values == true`)
* `ib_recv_pkts_bw` (if `send_derived_values == true`) * `ib_recv_pkts_bw` (if `send_derived_values == true`)
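For reference, a configuration enabling all optional value classes might look like the following (a hedged example: the `ibstat` config key and the flag values are illustrative; the option names are taken from the collector's config struct shown above):

```json
"ibstat": {
  "exclude_devices": [],
  "send_abs_values": true,
  "send_derived_values": true,
  "send_total_values": true
}
```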
@@ -4,8 +4,9 @@ import (
"bufio" "bufio"
"os" "os"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
// "log" // "log"
"encoding/json" "encoding/json"
@@ -29,15 +30,15 @@ type IOstatCollectorEntry struct {
type IOstatCollector struct { type IOstatCollector struct {
metricCollector metricCollector
matches map[string]int matches map[string]int
config IOstatCollectorConfig config IOstatCollectorConfig
devices map[string]IOstatCollectorEntry devices map[string]IOstatCollectorEntry
statsProcessedMetrics int64
} }
func (m *IOstatCollector) Init(config json.RawMessage) error { func (m *IOstatCollector) Init(config json.RawMessage) error {
var err error var err error
m.name = "IOstatCollector" m.name = "IOstatCollector"
m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "Disk"} m.meta = map[string]string{"source": m.name, "group": "Disk"}
m.setup() m.setup()
if len(config) > 0 { if len(config) > 0 {
@@ -103,11 +104,12 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
lastValues: values, lastValues: values,
} }
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return err return err
} }
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -139,9 +141,10 @@ func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage)
x, err := strconv.ParseInt(linefields[idx], 0, 64) x, err := strconv.ParseInt(linefields[idx], 0, 64)
if err == nil { if err == nil {
diff := x - entry.lastValues[name] diff := x - entry.lastValues[name]
y, err := lp.NewMessage(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now()) y, err := lp.New(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
if err == nil { if err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
entry.lastValues[name] = x entry.lastValues[name] = x
@@ -149,6 +152,7 @@ func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage)
} }
m.devices[device] = entry m.devices[device] = entry
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *IOstatCollector) Close() { func (m *IOstatCollector) Close() {
@@ -1,116 +1,88 @@
package collectors package collectors
import ( import (
"bufio"
"bytes"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"io"
"log" "log"
"os"
"os/exec" "os/exec"
"strconv" "strconv"
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
const IPMITOOL_PATH = `ipmitool`
const IPMISENSORS_PATH = `ipmi-sensors` const IPMISENSORS_PATH = `ipmi-sensors`
type IpmiCollectorConfig struct {
ExcludeDevices []string `json:"exclude_devices"`
IpmitoolPath string `json:"ipmitool_path"`
IpmisensorsPath string `json:"ipmisensors_path"`
}
type IpmiCollector struct { type IpmiCollector struct {
metricCollector metricCollector
config struct { //tags map[string]string
ExcludeDevices []string `json:"exclude_devices"` //matches map[string]string
IpmitoolPath string `json:"ipmitool_path"` config IpmiCollectorConfig
IpmisensorsPath string `json:"ipmisensors_path"` ipmitool string
} ipmisensors string
ipmitool string statsProcessedMetrics int64
ipmisensors string
} }
func (m *IpmiCollector) Init(config json.RawMessage) error { func (m *IpmiCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
m.name = "IpmiCollector" m.name = "IpmiCollector"
m.setup() m.setup()
m.parallel = true m.meta = map[string]string{"source": m.name, "group": "IPMI"}
m.meta = map[string]string{ m.config.IpmitoolPath = string(IPMITOOL_PATH)
"source": m.name, m.config.IpmisensorsPath = string(IPMISENSORS_PATH)
"group": "IPMI", m.ipmitool = ""
} m.ipmisensors = ""
// default path to IPMI tools
m.config.IpmitoolPath = "ipmitool"
m.config.IpmisensorsPath = "ipmi-sensors"
if len(config) > 0 { if len(config) > 0 {
err := json.Unmarshal(config, &m.config) err := json.Unmarshal(config, &m.config)
if err != nil { if err != nil {
return err return err
} }
} }
// Check if executables ipmitool or ipmisensors are found
p, err := exec.LookPath(m.config.IpmitoolPath) p, err := exec.LookPath(m.config.IpmitoolPath)
if err == nil { if err == nil {
command := exec.Command(p) m.ipmitool = p
err := command.Run()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
m.ipmitool = ""
} else {
m.ipmitool = p
}
} }
p, err = exec.LookPath(m.config.IpmisensorsPath) p, err = exec.LookPath(m.config.IpmisensorsPath)
if err == nil { if err == nil {
command := exec.Command(p) m.ipmisensors = p
err := command.Run()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
m.ipmisensors = ""
} else {
m.ipmisensors = p
}
} }
if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 { if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 {
return errors.New("no usable IPMI reader found") return errors.New("no IPMI reader found")
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) { func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMetric) {
// Setup ipmitool command
command := exec.Command(cmd, "sensor") command := exec.Command(cmd, "sensor")
stdout, _ := command.StdoutPipe() command.Wait()
errBuf := new(bytes.Buffer) stdout, err := command.Output()
command.Stderr = errBuf if err != nil {
log.Print(err)
// start command
if err := command.Start(); err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("readIpmiTool(): Failed to start command \"%s\": %v", command.String(), err),
)
return return
} }
// Read command output ll := strings.Split(string(stdout), "\n")
scanner := bufio.NewScanner(stdout)
for scanner.Scan() { for _, line := range ll {
lv := strings.Split(scanner.Text(), "|") lv := strings.Split(line, "|")
if len(lv) < 3 { if len(lv) < 3 {
continue continue
} }
v, err := strconv.ParseFloat(strings.TrimSpace(lv[1]), 64) v, err := strconv.ParseFloat(strings.Trim(lv[1], " "), 64)
if err == nil { if err == nil {
name := strings.ToLower(strings.Replace(strings.TrimSpace(lv[0]), " ", "_", -1)) name := strings.ToLower(strings.Replace(strings.Trim(lv[0], " "), " ", "_", -1))
unit := strings.TrimSpace(lv[2]) unit := strings.Trim(lv[2], " ")
if unit == "Volts" { if unit == "Volts" {
unit = "Volts" unit = "Volts"
} else if unit == "degrees C" { } else if unit == "degrees C" {
@@ -121,27 +93,17 @@ func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
unit = "Watts" unit = "Watts"
} }
y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now()) y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
if err == nil { if err == nil {
y.AddMeta("unit", unit) y.AddMeta("unit", unit)
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
} }
// Wait for command end
if err := command.Wait(); err != nil {
errMsg, _ := io.ReadAll(errBuf)
cclog.ComponentError(
m.name,
fmt.Sprintf("readIpmiTool(): Failed to wait for the end of command \"%s\": %v\n", command.String(), err),
)
cclog.ComponentError(m.name, fmt.Sprintf("readIpmiTool(): command stderr: \"%s\"\n", strings.TrimSpace(string(errMsg))))
return
}
} }
func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) { func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMetric) {
command := exec.Command(cmd, "--comma-separated-output", "--sdr-cache-recreate") command := exec.Command(cmd, "--comma-separated-output", "--sdr-cache-recreate")
command.Wait() command.Wait()
@@ -159,30 +121,32 @@ func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
v, err := strconv.ParseFloat(lv[3], 64) v, err := strconv.ParseFloat(lv[3], 64)
if err == nil { if err == nil {
name := strings.ToLower(strings.Replace(lv[1], " ", "_", -1)) name := strings.ToLower(strings.Replace(lv[1], " ", "_", -1))
y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now()) y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
if err == nil { if err == nil {
if len(lv) > 4 { if len(lv) > 4 {
y.AddMeta("unit", lv[4]) y.AddMeta("unit", lv[4])
} }
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
} }
} }
} }
func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
}
if len(m.config.IpmitoolPath) > 0 { if len(m.config.IpmitoolPath) > 0 {
m.readIpmiTool(m.config.IpmitoolPath, output) _, err := os.Stat(m.config.IpmitoolPath)
if err == nil {
m.readIpmiTool(m.config.IpmitoolPath, output)
}
} else if len(m.config.IpmisensorsPath) > 0 { } else if len(m.config.IpmisensorsPath) > 0 {
m.readIpmiSensors(m.config.IpmisensorsPath, output) _, err := os.Stat(m.config.IpmisensorsPath)
if err == nil {
m.readIpmiSensors(m.config.IpmisensorsPath, output)
}
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *IpmiCollector) Close() { func (m *IpmiCollector) Close() {
@@ -8,6 +8,9 @@
} }
``` ```
The `ipmistat` collector reads data from `ipmitool` (`ipmitool sensor`) or `ipmi-sensors` (`ipmi-sensors --sdr-cache-recreate --comma-separated-output`). The `ipmistat` collector reads data from `ipmitool` (`ipmitool sensor`) or `ipmi-sensors` (`ipmi-sensors --sdr-cache-recreate --comma-separated-output`).
The metrics depend on the output of the underlying tools, but typically include temperature, power and energy readings. The metrics depend on the output of the underlying tools, but typically include temperature, power and energy readings.
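As a simplified sketch of the parsing involved (not the collector's exact implementation; the sample line and field layout are assumptions based on typical `ipmitool sensor` output):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSensorLine splits one '|'-separated line of `ipmitool sensor` output
// into a metric name, a numeric value and a unit. Lines whose value field is
// not numeric (e.g. "na") are skipped by returning ok=false.
func parseSensorLine(line string) (name string, value float64, unit string, ok bool) {
	lv := strings.Split(line, "|")
	if len(lv) < 3 {
		return "", 0, "", false
	}
	v, err := strconv.ParseFloat(strings.TrimSpace(lv[1]), 64)
	if err != nil {
		return "", 0, "", false
	}
	// metric names are lower-cased, spaces become underscores
	name = strings.ToLower(strings.ReplaceAll(strings.TrimSpace(lv[0]), " ", "_"))
	return name, v, strings.TrimSpace(lv[2]), true
}

func main() {
	if name, v, unit, ok := parseSensorLine("CPU Temp | 42.000 | degrees C | ok"); ok {
		fmt.Printf("%s = %g %s\n", name, v, unit) // cpu_temp = 42 degrees C
	}
}
```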
File diff suppressed because it is too large
@@ -3,77 +3,37 @@
The `likwid` collector is probably the most complicated collector. The LIKWID library is included as a static library with *direct* access mode. The *direct* access mode is suitable if the daemon is executed by a root user. The static library does not contain the performance groups, so all information needs to be provided in the configuration. The `likwid` collector is probably the most complicated collector. The LIKWID library is included as a static library with *direct* access mode. The *direct* access mode is suitable if the daemon is executed by a root user. The static library does not contain the performance groups, so all information needs to be provided in the configuration. The `likwid` configuration consists of two parts, the "eventsets" and "globalmetrics":
```json The `likwid` configuration consists of two parts, the "eventsets" and "globalmetrics":
"likwid": { - An event set list itself has two parts, the "events" and a set of derivable "metrics". Each of the "events" is a counter:event pair in LIKWID's syntax. The "metrics" are a list of formulas to derive the metric value from the measurements of the "events". Each metric has a name, the formula, a scope and a publish flag. Counter names can be used like variables in the formulas, so `PMC0+PMC1` sums the measurements for the both events configured in the counters `PMC0` and `PMC1`. The scope tells the Collector whether it is a metric for each hardware thread (`cpu`) or each CPU socket (`socket`). You may specify a unit for the metric with `unit`. The last one is the publishing flag. It tells the collector whether a metric should be sent to the router.
"force_overwrite" : false, - The global metrics are metrics which require data from all event set measurements to be derived. The inputs are the metrics in the event sets. Similar to the metrics in the event sets, the global metrics are defined by a name, a formula, a scope and a publish flag. See event set metrics for details. The only difference is that there is no access to the raw event measurements anymore but only to the metrics. So, the idea is to derive a metric in the "eventsets" section and reuse it in the "globalmetrics" part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics (`publish=false`). **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases. Similar to the metrics in the eventset, you can specify a metric unit with the `unit` field.
"invalid_to_zero" : false,
"liblikwid_path" : "/path/to/liblikwid.so",
"accessdaemon_path" : "/folder/that/contains/likwid-accessD",
"access_mode" : "direct or accessdaemon or perf_event",
"lockfile_path" : "/var/run/likwid.lock",
"eventsets": [
{
"events" : {
"COUNTER0": "EVENT0",
"COUNTER1": "EVENT1"
},
"metrics" : [
{
"name": "sum_01",
"calc": "COUNTER0 + COUNTER1",
"publish": false,
"unit": "myunit",
"type": "hwthread"
}
]
}
],
"globalmetrics" : [
{
"name": "global_sum",
"calc": "sum_01",
"publish": true,
"unit": "myunit",
"type": "hwthread"
}
]
}
```
The `likwid` configuration consists of two parts, the `eventsets` and `globalmetrics`:
- An event set list itself has two parts, the `events` and a set of derivable `metrics`. Each of the `events` is a `counter:event` pair in LIKWID's syntax. The `metrics` are a list of formulas that derive metric values from the measurements of the `events`. Each metric has a name, the formula, a type and a publish flag. There is an optional `unit` field. Counter names can be used like variables in the formulas, so `PMC0+PMC1` sums the measurements for both events configured in the counters `PMC0` and `PMC1`. You can optionally use `time` for the measurement time and `inverseClock` for `1.0/baseCpuFrequency`. The type tells the LikwidCollector whether it is a metric for each hardware thread (`hwthread`) or each CPU socket (`socket`). The last one is the publishing flag. It tells the LikwidCollector whether a metric should be sent to the router or is only used internally to compute a global metric.
- The `globalmetrics` are metrics which require data from multiple event set measurements to be derived. The inputs are the metrics in the event sets. Similar to the metrics in the event sets, the global metrics are defined by a name, a formula, a type and a publish flag. See event set metrics for details. The only difference is that there is no access to the raw event measurements anymore but only to the metrics. Also `time` and `inverseClock` cannot be used anymore. So, the idea is to derive a metric in the `eventsets` section and reuse it in the `globalmetrics` part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics (`"publish": false`). **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases. Similar to the metrics in the eventset, you can specify a metric unit with the `unit` field.
Additional options: Additional options:
- `access_mode` : Method to use for hardware performance monitoring (`direct` access as root user, `accessdaemon` for the daemon mode)
- `accessdaemon_path`: Folder with the access daemon `likwid-accessD`, commonly `$LIKWID_INSTALL_LOC/sbin`
- `force_overwrite`: Same as setting `LIKWID_FORCE=1`. In case counters are already in-use, LIKWID overwrites their configuration to do its measurements - `force_overwrite`: Same as setting `LIKWID_FORCE=1`. In case counters are already in-use, LIKWID overwrites their configuration to do its measurements
- `invalid_to_zero`: In some cases, the calculations result in `NaN` or `Inf`. With this option, all `NaN` and `Inf` values are replaced with `0.0`. See below in [separate section](./likwidMetric.md#invalid_to_zero-option) - `invalid_to_zero`: In some cases, the calculations result in `NaN` or `Inf`. With this option, all `NaN` and `Inf` values are replaced with `0.0`.
- `access_mode`: Specify LIKWID access mode: `direct` for direct register access as root user or `accessdaemon`. The access mode `perf_event` is currently untested. - `access_mode`: Specify LIKWID access mode: `direct` for direct register access as root user or `accessdaemon`
- `accessdaemon_path`: Folder of the accessDaemon `likwid-accessD` (like `/usr/local/sbin`) - `accessdaemon_path`: Folder of the accessDaemon `likwid-accessD`
- `liblikwid_path`: Location of `liblikwid.so` including file name like `/usr/local/lib/liblikwid.so` - `liblikwid_path`: Location of `liblikwid.so`
- `lockfile_path`: Location of LIKWID's lock file if multiple tools should access the hardware counters. Default `/var/run/likwid.lock`
### Available metric types ### Available metric scopes
Hardware performance counters are scattered all over the system nowadays. A counter covers a specific part of the system. While there are hardware thread specific counters for CPU cycles, instructions and so on, some others are specific for a whole CPU socket/package. To address that, the LikwidCollector provides the specification of a `type` for each metric. Hardware performance counters are scattered all over the system nowadays. A counter covers a specific part of the system. While there are hardware thread specific counters for CPU cycles, instructions and so on, some others are specific for a whole CPU socket/package. To address that, the collector provides the specification of a 'scope' for each metric.
- `hwthread` : One metric per CPU hardware thread with the tags `"type" : "hwthread"` and `"type-id" : "$hwthread_id"` - `cpu` : One metric per CPU hardware thread with the tags `"type" : "cpu"` and `"type-id" : "$cpu_id"`
- `socket` : One metric per CPU socket/package with the tags `"type" : "socket"` and `"type-id" : "$socket_id"` - `socket` : One metric per CPU socket/package with the tags `"type" : "socket"` and `"type-id" : "$socket_id"`
**Note:** You cannot specify `socket` type for a metric that is measured at `hwthread` type, so some kind of expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the type of each counter from the *Architecture* pages and as soon as one counter in a metric is socket-specific, the whole metric is socket-specific. **Note:** You cannot specify `socket` scope for a metric that is measured at `cpu` scope, so some kind of expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the scope of each counter from the *Architecture* pages and as soon as one counter in a metric is socket-specific, the whole metric is socket-specific.
As a guideline: As a guideline:
- All counters `FIXCx`, `PMCy` and `TMAz` have the scope `cpu`
- All counters `FIXCx`, `PMCy` and `TMAz` have the type `hwthread` - All counters names containing `BOX` have the scope `socket`
- All counters names containing `BOX` have the type `socket` - All `PWRx` counters have scope `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"` has `cpu` scope
- All `PWRx` counters have type `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"` has `hwthread` type - All `DFCx` counters have scope `socket`
- All `DFCx` counters have type `socket` (an illustrative socket-level metric is sketched below) - All `DFCx` counters have scope `socket`
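As a purely illustrative metric declaration (not a validated performance group; the counters and formula are made up for the example), a metric built from socket-scoped data fabric counters must therefore carry the `socket` type:

```json
{
  "name": "mem_bw",
  "calc": "1.0E-06*(DFC0+DFC1)*64.0/time",
  "unit": "MBytes/s",
  "type": "socket",
  "publish": true
}
```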
### Help with the configuration ### Help with the configuration
The configuration for the `likwid` collector is quite complicated. Most users don't use LIKWID with the event:counter notation but rely on the performance groups defined by the LIKWID team for each architecture. In order to help with the `likwid` collector configuration, we included a script `scripts/likwid_perfgroup_to_cc_config.py` that creates the configuration of an `eventset` from a performance group (using a LIKWID installation in `$PATH`): The configuration for the `likwid` collector is quite complicated. Most users don't use LIKWID with the event:counter notation but rely on the performance groups defined by the LIKWID team for each architecture. In order to help with the `likwid` collector configuration, we included a script `scripts/likwid_perfgroup_to_cc_config.py` that creates the configuration of an `eventset` from a performance group (using a LIKWID installation in `$PATH`):
``` ```
$ likwid-perfctr -i $ likwid-perfctr -i
[...] [...]
@@ -90,7 +50,6 @@ $ scripts/likwid_perfgroup_to_cc_config.py ICX MEM_DP
{ {
"events": { "events": {
"FIXC0": "INSTR_RETIRED_ANY", "FIXC0": "INSTR_RETIRED_ANY",
"FIXC1": "CPU_CLK_UNHALTED_CORE",
"..." : "..." "..." : "..."
}, },
"metrics" : [ "metrics" : [
@@ -99,7 +58,7 @@ $ scripts/likwid_perfgroup_to_cc_config.py ICX MEM_DP
"name": "Runtime (RDTSC) [s]", "name": "Runtime (RDTSC) [s]",
"publish": true, "publish": true,
"unit": "seconds" "unit": "seconds"
"type": "hwthread" "scope": "cpu"
}, },
{ {
"..." : "..." "..." : "..."
@@ -115,40 +74,22 @@ You can copy this JSON and add it to the `eventsets` list. If you specify multip
LIKWID checks the file `/var/run/likwid.lock` before performing any interfering operations. Who is allowed to access the counters is determined by the owner of the file. If it does not exist, it is created for the current user. So, if you want to temporarily allow counter access to a user (e.g. in a job): LIKWID checks the file `/var/run/likwid.lock` before performing any interfering operations. Who is allowed to access the counters is determined by the owner of the file. If it does not exist, it is created for the current user. So, if you want to temporarily allow counter access to a user (e.g. in a job):
Before (SLURM prolog, ...) Before (SLURM prolog, ...)
```bash ```
chown $JOBUSER /var/run/likwid.lock $ chwon $JOBUSER /var/run/likwid.lock
``` ```
After (SLURM epilog, ...) After (SLURM epilog, ...)
```bash ```
chown $CCUSER /var/run/likwid.lock $ chwon $CCUSER /var/run/likwid.lock
``` ```
### `invalid_to_zero` option
In some cases LIKWID returns `0.0` for events that are further used in processing and may be used as a divisor in a calculation. After evaluation of a metric, the result might then be `NaN` or `+-Inf`. Such metrics are commonly not created and forwarded to the router because the [InfluxDB line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#float) does not support these special floating-point values. If you want to have them sent, this option forces these metric values to be `0.0` instead.
One might think this does not happen often, but frequently used metrics in the world of performance engineering, like Instructions-per-Cycle (IPC) or, even more commonly, the actual CPU clock, are derived from events like `CPU_CLK_UNHALTED_CORE` (Intel) which do not increment in a halted state (as the name implies). There are various power management systems in a chip which can cause a hardware thread to go into such a state. Moreover, if no cycles are executed by the core, many other events do not increment either (like `INSTR_RETIRED_ANY` for retired instructions, which is part of IPC).
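A minimal sketch of the resulting behavior (illustrative Go; the collector's real implementation may differ in detail):

```go
package main

import (
	"fmt"
	"math"
)

// sanitize mirrors what invalid_to_zero implies: NaN and +/-Inf results of a
// metric formula are forced to 0.0 before the metric is forwarded.
func sanitize(value float64, invalidToZero bool) float64 {
	if invalidToZero && (math.IsNaN(value) || math.IsInf(value, 0)) {
		return 0.0
	}
	return value
}

func main() {
	cycles := 0.0         // halted core: CPU_CLK_UNHALTED_CORE did not increment
	ipc := 100.0 / cycles // +Inf
	fmt.Println(sanitize(ipc, true)) // prints 0
}
```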
### `lockfile_path` option
LIKWID can be configured with a lock file with which the access to the performance monitoring registers can be disabled (only the owner of the lock file is allowed to access the registers). When the `lockfile_path` option is set, the collector subscribes to changes to this file to stop monitoring if the owner of the lock file changes. This feature is useful when users should be able to perform their own hardware performance counter measurements through LIKWID or any other tool.
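One possible shape of the ownership check (a Unix-only sketch under the assumption that the collector compares the lock file's owner with its own user; `lockOwnerUID` is a made-up helper):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// lockOwnerUID returns the UID owning the given lock file.
func lockOwnerUID(path string) (uint32, error) {
	info, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("no unix stat info for %s", path)
	}
	return st.Uid, nil
}

func main() {
	uid, err := lockOwnerUID("/var/run/likwid.lock")
	if err != nil {
		fmt.Println(err)
		return
	}
	if int(uid) == os.Getuid() {
		fmt.Println("we own the lock: measurements allowed")
	} else {
		fmt.Println("lock owned by another user: stop monitoring")
	}
}
```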
### `send_*_total_values` options
- `send_core_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per CPU core.
- `send_socket_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per CPU socket.
- `send_node_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per node (a sketch of this aggregation follows below).
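A sketch of the per-core case (illustrative only; the hwthread-to-core mapping is assumed to come from the system topology):

```go
package main

import "fmt"

// coreTotals sums per-hwthread metric values into per-core totals, the kind
// of aggregation send_core_total_values enables.
func coreTotals(perHwthread map[int]float64, coreOfHwthread map[int]int) map[int]float64 {
	totals := make(map[int]float64)
	for hwt, v := range perHwthread {
		totals[coreOfHwthread[hwt]] += v
	}
	return totals
}

func main() {
	values := map[int]float64{0: 1.5, 1: 2.5, 2: 3.0, 3: 1.0} // per-hwthread values
	topo := map[int]int{0: 0, 1: 0, 2: 1, 3: 1}               // two SMT threads per core
	fmt.Println(coreTotals(values, topo)) // map[0:4 1:4]
}
```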
### Example configuration ### Example configuration
#### AMD Zen3
```json ```json
"likwid": { "likwid": {
"force_overwrite" : false, "force_overwrite" : false,
"invalid_to_zero" : false, "nan_to_zero" : false,
"eventsets": [ "eventsets": [
{ {
"events": { "events": {
@@ -167,20 +108,20 @@ LIKWID can be configured with a lock file with which the access to the performan
{ {
"name": "ipc", "name": "ipc",
"calc": "PMC0/PMC1", "calc": "PMC0/PMC1",
"type": "hwthread", "type": "cpu",
"publish": true "publish": true
}, },
{ {
"name": "flops_any", "name": "flops_any",
"calc": "0.000001*PMC2/time", "calc": "0.000001*PMC2/time",
"unit": "MFlops/s", "unit": "MFlops/s",
"type": "hwthread", "type": "cpu",
"publish": true "publish": true
}, },
{ {
"name": "clock", "name": "clock",
"calc": "0.000001*(FIXC1/FIXC2)/inverseClock", "calc": "0.000001*(FIXC1/FIXC2)/inverseClock",
"type": "hwthread", "type": "cpu",
"unit": "MHz", "unit": "MHz",
"publish": true "publish": true
}, },
@@ -241,10 +182,9 @@ LIKWID can be configured with a lock file with which the access to the performan
### How to get the eventsets and metrics from LIKWID ### How to get the eventsets and metrics from LIKWID
The `likwid` collector reads hardware performance counters at a **hwthread** and **socket** level. The configuration looks quite complicated, but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector went through multiple iterations that tried to use the performance groups directly, but that approach lacked flexibility. The current way of configuration provides the most flexibility. The `likwid` collector reads hardware performance counters at a **cpu** and **socket** level. The configuration looks quite complicated, but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector went through multiple iterations that tried to use the performance groups directly, but that approach lacked flexibility. The current way of configuration provides the most flexibility.
The logic is as follows: There are multiple eventsets, each consisting of a list of counters+events and a list of metrics. If you compare a common performance group with the example setting above, there is not much difference: The logic is as follows: There are multiple eventsets, each consisting of a list of counters+events and a list of metrics. If you compare a common performance group with the example setting above, there is not much difference:
``` ```
EVENTSET -> "events": { EVENTSET -> "events": {
FIXC1 ACTUAL_CPU_CLOCK -> "FIXC1": "ACTUAL_CPU_CLOCK", FIXC1 ACTUAL_CPU_CLOCK -> "FIXC1": "ACTUAL_CPU_CLOCK",
@@ -263,10 +203,10 @@ METRICS -> "metrics": [
IPC PMC0/PMC1 -> { IPC PMC0/PMC1 -> {
-> "name" : "IPC", -> "name" : "IPC",
-> "calc" : "PMC0/PMC1", -> "calc" : "PMC0/PMC1",
-> "type": "hwthread", -> "scope": "cpu",
-> "publish": true -> "publish": true
-> } -> }
-> ] -> ]
``` ```
The script `scripts/likwid_perfgroup_to_cc_config.py` might help you. The script `scripts/likwid_perfgroup_to_cc_config.py` might help you.
@@ -3,21 +3,24 @@ package collectors
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os" "io/ioutil"
"strconv" "strconv"
"strings" "strings"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
//
// LoadavgCollector collects: // LoadavgCollector collects:
// * load average of last 1, 5 & 15 minutes // * load average of last 1, 5 & 15 minutes
// * number of processes currently runnable // * number of processes currently runnable
// * total number of processes in system // * total number of processes in system
// //
// See: https://www.kernel.org/doc/html/latest/filesystems/proc.html // See: https://www.kernel.org/doc/html/latest/filesystems/proc.html
//
const LOADAVGFILE = "/proc/loadavg" const LOADAVGFILE = "/proc/loadavg"
type LoadavgCollector struct { type LoadavgCollector struct {
@@ -30,11 +33,11 @@ type LoadavgCollector struct {
config struct { config struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"` ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
} }
statsProcessedMetrics int64
} }
func (m *LoadavgCollector) Init(config json.RawMessage) error { func (m *LoadavgCollector) Init(config json.RawMessage) error {
m.name = "LoadavgCollector" m.name = "LoadavgCollector"
m.parallel = true
m.setup() m.setup()
if len(config) > 0 { if len(config) > 0 {
err := json.Unmarshal(config, &m.config) err := json.Unmarshal(config, &m.config)
@@ -62,19 +65,22 @@ func (m *LoadavgCollector) Init(config json.RawMessage) error {
for i, name := range m.proc_matches { for i, name := range m.proc_matches {
_, m.proc_skips[i] = stringArrayContains(m.config.ExcludeMetrics, name) _, m.proc_skips[i] = stringArrayContains(m.config.ExcludeMetrics, name)
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
buffer, err := os.ReadFile(LOADAVGFILE) buffer, err := ioutil.ReadFile(LOADAVGFILE)
if err != nil { if err != nil {
cclog.ComponentError( if err != nil {
m.name, cclog.ComponentError(
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err)) m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
}
return return
} }
now := time.Now() now := time.Now()
@@ -92,9 +98,10 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
if m.load_skips[i] { if m.load_skips[i] {
continue continue
} }
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now) y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil { if err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
@@ -111,12 +118,13 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
if m.proc_skips[i] { if m.proc_skips[i] {
continue continue
} }
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now) y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil { if err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *LoadavgCollector) Close() { func (m *LoadavgCollector) Close() {
@@ -10,8 +10,9 @@ import (
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
const LUSTRE_SYSFS = `/sys/fs/lustre` const LUSTRE_SYSFS = `/sys/fs/lustre`
@@ -37,13 +38,14 @@ type LustreMetricDefinition struct {
type LustreCollector struct { type LustreCollector struct {
metricCollector metricCollector
tags map[string]string tags map[string]string
config LustreCollectorConfig config LustreCollectorConfig
lctl string lctl string
sudoCmd string sudoCmd string
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
definitions []LustreMetricDefinition // Combined list without excluded metrics definitions []LustreMetricDefinition // Combined list without excluded metrics
stats map[string]map[string]int64 // Data for last value per device and metric stats map[string]map[string]int64 // Data for last value per device and metric
statsProcessedMetrics int64
} }
func (m *LustreCollector) getDeviceDataCommand(device string) []string { func (m *LustreCollector) getDeviceDataCommand(device string) []string {
@@ -101,7 +103,7 @@ func getMetricData(lines []string, prefix string, offset int) (int64, error) {
// llitedir := filepath.Join(LUSTRE_SYSFS, "llite") // llitedir := filepath.Join(LUSTRE_SYSFS, "llite")
// devdir := filepath.Join(llitedir, device) // devdir := filepath.Join(llitedir, device)
// statsfile := filepath.Join(devdir, "stats") // statsfile := filepath.Join(devdir, "stats")
// buffer, err := os.ReadFile(statsfile) // buffer, err := ioutil.ReadFile(statsfile)
// if err != nil { // if err != nil {
// return make([]string, 0) // return make([]string, 0)
// } // }
@@ -288,7 +290,6 @@ var LustreDeriveMetrics = []LustreMetricDefinition{
func (m *LustreCollector) Init(config json.RawMessage) error { func (m *LustreCollector) Init(config json.RawMessage) error {
var err error var err error
m.name = "LustreCollector" m.name = "LustreCollector"
m.parallel = true
if len(config) > 0 { if len(config) > 0 {
err = json.Unmarshal(config, &m.config) err = json.Unmarshal(config, &m.config)
if err != nil { if err != nil {
@@ -373,11 +374,12 @@ func (m *LustreCollector) Init(config json.RawMessage) error {
} }
} }
m.lastTimestamp = time.Now() m.lastTimestamp = time.Now()
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -388,7 +390,7 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
for _, def := range m.definitions { for _, def := range m.definitions {
var use_x int64 var use_x int64
var err error var err error
var y lp.CCMessage var y lp.CCMetric
x, err := getMetricData(data, def.lineprefix, def.lineoffset) x, err := getMetricData(data, def.lineprefix, def.lineoffset)
if err == nil { if err == nil {
use_x = x use_x = x
@@ -399,19 +401,19 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
switch def.calc { switch def.calc {
case "none": case "none":
value = use_x value = use_x
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now()) y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "difference": case "difference":
value = use_x - devData[def.name] value = use_x - devData[def.name]
if value.(int64) < 0 { if value.(int64) < 0 {
value = 0 value = 0
} }
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now()) y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "derivative": case "derivative":
value = float64(use_x-devData[def.name]) / tdiff.Seconds() value = float64(use_x-devData[def.name]) / tdiff.Seconds()
if value.(float64) < 0 { if value.(float64) < 0 {
value = 0 value = 0
} }
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now()) y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
} }
if err == nil { if err == nil {
y.AddTag("device", device) y.AddTag("device", device)
@@ -419,11 +421,13 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
y.AddMeta("unit", def.unit) y.AddMeta("unit", def.unit)
} }
output <- y output <- y
m.statsProcessedMetrics++
} }
devData[def.name] = use_x devData[def.name] = use_x
} }
} }
m.lastTimestamp = now m.lastTimestamp = now
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *LustreCollector) Close() { func (m *LustreCollector) Close() {
@@ -12,8 +12,9 @@ import (
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
const MEMSTATFILE = "/proc/meminfo" const MEMSTATFILE = "/proc/meminfo"
@@ -32,12 +33,13 @@ type MemstatCollectorNode struct {
type MemstatCollector struct { type MemstatCollector struct {
metricCollector metricCollector
stats map[string]int64 stats map[string]int64
tags map[string]string tags map[string]string
matches map[string]string matches map[string]string
config MemstatCollectorConfig config MemstatCollectorConfig
nodefiles map[int]MemstatCollectorNode nodefiles map[int]MemstatCollectorNode
sendMemUsed bool sendMemUsed bool
statsProcessedMetrics int64
} }
type MemstatStats struct { type MemstatStats struct {
@@ -68,8 +70,7 @@ func getStats(filename string) map[string]MemstatStats {
} else if len(linefields) == 5 { } else if len(linefields) == 5 {
v, err := strconv.ParseFloat(linefields[3], 64) v, err := strconv.ParseFloat(linefields[3], 64)
if err == nil { if err == nil {
cclog.ComponentDebug("getStats", strings.Trim(linefields[2], ":"), v, linefields[4]) stats[strings.Trim(linefields[0], ":")] = MemstatStats{
stats[strings.Trim(linefields[2], ":")] = MemstatStats{
value: v, value: v,
unit: linefields[4], unit: linefields[4],
} }
@@ -82,7 +83,6 @@ func getStats(filename string) map[string]MemstatStats {
func (m *MemstatCollector) Init(config json.RawMessage) error { func (m *MemstatCollector) Init(config json.RawMessage) error {
var err error var err error
m.name = "MemstatCollector" m.name = "MemstatCollector"
m.parallel = true
m.config.NodeStats = true m.config.NodeStats = true
m.config.NumaStats = false m.config.NumaStats = false
if len(config) > 0 { if len(config) > 0 {
@@ -155,11 +155,12 @@ func (m *MemstatCollector) Init(config json.RawMessage) error {
} }
} }
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return err return err
} }
func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -175,11 +176,12 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
} }
} }
y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now()) y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil { if err == nil {
if len(unit) > 0 { if len(unit) > 0 {
y.AddMeta("unit", unit) y.AddMeta("unit", unit)
} }
m.statsProcessedMetrics++
output <- y output <- y
} }
} }
@@ -188,31 +190,28 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
unit := "" unit := ""
if totalVal, total := stats["MemTotal"]; total { if totalVal, total := stats["MemTotal"]; total {
if freeVal, free := stats["MemFree"]; free { if freeVal, free := stats["MemFree"]; free {
memUsed = totalVal.value - freeVal.value
if len(totalVal.unit) > 0 {
unit = totalVal.unit
} else if len(freeVal.unit) > 0 {
unit = freeVal.unit
}
if bufVal, buffers := stats["Buffers"]; buffers { if bufVal, buffers := stats["Buffers"]; buffers {
memUsed -= bufVal.value
if len(bufVal.unit) > 0 && len(unit) == 0 {
unit = bufVal.unit
}
if cacheVal, cached := stats["Cached"]; cached { if cacheVal, cached := stats["Cached"]; cached {
memUsed -= cacheVal.value memUsed = totalVal.value - (freeVal.value + bufVal.value + cacheVal.value)
if len(cacheVal.unit) > 0 && len(unit) == 0 { if len(totalVal.unit) > 0 {
unit = totalVal.unit
} else if len(freeVal.unit) > 0 {
unit = freeVal.unit
} else if len(bufVal.unit) > 0 {
unit = bufVal.unit
} else if len(cacheVal.unit) > 0 {
unit = cacheVal.unit unit = cacheVal.unit
} }
} }
} }
} }
} }
y, err := lp.NewMessage("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now()) y, err := lp.New("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
if err == nil { if err == nil {
if len(unit) > 0 { if len(unit) > 0 {
y.AddMeta("unit", unit) y.AddMeta("unit", unit)
} }
m.statsProcessedMetrics++
output <- y output <- y
} }
} }
@@ -229,6 +228,7 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
sendStats(stats, nodeConf.tags) sendStats(stats, nodeConf.tags)
} }
} }
stats.ComponentStatInt(m.name, "collected_metrics", m.statsProcessedMetrics)
} }
func (m *MemstatCollector) Close() { func (m *MemstatCollector) Close() {
@@ -3,25 +3,27 @@ package collectors
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"log"
"strconv"
"strings"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
) )
type MetricCollector interface { type MetricCollector interface {
Name() string // Name of the metric collector Name() string // Name of the metric collector
Init(config json.RawMessage) error // Initialize metric collector Init(config json.RawMessage) error // Initialize metric collector
Initialized() bool // Is metric collector initialized? Initialized() bool // Is metric collector initialized?
Parallel() bool Read(duration time.Duration, output chan lp.CCMetric) // Read metrics from metric collector
Read(duration time.Duration, output chan lp.CCMessage) // Read metrics from metric collector
Close() // Close / finish metric collector Close() // Close / finish metric collector
} }
type metricCollector struct { type metricCollector struct {
name string // name of the metric name string // name of the metric
init bool // is metric collector initialized? init bool // is metric collector initialized?
parallel bool // can the metric collector be executed in parallel with others meta map[string]string // static meta data tags
meta map[string]string // static meta data tags
} }
// Name returns the name of the metric collector // Name returns the name of the metric collector
@@ -29,11 +31,6 @@ func (c *metricCollector) Name() string {
return c.name return c.name
} }
// Parallel returns true if the metric collector can be executed in parallel with others
func (c *metricCollector) Parallel() bool {
return c.parallel
}
// Setup is for future use // Setup is for future use
func (c *metricCollector) setup() error { func (c *metricCollector) setup() error {
return nil return nil
@@ -68,6 +65,58 @@ func stringArrayContains(array []string, str string) (int, bool) {
return -1, false return -1, false
} }
// SocketList returns the list of physical sockets as read from /proc/cpuinfo
func SocketList() []int {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
var packs []int
for _, line := range ll {
if strings.HasPrefix(line, "physical id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return packs
}
_, found := intArrayContains(packs, int(id))
if !found {
packs = append(packs, int(id))
}
}
}
return packs
}
// CpuList returns the list of physical CPUs (in contrast to logical CPUs) as read from /proc/cpuinfo
func CpuList() []int {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
var cpulist []int
for _, line := range ll {
if strings.HasPrefix(line, "processor") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[2], 10, 32)
if err != nil {
log.Print(err)
return cpulist
}
_, found := intArrayContains(cpulist, int(id))
if !found {
cpulist = append(cpulist, int(id))
}
}
}
return cpulist
}
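// SocketList and CpuList above share one pattern: scan /proc/cpuinfo for a
// line prefix and collect the unique integer found in a fixed field. As a
// sketch, both could be expressed through a shared helper; the name
// uniqueIntsFromCpuinfo is illustrative and not part of this code base.
func uniqueIntsFromCpuinfo(prefix string, field int) []int {
	buffer, err := ioutil.ReadFile("/proc/cpuinfo")
	if err != nil {
		log.Print(err)
		return nil
	}
	var ids []int
	for _, line := range strings.Split(string(buffer), "\n") {
		if strings.HasPrefix(line, prefix) {
			lv := strings.Fields(line)
			id, err := strconv.ParseInt(lv[field], 10, 32)
			if err != nil {
				log.Print(err)
				return ids
			}
			if _, found := intArrayContains(ids, int(id)); !found {
				ids = append(ids, int(id))
			}
		}
	}
	return ids
}

// With it, SocketList() is uniqueIntsFromCpuinfo("physical id", 3) and
// CpuList() is uniqueIntsFromCpuinfo("processor", 2).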
// RemoveFromStringList removes the string r from the array of strings s // RemoveFromStringList removes the string r from the array of strings s
// If r is not contained in the array an error is returned // If r is not contained in the array an error is returned
func RemoveFromStringList(s []string, r string) ([]string, error) { func RemoveFromStringList(s []string, r string) ([]string, error) {


@@ -9,8 +9,9 @@ import (
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
const NETSTATFILE = "/proc/net/dev" const NETSTATFILE = "/proc/net/dev"
@@ -32,14 +33,14 @@ type NetstatCollectorMetric struct {
type NetstatCollector struct { type NetstatCollector struct {
metricCollector metricCollector
config NetstatCollectorConfig config NetstatCollectorConfig
matches map[string][]NetstatCollectorMetric matches map[string][]NetstatCollectorMetric
lastTimestamp time.Time lastTimestamp time.Time
statsProcessedMetrics int64
} }
func (m *NetstatCollector) Init(config json.RawMessage) error { func (m *NetstatCollector) Init(config json.RawMessage) error {
m.name = "NetstatCollector" m.name = "NetstatCollector"
m.parallel = true
m.setup() m.setup()
m.lastTimestamp = time.Now() m.lastTimestamp = time.Now()
@@ -102,7 +103,7 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
// Check if device is a included device // Check if device is a included device
if _, ok := stringArrayContains(m.config.IncludeDevices, dev); ok { if _, ok := stringArrayContains(m.config.IncludeDevices, dev); ok {
tags := map[string]string{"stype": "network", "stype-id": dev, "type": "node"} tags := map[string]string{"device": dev, "type": "node"}
meta_unit_byte := map[string]string{"source": m.name, "group": "Network", "unit": "bytes"} meta_unit_byte := map[string]string{"source": m.name, "group": "Network", "unit": "bytes"}
meta_unit_byte_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "bytes/sec"} meta_unit_byte_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "bytes/sec"}
meta_unit_pkts := map[string]string{"source": m.name, "group": "Network", "unit": "packets"} meta_unit_pkts := map[string]string{"source": m.name, "group": "Network", "unit": "packets"}
@@ -149,11 +150,12 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
if len(m.matches) == 0 { if len(m.matches) == 0 {
return errors.New("no devices to collector metrics found") return errors.New("no devices to collector metrics found")
} }
m.statsProcessedMetrics = 0
m.init = true m.init = true
return nil return nil
} }
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -197,15 +199,17 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
continue continue
} }
if m.config.SendAbsoluteValues { if m.config.SendAbsoluteValues {
if y, err := lp.NewMessage(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil { if y, err := lp.New(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
if m.config.SendDerivedValues { if m.config.SendDerivedValues {
if metric.lastValue >= 0 { if metric.lastValue >= 0 {
rate := float64(v-metric.lastValue) / timeDiff rate := float64(v-metric.lastValue) / timeDiff
if y, err := lp.NewMessage(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil { if y, err := lp.New(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
metric.lastValue = v metric.lastValue = v
@@ -213,6 +217,7 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
} }
} }
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *NetstatCollector) Close() { func (m *NetstatCollector) Close() {


@@ -23,5 +23,5 @@ Metrics:
* `net_pkts_in_bw` (`unit=packets/sec` if `send_derived_values == true`) * `net_pkts_in_bw` (`unit=packets/sec` if `send_derived_values == true`)
* `net_pkts_out_bw` (`unit=packets/sec` if `send_derived_values == true`) * `net_pkts_out_bw` (`unit=packets/sec` if `send_derived_values == true`)
The device name is added as tag `stype=network,stype-id=<device>`. The device name is added as tag `device`.
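The `*_bw` metrics are first differences of the absolute counters over the measurement interval. A standalone sketch of that calculation (the function name is illustrative, not part of the collector):

```go
package main

import (
	"fmt"
	"time"
)

// derivedRate mirrors the send_derived_values logic: the difference of
// two counter readings divided by the elapsed time in seconds.
func derivedRate(lastValue, value int64, elapsed time.Duration) float64 {
	return float64(value-lastValue) / elapsed.Seconds()
}

func main() {
	// 1 MiB received over a 10 s interval -> 104857.6 bytes/sec
	fmt.Println(derivedRate(0, 1048576, 10*time.Second))
}
```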


@@ -11,7 +11,8 @@ import (
"strings" "strings"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
// First part contains the code for the general NfsCollector. // First part contains the code for the general NfsCollector.
@@ -32,7 +33,8 @@ type nfsCollector struct {
Nfsstats string `json:"nfsstat"` Nfsstats string `json:"nfsstat"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"` ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
} }
data map[string]NfsCollectorData data map[string]NfsCollectorData
statsProcessedMetrics int64
} }
func (m *nfsCollector) initStats() error { func (m *nfsCollector) initStats() error {
@@ -113,12 +115,12 @@ func (m *nfsCollector) MainInit(config json.RawMessage) error {
} }
m.data = make(map[string]NfsCollectorData) m.data = make(map[string]NfsCollectorData)
m.initStats() m.initStats()
m.statsProcessedMetrics = 0
m.init = true m.init = true
m.parallel = true
return nil return nil
} }
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -140,12 +142,14 @@ func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
continue continue
} }
value := data.current - data.last value := data.current - data.last
y, err := lp.NewMessage(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp) y, err := lp.New(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil { if err == nil {
y.AddMeta("version", m.version) y.AddMeta("version", m.version)
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
} }
func (m *nfsCollector) Close() { func (m *nfsCollector) Close() {


@@ -1,166 +0,0 @@
package collectors
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
)
// These are the fields we read from the JSON configuration
type NfsIOStatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
UseServerAddressAsSType bool `json:"use_server_as_stype,omitempty"`
}
// This contains all variables we need during execution and the variables
// defined by metricCollector (name, init, ...)
type NfsIOStatCollector struct {
metricCollector
config NfsIOStatCollectorConfig // the configuration structure
meta map[string]string // default meta information
tags map[string]string // default tags
data map[string]map[string]int64 // data storage for difference calculation
key string // which device info should be used as subtype ID? 'server' or 'mntpoint', see NfsIOStatCollectorConfig.UseServerAddressAsSType
}
var deviceRegex = regexp.MustCompile(`device (?P<server>[^ ]+) mounted on (?P<mntpoint>[^ ]+) with fstype nfs(?P<version>\d*) statvers=[\d\.]+`)
var bytesRegex = regexp.MustCompile(`\s+bytes:\s+(?P<nread>[^ ]+) (?P<nwrite>[^ ]+) (?P<dread>[^ ]+) (?P<dwrite>[^ ]+) (?P<nfsread>[^ ]+) (?P<nfswrite>[^ ]+) (?P<pageread>[^ ]+) (?P<pagewrite>[^ ]+)`)
func resolve_regex_fields(s string, regex *regexp.Regexp) map[string]string {
fields := make(map[string]string)
groups := regex.SubexpNames()
for _, match := range regex.FindAllStringSubmatch(s, -1) {
for groupIdx, group := range match {
if len(groups[groupIdx]) > 0 {
fields[groups[groupIdx]] = group
}
}
}
return fields
}
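// As a usage sketch (the values below are illustrative), applying
// resolve_regex_fields to a typical mountstats device line maps each
// named capture group of deviceRegex to its match:
//
//	line := "device nfsserver:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1"
//	fields := resolve_regex_fields(line, deviceRegex)
//	// fields["server"]   == "nfsserver:/export"
//	// fields["mntpoint"] == "/mnt/nfs"
//	// fields["version"]  == "4"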
func (m *NfsIOStatCollector) readNfsiostats() map[string]map[string]int64 {
data := make(map[string]map[string]int64)
filename := "/proc/self/mountstats"
stats, err := os.ReadFile(filename)
if err != nil {
return data
}
lines := strings.Split(string(stats), "\n")
var current map[string]string = nil
for _, l := range lines {
// Is this a device line with mount point, remote target and NFS version?
dev := resolve_regex_fields(l, deviceRegex)
if len(dev) > 0 {
if _, ok := stringArrayContains(m.config.ExcludeFilesystem, dev[m.key]); !ok {
current = dev
if len(current["version"]) == 0 {
current["version"] = "3"
}
}
}
if len(current) > 0 {
// Byte line parsing (if found the device for it)
bytes := resolve_regex_fields(l, bytesRegex)
if len(bytes) > 0 {
data[current[m.key]] = make(map[string]int64)
for name, sval := range bytes {
if _, ok := stringArrayContains(m.config.ExcludeMetrics, name); !ok {
val, err := strconv.ParseInt(sval, 10, 64)
if err == nil {
data[current[m.key]][name] = val
}
}
}
current = nil
}
}
}
return data
}
func (m *NfsIOStatCollector) Init(config json.RawMessage) error {
var err error = nil
m.name = "NfsIOStatCollector"
m.setup()
m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "NFS", "unit": "bytes"}
m.tags = map[string]string{"type": "node"}
m.config.UseServerAddressAsSType = false
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
m.key = "mntpoint"
if m.config.UseServerAddressAsSType {
m.key = "server"
}
m.data = m.readNfsiostats()
m.init = true
return err
}
func (m *NfsIOStatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
timestamp := time.Now()
// Get the current values for all mountpoints
newdata := m.readNfsiostats()
for mntpoint, values := range newdata {
// Was the mount point already present in the last iteration?
if old, ok := m.data[mntpoint]; ok {
// Calculate the difference of old and new values
for i := range values {
x := values[i] - old[i]
y, err := lp.NewMessage(fmt.Sprintf("nfsio_%s", i), m.tags, m.meta, map[string]interface{}{"value": x}, timestamp)
if err == nil {
if strings.HasPrefix(i, "page") {
y.AddMeta("unit", "4K_Pages")
}
y.AddTag("stype", "filesystem")
y.AddTag("stype-id", mntpoint)
// Send it to output channel
output <- y
}
// Update old to the new value for the next iteration
old[i] = values[i]
}
} else {
// First time we see this mount point, store all values
m.data[mntpoint] = values
}
}
// Reset entries that do not exist anymore
for mntpoint := range m.data {
found := false
for new := range newdata {
if new == mntpoint {
found = true
break
}
}
if !found {
m.data[mntpoint] = nil
}
}
}
func (m *NfsIOStatCollector) Close() {
// Unset flag
m.init = false
}


@@ -1,27 +0,0 @@
## `nfsiostat` collector
```json
"nfsiostat": {
"exclude_metrics": [
"nfsio_oread"
],
"exclude_filesystems" : [
"/mnt",
],
"use_server_as_stype": false
}
```
The `nfsiostat` collector reads data from `/proc/self/mountstats` and outputs a handful of **node** metrics for each NFS filesystem. If a metric or filesystem is not required, it can be excluded from being forwarded to the sink.
Metrics:
* `nfsio_nread`: Bytes transferred by normal `read()` calls
* `nfsio_nwrite`: Bytes transferred by normal `write()` calls
* `nfsio_oread`: Bytes transferred by `read()` calls with `O_DIRECT`
* `nfsio_owrite`: Bytes transferred by `write()` calls with `O_DIRECT`
* `nfsio_pageread`: Pages transferred by `read()` calls
* `nfsio_pagewrite`: Pages transferred by `write()` calls
* `nfsio_nfsread`: Bytes transferred for reading from the server
* `nfsio_nfswrite`: Bytes transferred for writing to the server
The `nfsiostat` collector adds the mountpoint to the tags as `stype=filesystem,stype-id=<mountpoint>`. If the server address should be used instead of the mountpoint, use the `use_server_as_stype` config setting.
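Which identifier ends up in `stype-id` follows the `use_server_as_stype` switch. A standalone sketch of that selection (function name illustrative):

```go
package main

import "fmt"

// stypeID picks the sub-type ID for a parsed device line, mirroring the
// collector's choice between the "mntpoint" and "server" keys.
func stypeID(dev map[string]string, useServer bool) string {
	if useServer {
		return dev["server"]
	}
	return dev["mntpoint"]
}

func main() {
	dev := map[string]string{"server": "nfsserver:/export", "mntpoint": "/mnt/nfs"}
	fmt.Println(stypeID(dev, false)) // /mnt/nfs
	fmt.Println(stypeID(dev, true))  // nfsserver:/export
}
```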


@@ -10,42 +10,34 @@ import (
"strings" "strings"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
// Non-Uniform Memory Access (NUMA) policy hit/miss statistics //
// Numa policy hit/miss statistics
// //
// numa_hit: // numa_hit:
// // A process wanted to allocate memory from this node, and succeeded.
// A process wanted to allocate memory from this node, and succeeded.
//
// numa_miss: // numa_miss:
// // A process wanted to allocate memory from another node,
// A process wanted to allocate memory from another node, // but ended up with memory from this node.
// but ended up with memory from this node.
//
// numa_foreign: // numa_foreign:
// // A process wanted to allocate on this node,
// A process wanted to allocate on this node, // but ended up with memory from another node.
// but ended up with memory from another node.
//
// local_node: // local_node:
// // A process ran on this node's CPU,
// A process ran on this node's CPU, // and got memory from this node.
// and got memory from this node.
//
// other_node: // other_node:
// // A process ran on a different node's CPU
// A process ran on a different node's CPU // and got memory from this node.
// and got memory from this node.
//
// interleave_hit: // interleave_hit:
// // Interleaving wanted to allocate from this node
// Interleaving wanted to allocate from this node // and succeeded.
// and succeeded.
// //
// See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html // See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
//
type NUMAStatsCollectorTopolgy struct { type NUMAStatsCollectorTopolgy struct {
file string file string
tagSet map[string]string tagSet map[string]string
@@ -53,7 +45,8 @@ type NUMAStatsCollectorTopolgy struct {
type NUMAStatsCollector struct { type NUMAStatsCollector struct {
metricCollector metricCollector
topology []NUMAStatsCollectorTopolgy topology []NUMAStatsCollectorTopolgy
statsProcessedMetrics int64
} }
func (m *NUMAStatsCollector) Init(config json.RawMessage) error { func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
@@ -63,7 +56,6 @@ func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
} }
m.name = "NUMAStatsCollector" m.name = "NUMAStatsCollector"
m.parallel = true
m.setup() m.setup()
m.meta = map[string]string{ m.meta = map[string]string{
"source": m.name, "source": m.name,
@@ -90,14 +82,12 @@ func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
tagSet: map[string]string{"memoryDomain": node}, tagSet: map[string]string{"memoryDomain": node},
}) })
} }
m.statsProcessedMetrics = 0
// Initialized
cclog.ComponentDebug(m.name, "initialized", len(m.topology), "NUMA domains")
m.init = true m.init = true
return nil return nil
} }
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init { if !m.init {
return return
} }
@@ -130,7 +120,7 @@ func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessa
fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err)) fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err))
continue continue
} }
y, err := lp.NewMessage( y, err := lp.New(
"numastats_"+key, "numastats_"+key,
t.tagSet, t.tagSet,
m.meta, m.meta,
@@ -139,11 +129,13 @@ func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessa
) )
if err == nil { if err == nil {
output <- y output <- y
m.statsProcessedMetrics++
} }
} }
file.Close() file.Close()
} }
stats.ComponentStatInt(m.name, "collected_metrics", m.statsProcessedMetrics)
} }
func (m *NUMAStatsCollector) Close() { func (m *NUMAStatsCollector) Close() {

View File

@@ -1,17 +1,15 @@
## `numastat` collector ## `numastat` collector
```json ```json
"numastats": {} "numastat": {}
``` ```
The `numastat` collector reads data from `/sys/devices/system/node/node*/numastat` and outputs a handful of **memoryDomain** metrics. See: <https://www.kernel.org/doc/html/latest/admin-guide/numastat.html> The `numastat` collector reads data from `/sys/devices/system/node/node*/numastat` and outputs a handful of **memoryDomain** metrics. See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
Metrics: Metrics:
* `numastats_numa_hit`: A process wanted to allocate memory from this node, and succeeded. * `numastats_numa_hit`: A process wanted to allocate memory from this node, and succeeded.
* `numastats_numa_miss`: A process wanted to allocate memory from another node, but ended up with memory from this node. * `numastats_numa_miss`: A process wanted to allocate memory from another node, but ended up with memory from this node.
* `numastats_numa_foreign`: A process wanted to allocate on this node, but ended up with memory from another node. * `numastats_numa_foreign`: A process wanted to allocate on this node, but ended up with memory from another node.
* `numastats_local_node`: A process ran on this node's CPU, and got memory from this node. * `numastats_local_node`: A process ran on this node's CPU, and got memory from this node.
* `numastats_other_node`: A process ran on a different node's CPU, and got memory from this node. * `numastats_other_node`: A process ran on a different node's CPU, and got memory from this node.
* `numastats_interleave_hit`: Interleaving wanted to allocate from this node and succeeded. * `numastats_interleave_hit`: Interleaving wanted to allocate from this node and succeeded.
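Each numastat file consists of `key value` lines that become one metric apiece. A standalone sketch of the parsing step (function name illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNumastat splits a "numa_hit 98765" style line into the metric
// key and its counter value, as done per NUMA domain file.
func parseNumastat(line string) (string, int64, error) {
	split := strings.Fields(line)
	if len(split) != 2 {
		return "", 0, fmt.Errorf("invalid line: %q", line)
	}
	value, err := strconv.ParseInt(split[1], 10, 64)
	return split[0], value, err
}

func main() {
	key, value, _ := parseNumastat("numa_hit 98765")
	fmt.Printf("numastats_%s value=%d\n", key, value) // numastats_numa_hit value=98765
}
```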

File diff suppressed because it is too large.


@@ -3,74 +3,38 @@
```json ```json
"nvidia": { "nvidia": {
"exclude_devices": [ "exclude_devices" : [
"0","1", "0000000:ff:01.0" "0","1"
], ],
"exclude_metrics": [ "exclude_metrics": [
"nv_fb_mem_used", "nv_fb_memory",
"nv_fan" "nv_fan"
], ]
"process_mig_devices": false,
"use_pci_info_as_type_id": true,
"add_pci_info_tag": false,
"add_uuid_meta": false,
"add_board_number_meta": false,
"add_serial_meta": false,
"use_uuid_for_mig_device": false,
"use_slice_for_mig_device": false
} }
``` ```
The `nvidia` collector can be configured to leave out specific devices with the `exclude_devices` option. It takes IDs as supplied to the NVML with `nvmlDeviceGetHandleByIndex()` or the PCI address in NVML format (`%08X:%02X:%02X.0`). Metrics (listed below) that should not be sent to the MetricRouter can be excluded with the `exclude_metrics` option. Commonly only the physical GPUs are monitored. If MIG devices should be analyzed as well, set `process_mig_devices` (adds `stype=mig,stype-id=<mig_index>`). With the options `use_uuid_for_mig_device` and `use_slice_for_mig_device`, the `<mig_index>` can be replaced with the UUID (e.g. `MIG-6a9f7cc8-6d5b-5ce0-92de-750edc4d8849`) or the MIG slice name (e.g. `1g.5gb`).
The metrics sent by the `nvidia` collector use `accelerator` as `type` tag. For the `type-id`, it uses the device handle index by default. With the `use_pci_info_as_type_id` option, the PCI ID is used instead. If both values should be added as tags, activate the `add_pci_info_tag` option. It uses the device handle index as `type-id` and adds the PCI ID as separate `pci_identifier` tag.
Optionally, it is possible to add the UUID, the board part number and the serial number to the meta information. Meta information is not sent to the sinks (unless configured otherwise).
Metrics: Metrics:
* `nv_util` * `nv_util`
* `nv_mem_util` * `nv_mem_util`
* `nv_fb_mem_total` * `nv_mem_total`
* `nv_fb_mem_used` * `nv_fb_memory`
* `nv_bar1_mem_total`
* `nv_bar1_mem_used`
* `nv_temp` * `nv_temp`
* `nv_fan` * `nv_fan`
* `nv_ecc_mode` * `nv_ecc_mode`
* `nv_perf_state` * `nv_perf_state`
* `nv_power_usage` * `nv_power_usage_report`
* `nv_graphics_clock` * `nv_graphics_clock_report`
* `nv_sm_clock` * `nv_sm_clock_report`
* `nv_mem_clock` * `nv_mem_clock_report`
* `nv_video_clock`
* `nv_max_graphics_clock` * `nv_max_graphics_clock`
* `nv_max_sm_clock` * `nv_max_sm_clock`
* `nv_max_mem_clock` * `nv_max_mem_clock`
* `nv_max_video_clock` * `nv_ecc_db_error`
* `nv_ecc_uncorrected_error` * `nv_ecc_sb_error`
* `nv_ecc_corrected_error` * `nv_power_man_limit`
* `nv_power_max_limit`
* `nv_encoder_util` * `nv_encoder_util`
* `nv_decoder_util` * `nv_decoder_util`
* `nv_remapped_rows_corrected`
* `nv_remapped_rows_uncorrected`
* `nv_remapped_rows_pending`
* `nv_remapped_rows_failure`
* `nv_compute_processes`
* `nv_graphics_processes`
* `nv_violation_power`
* `nv_violation_thermal`
* `nv_violation_sync_boost`
* `nv_violation_board_limit`
* `nv_violation_low_util`
* `nv_violation_reliability`
* `nv_violation_below_app_clock`
* `nv_violation_below_base_clock`
* `nv_nvlink_crc_flit_errors`
* `nv_nvlink_crc_errors`
* `nv_nvlink_ecc_errors`
* `nv_nvlink_replay_errors`
* `nv_nvlink_recovery_errors`
Some metrics add an additional sub-type tag (`stype`); the `nv_nvlink_*` metrics, for example, set `stype=nvlink,stype-id=<link_number>`. It uses a separate `type` in the metrics. The output metric looks like this:
`<name>,type=accelerator,type-id=<nvidia-gpu-id> value=<metric value> <timestamp>`
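Entries in `exclude_devices` may be plain handle indices or PCI addresses in the NVML format quoted above. A standalone sketch of how such matching can work (function name illustrative, not the collector's actual code):

```go
package main

import "fmt"

// excluded reports whether a device, identified by its handle index or
// its PCI address (format %08X:%02X:%02X.0), is listed in exclude_devices.
func excluded(index int, pciID string, excludeDevices []string) bool {
	idx := fmt.Sprintf("%d", index)
	for _, e := range excludeDevices {
		if e == idx || e == pciID {
			return true
		}
	}
	return false
}

func main() {
	excludes := []string{"0", "1", "0000000:ff:01.0"}
	fmt.Println(excluded(1, "0000000:0b:00.0", excludes)) // true (index match)
	fmt.Println(excluded(3, "0000000:ff:01.0", excludes)) // true (PCI address match)
}
```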


@@ -1,262 +0,0 @@
package collectors
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
)
// running average power limit (RAPL) monitoring attributes for a zone
type RAPLZoneInfo struct {
// tags describing the RAPL zone:
// * zone_name, subzone_name: e.g. psys, dram, core, uncore, package-0
// * zone_id: e.g. 0:1 (zone 0 sub zone 1)
tags map[string]string
energyFilepath string // path to a file containing the zone's current energy counter in micro joules
energy int64 // current reading of the energy counter in micro joules
energyTimestamp time.Time // timestamp when energy counter was read
maxEnergyRange int64 // Range of the above energy counter in micro-joules
}
type RAPLCollector struct {
metricCollector
config struct {
// Exclude IDs for RAPL zones, e.g.
// * 0 for zone 0
// * 0:1 for zone 0 subzone 1
ExcludeByID []string `json:"exclude_device_by_id,omitempty"`
// Exclude names for RAPL zones, e.g. psys, dram, core, uncore, package-0
ExcludeByName []string `json:"exclude_device_by_name,omitempty"`
}
RAPLZoneInfo []RAPLZoneInfo
meta map[string]string // default meta information
}
// Init initializes the running average power limit (RAPL) collector
func (m *RAPLCollector) Init(config json.RawMessage) error {
// Check if already initialized
if m.init {
return nil
}
var err error = nil
m.name = "RAPLCollector"
m.setup()
m.parallel = true
m.meta = map[string]string{
"source": m.name,
"group": "energy",
"unit": "Watt",
}
// Read in the JSON configuration
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
// Configure excluded RAPL zones
isIDExcluded := make(map[string]bool)
if m.config.ExcludeByID != nil {
for _, ID := range m.config.ExcludeByID {
isIDExcluded[ID] = true
}
}
isNameExcluded := make(map[string]bool)
if m.config.ExcludeByName != nil {
for _, name := range m.config.ExcludeByName {
isNameExcluded[name] = true
}
}
// readZoneInfo reads RAPL monitoring attributes for a zone given by zonePath
// See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes
readZoneInfo := func(zonePath string) (z struct {
name string // zone's name, e.g. psys, dram, core, uncore, package-0
energyFilepath string // path to a file containing the zone's current energy counter in micro joules
energy int64 // current reading of the energy counter in micro joules
energyTimestamp time.Time // timestamp when energy counter was read
maxEnergyRange int64 // Range of the above energy counter in micro-joules
ok bool // Is all information available?
}) {
// zone's name, e.g. psys, dram, core, uncore, package-0
foundName := false
if v, err :=
os.ReadFile(
filepath.Join(zonePath, "name")); err == nil {
foundName = true
z.name = strings.TrimSpace(string(v))
}
// path to a file containing the zone's current energy counter in micro joules
z.energyFilepath = filepath.Join(zonePath, "energy_uj")
// current reading of the energy counter in micro joules
foundEnergy := false
if v, err := os.ReadFile(z.energyFilepath); err == nil {
// timestamp when energy counter was read
z.energyTimestamp = time.Now()
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
foundEnergy = true
z.energy = i
}
}
// Range of the above energy counter in micro-joules
foundMaxEnergyRange := false
if v, err :=
os.ReadFile(
filepath.Join(zonePath, "max_energy_range_uj")); err == nil {
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
foundMaxEnergyRange = true
z.maxEnergyRange = i
}
}
// Is all information available?
z.ok = foundName && foundEnergy && foundMaxEnergyRange
return
}
powerCapPrefix := "/sys/devices/virtual/powercap"
controlType := "intel-rapl"
controlTypePath := filepath.Join(powerCapPrefix, controlType)
// Find all RAPL zones
zonePrefix := filepath.Join(controlTypePath, controlType+":")
zonesPath, err := filepath.Glob(zonePrefix + "*")
if err != nil || zonesPath == nil {
return fmt.Errorf("unable to find any zones under %s", controlTypePath)
}
for _, zonePath := range zonesPath {
zoneID := strings.TrimPrefix(zonePath, zonePrefix)
z := readZoneInfo(zonePath)
if z.ok &&
!isIDExcluded[zoneID] &&
!isNameExcluded[z.name] {
// Add RAPL monitoring attributes for a zone
m.RAPLZoneInfo =
append(
m.RAPLZoneInfo,
RAPLZoneInfo{
tags: map[string]string{
"id": zoneID,
"zone_name": z.name,
},
energyFilepath: z.energyFilepath,
energy: z.energy,
energyTimestamp: z.energyTimestamp,
maxEnergyRange: z.maxEnergyRange,
})
}
// find all sub zones for the given zone
subZonePrefix := filepath.Join(zonePath, controlType+":"+zoneID+":")
subZonesPath, err := filepath.Glob(subZonePrefix + "*")
if err != nil || subZonesPath == nil {
continue
}
for _, subZonePath := range subZonesPath {
subZoneID := strings.TrimPrefix(subZonePath, subZonePrefix)
sz := readZoneInfo(subZonePath)
if len(zoneID) > 0 && len(z.name) > 0 &&
sz.ok &&
!isIDExcluded[zoneID+":"+subZoneID] &&
!isNameExcluded[sz.name] {
m.RAPLZoneInfo =
append(
m.RAPLZoneInfo,
RAPLZoneInfo{
tags: map[string]string{
"id": zoneID + ":" + subZoneID,
"zone_name": z.name,
"sub_zone_name": sz.name,
},
energyFilepath: sz.energyFilepath,
energy: sz.energy,
energyTimestamp: sz.energyTimestamp,
maxEnergyRange: sz.maxEnergyRange,
})
}
}
}
if m.RAPLZoneInfo == nil {
return fmt.Errorf("no running average power limit (RAPL) device found in %s", controlTypePath)
}
// Initialized
cclog.ComponentDebug(
m.name,
"initialized",
len(m.RAPLZoneInfo),
"zones with running average power limit (RAPL) monitoring attributes")
m.init = true
return err
}
// Read reads running average power limit (RAPL) monitoring attributes for all initialized zones
// See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes
func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMessage) {
for i := range m.RAPLZoneInfo {
p := &m.RAPLZoneInfo[i]
// Read current value of the energy counter in micro joules
if v, err := os.ReadFile(p.energyFilepath); err == nil {
energyTimestamp := time.Now()
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
energy := i
// Compute average power (Δ energy / Δ time)
energyDiff := energy - p.energy
if energyDiff < 0 {
// Handle overflow:
// ( p.maxEnergyRange - p.energy ) + energy
// = p.maxEnergyRange + ( energy - p.energy )
// = p.maxEnergyRange + energyDiff
energyDiff += p.maxEnergyRange
}
timeDiff := energyTimestamp.Sub(p.energyTimestamp)
averagePower := float64(energyDiff) / float64(timeDiff.Microseconds())
y, err := lp.NewMessage(
"rapl_average_power",
p.tags,
m.meta,
map[string]interface{}{"value": averagePower},
energyTimestamp)
if err == nil {
output <- y
}
// Save current energy counter state
p.energy = energy
p.energyTimestamp = energyTimestamp
}
}
}
}
// Close closes running average power limit (RAPL) metric collector
func (m *RAPLCollector) Close() {
// Unset flag
m.init = false
}


@@ -1,18 +0,0 @@
# Running average power limit (RAPL) metric collector
This collector reads running average power limit (RAPL) monitoring attributes to compute average power consumption metrics. See <https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes>.
The Likwid metric collector provides similar functionality.
## Configuration
```json
"rapl": {
"exclude_device_by_id": ["0:1", "0:2"],
"exclude_device_by_name": ["psys"]
}
```
## Metrics
* `rapl_average_power`: average power consumption in Watt. The average is computed over the measurement interval, i.e. from the last reading to the current one.
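Since the energy counters are reported in microjoules, dividing the wraparound-corrected energy difference by the elapsed microseconds yields Watt directly. A standalone sketch of the arithmetic used by this collector (function name and the example range value are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// averagePower computes Watt from two energy readings in microjoules,
// compensating one counter wraparound via max_energy_range_uj (µJ/µs = W).
func averagePower(lastEnergy, energy, maxEnergyRange int64, elapsed time.Duration) float64 {
	diff := energy - lastEnergy
	if diff < 0 { // the counter wrapped since the last reading
		diff += maxEnergyRange
	}
	return float64(diff) / float64(elapsed.Microseconds())
}

func main() {
	// 50 J consumed over 10 s -> 5 W
	fmt.Println(averagePower(10_000_000, 60_000_000, 262_143_328_850, 10*time.Second))
}
```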


@@ -1,319 +0,0 @@
package collectors
import (
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
"github.com/ClusterCockpit/go-rocm-smi/pkg/rocm_smi"
)
type RocmSmiCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeDevices []string `json:"exclude_devices,omitempty"`
AddPciInfoTag bool `json:"add_pci_info_tag,omitempty"`
UsePciInfoAsTypeId bool `json:"use_pci_info_as_type_id,omitempty"`
AddSerialMeta bool `json:"add_serial_meta,omitempty"`
}
type RocmSmiCollectorDevice struct {
device rocm_smi.DeviceHandle
index int
tags map[string]string // default tags
meta map[string]string // default meta information
excludeMetrics map[string]bool // copy of exclude metrics from config
}
type RocmSmiCollector struct {
metricCollector
config RocmSmiCollectorConfig // the configuration structure
devices []RocmSmiCollectorDevice
}
// Functions to implement MetricCollector interface
// Init(...), Read(...), Close()
// See: metricCollector.go
// Init initializes the sample collector
// Called once by the collector manager
// All tags, meta data tags and metrics that do not change over the runtime should be set here
func (m *RocmSmiCollector) Init(config json.RawMessage) error {
var err error = nil
// Always set the name early in Init() to use it in cclog.Component* functions
m.name = "RocmSmiCollector"
// This is for later use, also call it early
m.setup()
// Define meta information sent with each metric
// (Can also be dynamic or this is the basic set with extension through AddMeta())
//m.meta = map[string]string{"source": m.name, "group": "AMD"}
// Define tags sent with each metric
// The 'type' tag is always needed, it defines the granularity of the metric
// node -> whole system
// socket -> CPU socket (requires socket ID as 'type-id' tag)
// cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag)
//m.tags = map[string]string{"type": "node"}
// Read in the JSON configuration
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
ret := rocm_smi.Init()
if ret != rocm_smi.STATUS_SUCCESS {
err = errors.New("failed to initialize ROCm SMI library")
cclog.ComponentError(m.name, err.Error())
return err
}
numDevs, ret := rocm_smi.NumMonitorDevices()
if ret != rocm_smi.STATUS_SUCCESS {
err = errors.New("failed to get number of GPUs from ROCm SMI library")
cclog.ComponentError(m.name, err.Error())
return err
}
exclDev := func(s string) bool {
skip_device := false
for _, excl := range m.config.ExcludeDevices {
if excl == s {
skip_device = true
break
}
}
return skip_device
}
m.devices = make([]RocmSmiCollectorDevice, 0)
for i := 0; i < numDevs; i++ {
str_i := fmt.Sprintf("%d", i)
if exclDev(str_i) {
continue
}
device, ret := rocm_smi.DeviceGetHandleByIndex(i)
if ret != rocm_smi.STATUS_SUCCESS {
err = fmt.Errorf("failed to get handle for GPU %d", i)
cclog.ComponentError(m.name, err.Error())
return err
}
pciInfo, ret := rocm_smi.DeviceGetPciInfo(device)
if ret != rocm_smi.STATUS_SUCCESS {
err = fmt.Errorf("failed to get PCI information for GPU %d", i)
cclog.ComponentError(m.name, err.Error())
return err
}
pciId := fmt.Sprintf(
"%08X:%02X:%02X.%X",
pciInfo.Domain,
pciInfo.Bus,
pciInfo.Device,
pciInfo.Function)
if exclDev(pciId) {
continue
}
dev := RocmSmiCollectorDevice{
device: device,
tags: map[string]string{
"type": "accelerator",
"type-id": str_i,
},
meta: map[string]string{
"source": m.name,
"group": "AMD",
},
}
if m.config.UsePciInfoAsTypeId {
dev.tags["type-id"] = pciId
} else if m.config.AddPciInfoTag {
dev.tags["pci_identifier"] = pciId
}
if m.config.AddSerialMeta {
serial, ret := rocm_smi.DeviceGetSerialNumber(device)
if ret != rocm_smi.STATUS_SUCCESS {
cclog.ComponentError(m.name, "Unable to get serial number for device at index", i, ":", rocm_smi.StatusStringNoError(ret))
} else {
dev.meta["serial"] = serial
}
}
// Add excluded metrics
dev.excludeMetrics = map[string]bool{}
for _, e := range m.config.ExcludeMetrics {
dev.excludeMetrics[e] = true
}
dev.index = i
m.devices = append(m.devices, dev)
}
// Set this flag only if everything is initialized properly, all required files exist, ...
m.init = true
return err
}
// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage) {
// Create a sample metric
timestamp := time.Now()
for _, dev := range m.devices {
metrics, ret := rocm_smi.DeviceGetMetrics(dev.device)
if ret != rocm_smi.STATUS_SUCCESS {
cclog.ComponentError(m.name, "Unable to get metrics for device at index", dev.index, ":", rocm_smi.StatusStringNoError(ret))
continue
}
if !dev.excludeMetrics["rocm_gfx_util"] {
value := metrics.Average_gfx_activity
y, err := lp.NewMessage("rocm_gfx_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_umc_util"] {
value := metrics.Average_umc_activity
y, err := lp.NewMessage("rocm_umc_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_mm_util"] {
value := metrics.Average_mm_activity
y, err := lp.NewMessage("rocm_mm_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_avg_power"] {
value := metrics.Average_socket_power
y, err := lp.NewMessage("rocm_avg_power", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_mem"] {
value := metrics.Temperature_mem
y, err := lp.NewMessage("rocm_temp_mem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_hotspot"] {
value := metrics.Temperature_hotspot
y, err := lp.NewMessage("rocm_temp_hotspot", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_edge"] {
value := metrics.Temperature_edge
y, err := lp.NewMessage("rocm_temp_edge", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_vrgfx"] {
value := metrics.Temperature_vrgfx
y, err := lp.NewMessage("rocm_temp_vrgfx", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_vrsoc"] {
value := metrics.Temperature_vrsoc
y, err := lp.NewMessage("rocm_temp_vrsoc", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_vrmem"] {
value := metrics.Temperature_vrmem
y, err := lp.NewMessage("rocm_temp_vrmem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_gfx_clock"] {
value := metrics.Average_gfxclk_frequency
y, err := lp.NewMessage("rocm_gfx_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_soc_clock"] {
value := metrics.Average_socclk_frequency
y, err := lp.NewMessage("rocm_soc_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_u_clock"] {
value := metrics.Average_uclk_frequency
y, err := lp.NewMessage("rocm_u_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_v0_clock"] {
value := metrics.Average_vclk0_frequency
y, err := lp.NewMessage("rocm_v0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_v1_clock"] {
value := metrics.Average_vclk1_frequency
y, err := lp.NewMessage("rocm_v1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_d0_clock"] {
value := metrics.Average_dclk0_frequency
y, err := lp.NewMessage("rocm_d0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_d1_clock"] {
value := metrics.Average_dclk1_frequency
y, err := lp.NewMessage("rocm_d1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_hbm"] {
for i := 0; i < rocm_smi.NUM_HBM_INSTANCES; i++ {
value := metrics.Temperature_hbm[i]
y, err := lp.NewMessage("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddTag("stype", "device")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
output <- y
}
}
}
}
}
// Close metric collector: close network connection, close files, close libraries, ...
// Called once by the collector manager
func (m *RocmSmiCollector) Close() {
// Unset flag
ret := rocm_smi.Shutdown()
if ret != rocm_smi.STATUS_SUCCESS {
cclog.ComponentError(m.name, "Failed to shutdown ROCm SMI library")
}
m.init = false
}


@@ -1,47 +0,0 @@
## `rocm_smi` collector
```json
"rocm_smi": {
"exclude_devices": [
"0","1", "0000000:ff:01.0"
],
"exclude_metrics": [
"rocm_mm_util",
"rocm_temp_vrsoc"
],
"use_pci_info_as_type_id": true,
"add_pci_info_tag": false,
"add_serial_meta": false,
}
```
The `rocm_smi` collector can be configured to leave out specific devices with the `exclude_devices` option. It takes logical IDs from the list of available devices or PCI addresses in a format similar to NVML (`%08X:%02X:%02X.0`). Metrics (listed below) that should not be sent to the MetricRouter can be excluded with the `exclude_metrics` option.
The metrics sent by the `rocm_smi` collector use `accelerator` as `type` tag. For the `type-id`, it uses the device handle index by default. With the `use_pci_info_as_type_id` option, the PCI ID is used instead. If both values should be added as tags, activate the `add_pci_info_tag` option. It uses the device handle index as `type-id` and adds the PCI ID as separate `pci_identifier` tag.
Optionally, it is possible to add the serial number to the meta information. Meta information is not sent to the sinks (unless configured otherwise).
Metrics:
* `rocm_gfx_util`
* `rocm_umc_util`
* `rocm_mm_util`
* `rocm_avg_power`
* `rocm_temp_mem`
* `rocm_temp_hotspot`
* `rocm_temp_edge`
* `rocm_temp_vrgfx`
* `rocm_temp_vrsoc`
* `rocm_temp_vrmem`
* `rocm_gfx_clock`
* `rocm_soc_clock`
* `rocm_u_clock`
* `rocm_v0_clock`
* `rocm_v1_clock`
* `rocm_d0_clock`
* `rocm_d1_clock`
* `rocm_temp_hbm`
Some metrics add an additional sub-type tag (`stype`); the `rocm_temp_hbm` metric, for example, sets `stype=device,stype-id=<HBM_slice_number>`.
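The interplay of `use_pci_info_as_type_id` and `add_pci_info_tag` follows the precedence in the collector's `Init()`: the former wins, the latter only adds an extra tag. A condensed sketch (function name illustrative):

```go
package main

import "fmt"

// deviceTags builds the tag set for one GPU: use_pci_info_as_type_id
// replaces the type-id, add_pci_info_tag adds a separate tag instead.
func deviceTags(index int, pciID string, usePciAsTypeID, addPciTag bool) map[string]string {
	tags := map[string]string{"type": "accelerator", "type-id": fmt.Sprintf("%d", index)}
	if usePciAsTypeID {
		tags["type-id"] = pciID
	} else if addPciTag {
		tags["pci_identifier"] = pciID
	}
	return tags
}

func main() {
	fmt.Println(deviceTags(0, "00000000:0B:00.0", true, false))
	// map[type:accelerator type-id:00000000:0B:00.0]
}
```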


@@ -4,8 +4,9 @@ import (
"encoding/json" "encoding/json"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
) )
// These are the fields we read from the JSON configuration // These are the fields we read from the JSON configuration
@@ -17,9 +18,10 @@ type SampleCollectorConfig struct {
// defined by metricCollector (name, init, ...) // defined by metricCollector (name, init, ...)
type SampleCollector struct { type SampleCollector struct {
metricCollector metricCollector
config SampleCollectorConfig // the configuration structure config SampleTimerCollectorConfig // the configuration structure
meta map[string]string // default meta information meta map[string]string // default meta information
tags map[string]string // default tags tags map[string]string // default tags
statsCount int64
} }
// Functions to implement MetricCollector interface // Functions to implement MetricCollector interface
@@ -32,26 +34,17 @@ type SampleCollector struct {
func (m *SampleCollector) Init(config json.RawMessage) error { func (m *SampleCollector) Init(config json.RawMessage) error {
var err error = nil var err error = nil
// Always set the name early in Init() to use it in cclog.Component* functions // Always set the name early in Init() to use it in cclog.Component* functions
m.name = "SampleCollector" m.name = "InternalCollector"
// This is for later use, also call it early // This is for later use, also call it early
m.setup() m.setup()
// Tell whether the collector should be run in parallel with others (reading files, ...)
// or it should be run serially, mostly for collectors actually doing measurements
// because they should not measure the execution of the other collectors
m.parallel = true
// Define meta information sent with each metric // Define meta information sent with each metric
// (Can also be dynamic or this is the basic set with extension through AddMeta()) // (Can also be dynamic or this is the basic set with extension through AddMeta())
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"} m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
// Define tags sent with each metric // Define tags sent with each metric
// The 'type' tag is always needed, it defines the granularity of the metric // The 'type' tag is always needed, it defines the granularity of the metric
// node -> whole system // node -> whole system
// socket -> CPU socket (requires socket ID as 'type-id' tag) // socket -> CPU socket (requires socket ID as 'type-id' tag)
// die -> CPU die (requires CPU die ID as 'type-id' tag) // cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag)
// memoryDomain -> NUMA domain (requires NUMA domain ID as 'type-id' tag)
// llc -> Last level cache (requires last level cache ID as 'type-id' tag)
// core -> single CPU core that may consist of multiple hardware threads (SMT) (requires core ID as 'type-id' tag)
// hwthtread -> single CPU hardware thread (requires hardware thread ID as 'type-id' tag)
// accelerator -> A accelerator device like GPU or FPGA (requires an accelerator ID as 'type-id' tag)
m.tags = map[string]string{"type": "node"} m.tags = map[string]string{"type": "node"}
// Read in the JSON configuration // Read in the JSON configuration
if len(config) > 0 { if len(config) > 0 {
@@ -67,6 +60,9 @@ func (m *SampleCollector) Init(config json.RawMessage) error {
// for all topological entities (sockets, NUMA domains, ...) // for all topological entities (sockets, NUMA domains, ...)
// Return some useful error message in case of any failures // Return some useful error message in case of any failures
// Initialize counts for statistics
m.statsCount = 0
// Set this flag only if everything is initialized properly, all required files exist, ... // Set this flag only if everything is initialized properly, all required files exist, ...
m.init = true m.init = true
return err return err
@@ -74,7 +70,7 @@ func (m *SampleCollector) Init(config json.RawMessage) error {
// Read collects all metrics belonging to the sample collector // Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager // and sends them through the output channel to the collector manager
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Create a sample metric // Create a sample metric
timestamp := time.Now() timestamp := time.Now()
@@ -85,12 +81,15 @@ func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage)
// stop := readState() // stop := readState()
// value = (stop - start) / interval.Seconds() // value = (stop - start) / interval.Seconds()
y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp) y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil { if err == nil {
// Send it to output channel // Send it to output channel
output <- y output <- y
// increment count for each sent metric or any other operation
m.statsCount++
} }
// Set stats for the component
stats.ComponentStatInt(m.name, "count", m.statsCount)
} }
// Close metric collector: close network connection, close files, close libraries, ... // Close metric collector: close network connection, close files, close libraries, ...


@@ -5,8 +5,8 @@ import (
"sync" "sync"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
) )
// These are the fields we read from the JSON configuration // These are the fields we read from the JSON configuration
@@ -25,7 +25,7 @@ type SampleTimerCollector struct {
config SampleTimerCollectorConfig // the configuration structure config SampleTimerCollectorConfig // the configuration structure
interval time.Duration // the interval parsed from configuration interval time.Duration // the interval parsed from configuration
ticker *time.Ticker // own timer ticker *time.Ticker // own timer
output chan lp.CCMessage // own internal output channel output chan lp.CCMetric // own internal output channel
} }
func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error { func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
@@ -38,7 +38,7 @@ func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
// (Can also be dynamic or this is the basic set with extension through AddMeta()) // (Can also be dynamic or this is the basic set with extension through AddMeta())
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"} m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
// Define tags sent with each metric // Define tags sent with each metric
// The 'type' tag is always needed, it defines the granularity of the metric // The 'type' tag is always needed, it defines the granularity of the metric
// node -> whole system // node -> whole system
// socket -> CPU socket (requires socket ID as 'type-id' tag) // socket -> CPU socket (requires socket ID as 'type-id' tag)
// cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag) // cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag)
@@ -60,7 +60,7 @@ func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
// Storage for output channel // Storage for output channel
m.output = nil m.output = nil
// Management channel for the timer function. // Management channel for the timer function.
m.done = make(chan bool) m.done = make(chan bool)
// Create the own ticker // Create the own ticker
m.ticker = time.NewTicker(m.interval) m.ticker = time.NewTicker(m.interval)
@@ -94,20 +94,20 @@ func (m *SampleTimerCollector) ReadMetrics(timestamp time.Time) {
value := 1.0 value := 1.0
// If you want to measure something for a specific amount of time, use interval // If you want to measure something for a specific amount of time, use interval
// start := readState() // start := readState()
// time.Sleep(interval) // time.Sleep(interval)
// stop := readState() // stop := readState()
// value = (stop - start) / interval.Seconds() // value = (stop - start) / interval.Seconds()
y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp) y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil && m.output != nil { if err == nil && m.output != nil {
// Send it to output channel if we have a valid channel // Send it to output channel if we have a valid channel
m.output <- y m.output <- y
} }
} }
func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMessage) { func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Capture output channel // Capture output channel
m.output = output m.output = output
} }
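// The timer loop itself is not part of this diff; a sketch of the usual
// pattern (assuming a StartTimer method; m.done and m.ticker are created
// in Init() as shown above):
//
//	func (m *SampleTimerCollector) StartTimer() {
//		go func() {
//			for {
//				select {
//				case <-m.done:
//					return
//				case timestamp := <-m.ticker.C:
//					m.ReadMetrics(timestamp)
//				}
//			}
//		}()
//	}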


@@ -1,154 +0,0 @@
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"math"
"os"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
)
const SCHEDSTATFILE = `/proc/schedstat`
// These are the fields we read from the JSON configuration
type SchedstatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
// This contains all variables we need during execution and the variables
// defined by metricCollector (name, init, ...)
type SchedstatCollector struct {
metricCollector
config SchedstatCollectorConfig // the configuration structure
lastTimestamp time.Time // Store time stamp of last tick to derive values
meta map[string]string // default meta information
cputags map[string]map[string]string // default tags
olddata map[string]map[string]int64 // default tags
}
// Functions to implement MetricCollector interface
// Init(...), Read(...), Close()
// See: metricCollector.go
// Init initializes the sample collector
// Called once by the collector manager
// All tags, meta data tags and metrics that do not change over the runtime should be set here
func (m *SchedstatCollector) Init(config json.RawMessage) error {
var err error = nil
// Always set the name early in Init() to use it in cclog.Component* functions
m.name = "SchedstatCollector"
// This is for later use, also call it early
m.setup()
// Tell whether the collector should be run in parallel with others (reading files, ...)
// or it should be run serially, mostly for collectors actually doing measurements
// because they should not measure the execution of the other collectors
m.parallel = true
// Define meta information sent with each metric
// (Can also be dynamic or this is the basic set with extension through AddMeta())
m.meta = map[string]string{"source": m.name, "group": "SCHEDSTAT"}
// Read in the JSON configuration
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
// Check input file
file, err := os.Open(string(SCHEDSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
}
defer file.Close()
// Pre-generate tags for all CPUs
num_cpus := 0
m.cputags = make(map[string]map[string]string)
m.olddata = make(map[string]map[string]int64)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if strings.HasPrefix(linefields[0], "cpu") && strings.Compare(linefields[0], "cpu") != 0 {
cpustr := strings.TrimLeft(linefields[0], "cpu")
cpu, _ := strconv.Atoi(cpustr)
running, _ := strconv.ParseInt(linefields[7], 10, 64)
waiting, _ := strconv.ParseInt(linefields[8], 10, 64)
m.cputags[linefields[0]] = map[string]string{"type": "hwthread", "type-id": fmt.Sprintf("%d", cpu)}
m.olddata[linefields[0]] = map[string]int64{"running": running, "waiting": waiting}
num_cpus++
}
}
// Save current timestamp
m.lastTimestamp = time.Now()
// Set this flag only if everything is initialized properly, all required files exist, ...
m.init = true
return err
}
func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) {
running, _ := strconv.ParseInt(linefields[7], 10, 64)
waiting, _ := strconv.ParseInt(linefields[8], 10, 64)
diff_running := running - m.olddata[linefields[0]]["running"]
diff_waiting := waiting - m.olddata[linefields[0]]["waiting"]
var l_running float64 = float64(diff_running) / tsdelta.Seconds() / (math.Pow(1000, 3))
var l_waiting float64 = float64(diff_waiting) / tsdelta.Seconds() / (math.Pow(1000, 3))
m.olddata[linefields[0]]["running"] = running
m.olddata[linefields[0]]["waiting"] = waiting
value := l_running + l_waiting
y, err := lp.NewMessage("cpu_load_core", tags, m.meta, map[string]interface{}{"value": value}, now)
if err == nil {
// Send it to output channel
output <- y
}
}
// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
if !m.init {
return
}
//timestamps
now := time.Now()
tsdelta := now.Sub(m.lastTimestamp)
file, err := os.Open(string(SCHEDSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if strings.HasPrefix(linefields[0], "cpu") {
m.ParseProcLine(linefields, m.cputags[linefields[0]], output, now, tsdelta)
}
}
m.lastTimestamp = now
}
// Close metric collector: close network connection, close files, close libraries, ...
// Called once by the collector manager
func (m *SchedstatCollector) Close() {
// Unset flag
m.init = false
}


@@ -1,11 +0,0 @@
## `schedstat` collector
```json
"schedstat": {
}
```
The `schedstat` collector reads data from `/proc/schedstat` and calculates a load value per hardware thread. This can be useful for detecting bad CPU pinning on shared nodes.
Metric:
* `cpu_load_core`
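The load value is the per-second growth of the nanosecond-resolution `running` and `waiting` counters from `/proc/schedstat`, so a value of 1.0 means the hardware thread was busy or had work queued for the whole interval. A standalone sketch of the arithmetic (function name illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// coreLoad mirrors the cpu_load_core calculation: nanosecond counter
// deltas normalized by the interval length and converted from ns to s.
func coreLoad(deltaRunning, deltaWaiting int64, interval time.Duration) float64 {
	return (float64(deltaRunning) + float64(deltaWaiting)) / interval.Seconds() / 1e9
}

func main() {
	// 8 s running + 2 s waiting over a 10 s interval -> load 1.0
	fmt.Println(coreLoad(8_000_000_000, 2_000_000_000, 10*time.Second))
}
```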


@@ -1,144 +0,0 @@
package collectors
import (
"encoding/json"
"runtime"
"syscall"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
)
type SelfCollectorConfig struct {
MemStats bool `json:"read_mem_stats"`
GoRoutines bool `json:"read_goroutines"`
CgoCalls bool `json:"read_cgo_calls"`
Rusage bool `json:"read_rusage"`
}
type SelfCollector struct {
metricCollector
config SelfCollectorConfig // the configuration structure
meta map[string]string // default meta information
tags map[string]string // default tags
}
func (m *SelfCollector) Init(config json.RawMessage) error {
var err error = nil
m.name = "SelfCollector"
m.setup()
m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "Self"}
m.tags = map[string]string{"type": "node"}
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
m.init = true
return err
}
func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMessage) {
timestamp := time.Now()
if m.config.MemStats {
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
y, err := lp.NewMessage("total_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.TotalAlloc}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapAlloc}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_sys", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapSys}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_idle", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapIdle}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_inuse", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapInuse}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_released", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapReleased}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_objects", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapObjects}, timestamp)
if err == nil {
output <- y
}
}
if m.config.GoRoutines {
y, err := lp.NewMessage("num_goroutines", m.tags, m.meta, map[string]interface{}{"value": runtime.NumGoroutine()}, timestamp)
if err == nil {
output <- y
}
}
if m.config.CgoCalls {
y, err := lp.NewMessage("num_cgo_calls", m.tags, m.meta, map[string]interface{}{"value": runtime.NumCgoCall()}, timestamp)
if err == nil {
output <- y
}
}
if m.config.Rusage {
var rusage syscall.Rusage
err := syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
if err == nil {
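			// Timeval.Unix() returns seconds and nanoseconds separately;
			// combine them into a float64 value in seconds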
sec, nsec := rusage.Utime.Unix()
t := float64(sec) + (float64(nsec) * 1e-9)
y, err := lp.NewMessage("rusage_user_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
if err == nil {
y.AddMeta("unit", "seconds")
output <- y
}
sec, nsec = rusage.Stime.Unix()
t = float64(sec) + (float64(nsec) * 1e-9)
y, err = lp.NewMessage("rusage_system_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
if err == nil {
y.AddMeta("unit", "seconds")
output <- y
}
y, err = lp.NewMessage("rusage_vol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nvcsw}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_invol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nivcsw}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_signals", m.tags, m.meta, map[string]interface{}{"value": rusage.Nsignals}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_major_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Majflt}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_minor_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Minflt}, timestamp)
if err == nil {
output <- y
}
}
}
}
func (m *SelfCollector) Close() {
m.init = false
}

View File

@@ -1,34 +0,0 @@
## `self` collector
```json
"self": {
"read_mem_stats" : true,
"read_goroutines" : true,
"read_cgo_calls" : true,
"read_rusage" : true
}
```
The `self` collector reads data from the `runtime` and `syscall` packages and thus monitors the execution of the cc-metric-collector itself.
Metrics:
* If `read_mem_stats == true`:
* `total_alloc`: The metric reports cumulative bytes allocated for heap objects.
* `heap_alloc`: The metric reports bytes of allocated heap objects.
* `heap_sys`: The metric reports bytes of heap memory obtained from the OS.
* `heap_idle`: The metric reports bytes in idle (unused) spans.
* `heap_inuse`: The metric reports bytes in in-use spans.
* `heap_released`: The metric reports bytes of physical memory returned to the OS.
* `heap_objects`: The metric reports the number of allocated heap objects.
* If `read_goroutines == true`:
* `num_goroutines`: The metric reports the number of goroutines that currently exist.
* If `read_cgo_calls == true`:
* `num_cgo_calls`: The metric reports the number of cgo calls made by the current process.
* If `read_rusage == true`:
* `rusage_user_time`: The metric reports the amount of time that this process has been scheduled in user mode.
* `rusage_system_time`: The metric reports the amount of time that this process has been scheduled in kernel mode.
* `rusage_vol_ctx_switch`: The metric reports the number of voluntary context switches.
* `rusage_invol_ctx_switch`: The metric reports the number of involuntary context switches.
* `rusage_signals`: The metric reports the number of signals received.
* `rusage_major_pgfaults`: The metric reports the number of major faults the process has made which have required loading a memory page from disk.
* `rusage_minor_pgfaults`: The metric reports the number of minor faults the process has made which have not required loading a memory page from disk.
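
For example, with `read_goroutines` enabled, the collector could emit the following metric in InfluxDB line protocol (hypothetical value):

```
num_goroutines,type=node value=12 1649081000000000000
```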

View File

@@ -3,14 +3,15 @@ package collectors
 import (
 	"encoding/json"
 	"fmt"
-	"os"
+	"io/ioutil"
 	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
 
-	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
+	stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
 )
 
 // See: https://www.kernel.org/doc/html/latest/hwmon/sysfs-interface.html
@@ -40,7 +41,8 @@ type TempCollector struct {
 		ReportMaxTemp      bool `json:"report_max_temperature"`
 		ReportCriticalTemp bool `json:"report_critical_temperature"`
 	}
 	sensors []*TempCollectorSensor
+	statsProcessedMetrics int64
 }
 
@@ -50,7 +52,6 @@ func (m *TempCollector) Init(config json.RawMessage) error {
 	}
 	m.name = "TempCollector"
-	m.parallel = true
 	m.setup()
 	if len(config) > 0 {
 		err := json.Unmarshal(config, &m.config)
@@ -83,14 +84,14 @@ func (m *TempCollector) Init(config json.RawMessage) error {
 		// sensor name
 		nameFile := filepath.Join(filepath.Dir(file), "name")
-		name, err := os.ReadFile(nameFile)
+		name, err := ioutil.ReadFile(nameFile)
 		if err == nil {
 			sensor.name = strings.TrimSpace(string(name))
 		}
 
 		// sensor label
 		labelFile := strings.TrimSuffix(file, "_input") + "_label"
-		label, err := os.ReadFile(labelFile)
+		label, err := ioutil.ReadFile(labelFile)
 		if err == nil {
 			sensor.label = strings.TrimSpace(string(label))
 		}
@@ -117,10 +118,6 @@ func (m *TempCollector) Init(config json.RawMessage) error {
 		}
 
 		// Sensor file
-		_, err = os.ReadFile(file)
-		if err != nil {
-			continue
-		}
 		sensor.file = file
 
 		// Sensor tags
@@ -139,7 +136,7 @@ func (m *TempCollector) Init(config json.RawMessage) error {
 		// max temperature
 		if m.config.ReportMaxTemp {
 			maxTempFile := strings.TrimSuffix(file, "_input") + "_max"
-			if buffer, err := os.ReadFile(maxTempFile); err == nil {
+			if buffer, err := ioutil.ReadFile(maxTempFile); err == nil {
 				if x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64); err == nil {
 					sensor.maxTempName = strings.Replace(sensor.metricName, "temp", "max_temp", 1)
 					sensor.maxTemp = x / 1000
@@ -150,7 +147,7 @@ func (m *TempCollector) Init(config json.RawMessage) error {
 		// critical temperature
 		if m.config.ReportCriticalTemp {
 			criticalTempFile := strings.TrimSuffix(file, "_input") + "_crit"
-			if buffer, err := os.ReadFile(criticalTempFile); err == nil {
+			if buffer, err := ioutil.ReadFile(criticalTempFile); err == nil {
 				if x, err := strconv.ParseInt(strings.TrimSpace(string(buffer)), 10, 64); err == nil {
 					sensor.critTempName = strings.Replace(sensor.metricName, "temp", "crit_temp", 1)
 					sensor.critTemp = x / 1000
@@ -167,15 +164,16 @@ func (m *TempCollector) Init(config json.RawMessage) error {
 	}
 
 	// Finished initialization
+	m.statsProcessedMetrics = 0
 	m.init = true
 	return nil
 }
 
-func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	for _, sensor := range m.sensors {
 		// Read sensor file
-		buffer, err := os.ReadFile(sensor.file)
+		buffer, err := ioutil.ReadFile(sensor.file)
 		if err != nil {
 			cclog.ComponentError(
 				m.name,
@@ -190,7 +188,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
 			continue
 		}
 		x /= 1000
-		y, err := lp.NewMessage(
+		y, err := lp.New(
 			sensor.metricName,
 			sensor.tags,
 			m.meta,
@@ -199,11 +197,12 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
 		)
 		if err == nil {
 			output <- y
+			m.statsProcessedMetrics++
 		}
 
 		// max temperature
 		if m.config.ReportMaxTemp && sensor.maxTemp != 0 {
-			y, err := lp.NewMessage(
+			y, err := lp.New(
 				sensor.maxTempName,
 				sensor.tags,
 				m.meta,
@@ -212,12 +211,13 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
 			)
 			if err == nil {
 				output <- y
+				m.statsProcessedMetrics++
 			}
 		}
 
 		// critical temperature
 		if m.config.ReportCriticalTemp && sensor.critTemp != 0 {
-			y, err := lp.NewMessage(
+			y, err := lp.New(
 				sensor.critTempName,
 				sensor.tags,
 				m.meta,
@@ -226,10 +226,11 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
 			)
 			if err == nil {
 				output <- y
+				m.statsProcessedMetrics++
 			}
 		}
 	}
+	stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
 }
 
 func (m *TempCollector) Close() {

View File

@@ -9,7 +9,8 @@ import (
 	"strings"
 	"time"
 
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
+	stats "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
 )
 
 const MAX_NUM_PROCS = 10
@@ -21,14 +22,14 @@ type TopProcsCollectorConfig struct {
 type TopProcsCollector struct {
 	metricCollector
 	tags   map[string]string
 	config TopProcsCollectorConfig
+	statsProcessedMetrics int64
 }
 
 func (m *TopProcsCollector) Init(config json.RawMessage) error {
 	var err error
 	m.name = "TopProcsCollector"
-	m.parallel = true
 	m.tags = map[string]string{"type": "node"}
 	m.meta = map[string]string{"source": m.name, "group": "TopProcs"}
 	if len(config) > 0 {
@@ -49,11 +50,12 @@ func (m *TopProcsCollector) Init(config json.RawMessage) error {
 	if err != nil {
 		return errors.New("failed to execute command")
 	}
+	m.statsProcessedMetrics = 0
 	m.init = true
 	return nil
 }
 
-func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	if !m.init {
 		return
 	}
@@ -68,11 +70,13 @@ func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessag
 	lines := strings.Split(string(stdout), "\n")
 	for i := 1; i < m.config.Num_procs+1; i++ {
 		name := fmt.Sprintf("topproc%d", i)
-		y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
+		y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
 		if err == nil {
 			output <- y
+			m.statsProcessedMetrics++
 		}
 	}
+	stats.ComponentStatInt(m.name, "processed_metrics", m.statsProcessedMetrics)
 }
 
 func (m *TopProcsCollector) Close() {

View File

@@ -1,8 +1,8 @@
 {
-    "sinks": "./sinks.json",
-    "collectors" : "./collectors.json",
-    "receivers" : "./receivers.json",
-    "router" : "./router.json",
-    "interval": "10s",
-    "duration": "1s"
+    "sinks": "sinks.json",
+    "collectors" : "collectors.json",
+    "receivers" : "receivers.json",
+    "router" : "router.json",
+    "interval": 10,
+    "duration": 1
 }

View File

@@ -1,74 +0,0 @@
# Building the cc-metric-collector
In most cases, a simple `make` in the main folder is enough to get a `cc-metric-collector` binary. It is basically a `go build`, but some collectors require additional tasks. There is currently no Golang interface to LIKWID, so `cgo` is used to create the bindings, and `cgo` requires the LIKWID header files. The build therefore checks whether LIKWID is installed and, if not, downloads LIKWID and copies the headers.
## System integration
The main configuration settings for system integration are pre-defined in `scripts/cc-metric-collector.config`. The file contains the UNIX user and group used for execution, the PID file location and other settings. Adjust it accordingly and copy it to `/etc/default/cc-metric-collector`
```bash
$ install --mode 644 \
--owner $CC_USER \
--group $CC_GROUP \
scripts/cc-metric-collector.config /etc/default/cc-metric-collector
$ edit /etc/default/cc-metric-collector
```
### SysVinit and similar
If you are using an init system based on `/etc/init.d` scripts, you can use the sample `scripts/cc-metric-collector.init`. It reads the basic configuration from `/etc/default/cc-metric-collector`.
```bash
$ install --mode 755 \
--owner $CC_USER \
--group $CC_GROUP \
scripts/cc-metric-collector.init /etc/init.d/cc-metric-collector
```
### Systemd
If you are using `systemd` as the init system, you can use the sample systemd service file `scripts/cc-metric-collector.service` together with the configuration file `scripts/cc-metric-collector.config`.
```bash
$ install --mode 644 \
--owner $CC_USER \
--group $CC_GROUP \
scripts/cc-metric-collector.service /etc/systemd/system/cc-metric-collector.service
$ systemctl enable cc-metric-collector
```
## Packaging
### RPM
In order to get RPM packages for cc-metric-collector, just use:
```bash
$ make RPM
```
It uses the RPM SPEC file `scripts/cc-metric-collector.spec` and requires the RPM tools (`rpm` and `rpmspec`) and `git`.
### DEB
In order to get very simple Debian packages for cc-metric-collector, just use:
```bash
$ make DEB
```
It uses the DEB control file `scripts/cc-metric-collector.control` and requires `dpkg-deb`, `awk`, `sed` and `git`. It creates only a binary deb package.
_This option is not well tested and therefore experimental_
### Customizing RPMs or DEB packages
If you want to customize the RPMs or DEB packages for your local system, use the following workflow.
- (if there is already a fork in the private account, delete it and wait until GitHub registers the deletion)
- Fork the cc-metric-collector repository (if GitHub has not registered the deletion yet, it creates a fork named cc-metric-collector2)
- Go to the private cc-metric-collector repository and enable GitHub Actions
- Make your changes to the scripts, code, ... Commit and push them.
- Tag the new commit with `v0.x.y-<myversion>` (`git tag v0.x.y-<myversion>`)
- Push tags to repository (`git push --tags`)
- Wait until the Release action finishes. It creates fresh RPMs and DEBs in your private repository on the Releases page.

View File

@@ -12,8 +12,8 @@ The global file contains the paths to the other four files and some global optio
 "collectors" : "collectors.json",
 "receivers" : "receivers.json",
 "router" : "router.json",
-"interval": "10s",
-"duration": "1s"
+"interval": 10,
+"duration": 1
 }
 ```

View File

@@ -1,23 +0,0 @@
# The ClusterCockpit Project
The ClusterCockpit project is a joint project of computing centers in Europe to set up a cluster monitoring stack for small to mid-sized computing centers, led by NHR@FAU.
# The ClusterCockpit Stack
In a cluster environment, there are commonly many systems dedicated to computation, backend servers for file systems, and frontend servers for user interaction and cluster control. The ClusterCockpit stack is mainly used for monitoring the compute systems, with some interaction with the frontend servers. It consists of multiple components:
- cc-metric-collector: Monitor resource usage on the compute systems
- cc-metric-store: In-memory database
- cc-backend & cc-frontend: The web-based visualizer
# CC Metric Collector
The CC Metric Collector project was started to provide a useful set of metrics for HPC and data-science compute systems. It runs as a system daemon and periodically gathers system data, forwarding the metrics to one or more databases. One of the provided backends can be used for the cc-metric-store, but many others exist, e.g. for InfluxDB time-series databases, the Ganglia Monitoring System or the Prometheus Monitoring System.
The data is gathered by so-called "Collectors" and forwarded to an internal router for on-the-fly manipulation (tagging, aggregation, ...), which pushes the metrics to the different metric writers called "Sinks". A fourth component, the "Receivers", can receive data at any time through some networking system such as an HTTP server.
# CC Metric Store
The CC Metric Store is a data management system with short-term in-memory and long-term file-based metric storage.
# CC Backend and CC Frontend
Together, the CC Backend and Frontend form the web interface for ClusterCockpit.

46
go.mod
View File

@@ -1,45 +1,19 @@
 module github.com/ClusterCockpit/cc-metric-collector
 
-go 1.21.1
-toolchain go1.22.1
+go 1.16
 
 require (
-	github.com/ClusterCockpit/cc-energy-manager v0.0.0-20240709142550-dd446f7ab900
-	github.com/ClusterCockpit/cc-units v0.4.0
-	github.com/ClusterCockpit/go-rocm-smi v0.3.0
-	github.com/NVIDIA/go-nvml v0.12.0-2
-	github.com/PaesslerAG/gval v1.2.2
-	github.com/expr-lang/expr v1.16.9
-	github.com/fsnotify/fsnotify v1.7.0
-	github.com/gorilla/mux v1.8.1
-	github.com/influxdata/influxdb-client-go/v2 v2.13.0
+	github.com/NVIDIA/go-nvml v0.11.1-0
+	github.com/influxdata/influxdb-client-go/v2 v2.7.0
 	github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf
-	github.com/influxdata/line-protocol/v2 v2.2.1
-	github.com/nats-io/nats.go v1.36.0
-	github.com/prometheus/client_golang v1.19.0
-	github.com/stmcginnis/gofish v0.15.0
-	github.com/tklauser/go-sysconf v0.3.13
-	golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1
-	golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
-	golang.org/x/sys v0.18.0
+	github.com/nats-io/nats.go v1.13.1-0.20211122170419-d7c1d78a50fc
+	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+	gopkg.in/Knetic/govaluate.v2 v2.3.0
 )
 
 require (
-	github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/google/uuid v1.6.0 // indirect
-	github.com/klauspost/compress v1.17.7 // indirect
-	github.com/nats-io/nkeys v0.4.7 // indirect
-	github.com/nats-io/nuid v1.0.1 // indirect
-	github.com/oapi-codegen/runtime v1.1.1 // indirect
-	github.com/prometheus/client_model v0.6.0 // indirect
-	github.com/prometheus/common v0.49.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/shopspring/decimal v1.3.1 // indirect
-	github.com/tklauser/numcpus v0.7.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
-	golang.org/x/net v0.22.0 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	github.com/PaesslerAG/gval v1.1.2
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/nats-io/nats-server/v2 v2.7.0 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
 )

231
go.sum
View File

@@ -1,120 +1,145 @@
 (go.sum: the module checksum entries were exchanged in lockstep with the dependency changes shown in go.mod above; the full hash listing is omitted here)

View File

@@ -0,0 +1,32 @@
# ClusterCockpit metrics
As described in the [ClusterCockpit specifications](https://github.com/ClusterCockpit/cc-specifications), the whole ClusterCockpit stack uses metrics in the InfluxDB line protocol format. This is also the input and output format for the ClusterCockpit Metric Collector, but internally it uses an extended format called CCMetric while processing.
It is basically a copy of the [InfluxDB line protocol](https://github.com/influxdata/line-protocol) `MutableMetric` interface with one extension. Besides the tags and fields, it contains a list of meta information (re-using the `Tag` structure of the original protocol):
```golang
type ccMetric struct {
name string // same as
tags []*influx.Tag // original
fields []*influx.Field // Influx
tm time.Time // line-protocol
meta []*influx.Tag
}
type CCMetric interface {
influx.MutableMetric // the same functions as defined by influx.MutableMetric
RemoveTag(key string) // this is not published by the original influx.MutableMetric
Meta() map[string]string
	MetaList() []*influx.Tag
AddMeta(key, value string)
HasMeta(key string) bool
GetMeta(key string) (string, bool)
RemoveMeta(key string)
}
```
The `CCMetric` interface provides the same functions as the `MutableMetric` like `{Add, Remove, Has}{Tag, Field}` and additionally provides `{Add, Remove, Has}Meta`.
The InfluxDB protocol creates a new metric with `influx.New(name, tags, fields, time)` while CCMetric uses `ccMetric.New(name, tags, meta, fields, time)` where `tags` and `meta` are both of type `map[string]string`.
You can copy a CCMetric with `FromMetric(other CCMetric) CCMetric`. If you get an `influx.Metric` from a function, like the line protocol parser, you can use `FromInfluxMetric(other influx.Metric) CCMetric` to get a CCMetric out of it (see `NatsReceiver` for an example).
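
A minimal usage sketch (assuming the `internal/ccMetric` import path used elsewhere in this repository; metric name, tags and values are made up):

```golang
package main

import (
	"fmt"
	"time"

	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)

func main() {
	tags := map[string]string{"type": "node"}
	meta := map[string]string{"source": "ExampleCollector", "unit": "MHz"}
	fields := map[string]interface{}{"value": 2400.0}

	// Create a new CCMetric; meta is passed next to tags and fields
	m, err := lp.New("cpufreq", tags, meta, fields, time.Now())
	if err != nil {
		return
	}

	// Meta information can be modified and queried like tags
	m.AddMeta("group", "CPU")
	if unit, ok := m.GetMeta("unit"); ok {
		fmt.Println("unit:", unit)
	}
}
```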

View File

@@ -7,7 +7,6 @@ import (
 	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
 	write "github.com/influxdata/influxdb-client-go/v2/api/write"
 	lp "github.com/influxdata/line-protocol" // MIT license
-	"golang.org/x/exp/maps"
 )
 
 // Most functions are derived from github.com/influxdata/line-protocol/metric.go
@@ -51,7 +50,6 @@ type CCMetric interface {
 	GetField(key string) (value interface{}, ok bool) // Get a field addressed by its key
 	HasField(key string) (ok bool)                    // Check if a field key is present
 	RemoveField(key string)                           // Remove a field addressed by its key
-	String() string                                   // Return line-protocol like string
 }
 
 // String implements the stringer interface for data type ccMetric
@@ -65,11 +63,9 @@
 // ToLineProtocol generates influxDB line protocol for data type ccMetric
 func (m *ccMetric) ToPoint(metaAsTags map[string]bool) (p *write.Point) {
 	p = influxdb2.NewPoint(m.name, m.tags, m.fields, m.tm)
-	for key, use_as_tag := range metaAsTags {
-		if use_as_tag {
-			if value, ok := m.GetMeta(key); ok {
-				p.AddTag(key, value)
-			}
+	for key, ok1 := range metaAsTags {
+		if val, ok2 := m.GetMeta(key); ok1 && ok2 {
+			p.AddTag(key, val)
 		}
 	}
 	return p
@@ -196,13 +192,19 @@ func New(
 ) (CCMetric, error) {
 	m := &ccMetric{
 		name:   name,
-		tags:   maps.Clone(tags),
-		meta:   maps.Clone(meta),
+		tags:   make(map[string]string, len(tags)),
+		meta:   make(map[string]string, len(meta)),
 		fields: make(map[string]interface{}, len(fields)),
 		tm:     tm,
 	}
 
-	// deep copy fields
+	// deep copy tags, meta data tags and fields
+	for k, v := range tags {
+		m.tags[k] = v
+	}
+	for k, v := range meta {
+		m.meta[k] = v
+	}
 	for k, v := range fields {
 		v := convertField(v)
 		if v == nil {
@@ -215,15 +217,26 @@ func New(
 }
 
 // FromMetric copies the metric <other>
-func FromMetric(other CCMetric) CCMetric {
-	return &ccMetric{
+func FromMetric(other ccMetric) CCMetric {
+	m := &ccMetric{
 		name:   other.Name(),
-		tags:   maps.Clone(other.Tags()),
-		meta:   maps.Clone(other.Meta()),
-		fields: maps.Clone(other.Fields()),
+		tags:   make(map[string]string, len(other.tags)),
+		meta:   make(map[string]string, len(other.meta)),
+		fields: make(map[string]interface{}, len(other.fields)),
 		tm:     other.Time(),
 	}
+
+	// deep copy tags, meta data tags and fields
+	for key, value := range other.tags {
+		m.tags[key] = value
+	}
+	for key, value := range other.meta {
+		m.meta[key] = value
+	}
+	for key, value := range other.fields {
+		m.fields[key] = value
+	}
+	return m
 }
 
 // FromInfluxMetric copies the influxDB line protocol metric <other>
@@ -247,10 +260,8 @@ func FromInfluxMetric(other lp.Metric) CCMetric {
 }
 
 // convertField converts data types of fields by the following schemata:
-//
-//	*float32, *float64, float32, float64 -> float64
-//	*int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
-//
+// *float32, *float64, float32, float64 -> float64
+// *int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
 // *uint, *uint8, *uint16, *uint32, *uint64, uint, uint8, uint16, uint32, uint64 -> uint64
 // *[]byte, *string, []byte, string -> string
 // *bool, bool -> bool
// *bool, bool -> bool // *bool, bool -> bool

View File

@@ -0,0 +1,427 @@
package ccTopology
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
cclogger "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
)
const SYSFS_NUMABASE = `/sys/devices/system/node`
const SYSFS_CPUBASE = `/sys/devices/system/cpu`
const PROCFS_CPUINFO = `/proc/cpuinfo`
// intArrayContains scans an array of ints for the given value.
// If the value is found, the corresponding array index is returned.
// The bool return value signals success or failure.
func intArrayContains(array []int, value int) (int, bool) {
	for i, a := range array {
		if a == value {
			return i, true
		}
	}
	return -1, false
}
func fileToInt(path string) int {
buffer, err := ioutil.ReadFile(path)
if err != nil {
log.Print(err)
cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
return -1
}
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
var id int64
//_, err = fmt.Scanf("%d", sbuffer, &id)
id, err = strconv.ParseInt(sbuffer, 10, 32)
if err != nil {
cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
return -1
}
return int(id)
}
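// SocketList extracts the set of socket (physical package) ids from the
// "physical id" entries in /proc/cpuinfo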
func SocketList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
var packs []int
for _, line := range ll {
if strings.HasPrefix(line, "physical id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return packs
}
_, found := intArrayContains(packs, int(id))
if !found {
packs = append(packs, int(id))
}
}
}
return packs
}
func CpuList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
cpulist := make([]int, 0)
for _, line := range ll {
if strings.HasPrefix(line, "processor") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[2], 10, 32)
if err != nil {
log.Print(err)
return cpulist
}
_, found := intArrayContains(cpulist, int(id))
if !found {
cpulist = append(cpulist, int(id))
}
}
}
return cpulist
}
func CoreList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
corelist := make([]int, 0)
for _, line := range ll {
if strings.HasPrefix(line, "core id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return corelist
}
_, found := intArrayContains(corelist, int(id))
if !found {
corelist = append(corelist, int(id))
}
}
}
return corelist
}
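// NumaNodeList discovers the NUMA domains by listing the node<N>
// directories below /sys/devices/system/node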
func NumaNodeList() []int {
numaList := make([]int, 0)
globPath := filepath.Join(string(SYSFS_NUMABASE), "node*")
regexPath := filepath.Join(string(SYSFS_NUMABASE), "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "NumaNodeList", err.Error())
}
for _, f := range files {
if !regex.MatchString(f) {
continue
}
finfo, err := os.Lstat(f)
if err != nil {
continue
}
if !finfo.IsDir() {
continue
}
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil {
if _, found := intArrayContains(numaList, id); !found {
numaList = append(numaList, id)
}
}
}
}
return numaList
}
func DieList() []int {
cpulist := CpuList()
dielist := make([]int, 0)
for _, c := range cpulist {
diepath := filepath.Join(string(SYSFS_CPUBASE), fmt.Sprintf("cpu%d", c), "topology/die_id")
dieid := fileToInt(diepath)
		if dieid >= 0 {
_, found := intArrayContains(dielist, int(dieid))
if !found {
dielist = append(dielist, int(dieid))
}
}
}
if len(dielist) > 0 {
return dielist
}
return SocketList()
}
type CpuEntry struct {
Cpuid int
SMT int
Core int
Socket int
Numadomain int
Die int
}
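// CpuData gathers, for every hardware thread, its core, socket, die, NUMA
// domain and SMT thread id from the per-CPU sysfs entries below
// /sys/devices/system/cpu/cpu<N>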
func CpuData() []CpuEntry {
fileToInt := func(path string) int {
buffer, err := ioutil.ReadFile(path)
if err != nil {
log.Print(err)
//cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
return -1
}
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
var id int64
//_, err = fmt.Scanf("%d", sbuffer, &id)
id, err = strconv.ParseInt(sbuffer, 10, 32)
if err != nil {
cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
return -1
}
return int(id)
}
getCore := func(basepath string) int {
return fileToInt(fmt.Sprintf("%s/core_id", basepath))
}
getSocket := func(basepath string) int {
return fileToInt(fmt.Sprintf("%s/physical_package_id", basepath))
}
getDie := func(basepath string) int {
return fileToInt(fmt.Sprintf("%s/die_id", basepath))
}
getSMT := func(cpuid int, basepath string) int {
buffer, err := ioutil.ReadFile(fmt.Sprintf("%s/thread_siblings_list", basepath))
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
}
threadlist := make([]int, 0)
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
for _, x := range strings.Split(sbuffer, ",") {
id, err := strconv.ParseInt(x, 10, 32)
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
}
threadlist = append(threadlist, int(id))
}
for i, x := range threadlist {
if x == cpuid {
return i
}
}
return 1
}
getNumaDomain := func(basepath string) int {
globPath := filepath.Join(basepath, "node*")
regexPath := filepath.Join(basepath, "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getNumaDomain", err.Error())
}
for _, f := range files {
finfo, err := os.Lstat(f)
if err == nil && finfo.IsDir() {
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil {
return id
}
}
}
}
return 0
}
clist := make([]CpuEntry, 0)
for _, c := range CpuList() {
clist = append(clist, CpuEntry{Cpuid: c})
}
for i, centry := range clist {
centry.Socket = -1
centry.Numadomain = -1
centry.Die = -1
centry.Core = -1
// Set base directory for topology lookup
cpustr := fmt.Sprintf("cpu%d", centry.Cpuid)
base := filepath.Join("/sys/devices/system/cpu", cpustr)
topoBase := filepath.Join(base, "topology")
// Lookup CPU core id
centry.Core = getCore(topoBase)
// Lookup CPU socket id
centry.Socket = getSocket(topoBase)
// Lookup CPU die id
centry.Die = getDie(topoBase)
if centry.Die < 0 {
centry.Die = centry.Socket
}
// Lookup SMT thread id
centry.SMT = getSMT(centry.Cpuid, topoBase)
// Lookup NUMA domain id
centry.Numadomain = getNumaDomain(base)
// Update values in output list
clist[i] = centry
}
return clist
}
type CpuInformation struct {
NumHWthreads int
SMTWidth int
NumSockets int
NumDies int
NumCores int
NumNumaDomains int
}
func CpuInfo() CpuInformation {
var c CpuInformation
smtList := make([]int, 0)
numaList := make([]int, 0)
dieList := make([]int, 0)
socketList := make([]int, 0)
coreList := make([]int, 0)
cdata := CpuData()
for _, d := range cdata {
if _, ok := intArrayContains(smtList, d.SMT); !ok {
smtList = append(smtList, d.SMT)
}
if _, ok := intArrayContains(numaList, d.Numadomain); !ok {
numaList = append(numaList, d.Numadomain)
}
if _, ok := intArrayContains(dieList, d.Die); !ok {
dieList = append(dieList, d.Die)
}
if _, ok := intArrayContains(socketList, d.Socket); !ok {
socketList = append(socketList, d.Socket)
}
if _, ok := intArrayContains(coreList, d.Core); !ok {
coreList = append(coreList, d.Core)
}
}
c.NumNumaDomains = len(numaList)
c.SMTWidth = len(smtList)
c.NumDies = len(dieList)
c.NumCores = len(coreList)
c.NumSockets = len(socketList)
c.NumHWthreads = len(cdata)
return c
}
func GetCpuSocket(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Socket
}
}
return -1
}
func GetCpuNumaDomain(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Numadomain
}
}
return -1
}
func GetCpuDie(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Die
}
}
return -1
}
func GetCpuCore(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Core
}
}
return -1
}
func GetSocketCpus(socket int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Socket == socket {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetNumaDomainCpus(domain int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Numadomain == domain {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetDieCpus(die int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Die == die {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
func GetCoreCpus(core int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Core == core {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpulist
}
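
Taken together, these functions form a small read-only query API over `/proc/cpuinfo` and sysfs. The following sketch shows how code inside this repository might use it; it is illustrative only, since the printed values depend entirely on the machine it runs on and the `internal/ccTopology` import path is usable only from within this module:

```go
package main

import (
	"fmt"

	topo "github.com/ClusterCockpit/cc-metric-collector/internal/ccTopology"
)

func main() {
	// Summarize the node topology
	info := topo.CpuInfo()
	fmt.Printf("%d HW threads, %d cores, %d sockets, %d NUMA domains\n",
		info.NumHWthreads, info.NumCores, info.NumSockets, info.NumNumaDomains)

	// Map the first hardware thread back to its socket and NUMA domain
	if cpus := topo.CpuList(); len(cpus) > 0 {
		fmt.Println("cpu", cpus[0], "-> socket", topo.GetCpuSocket(cpus[0]),
			"numa", topo.GetCpuNumaDomain(cpus[0]))
	}

	// List all hardware threads of socket 0
	fmt.Println("socket 0 cpus:", topo.GetSocketCpus(0))
}
```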

View File

@@ -9,10 +9,10 @@ import (
 	"sync"
 	"time"

-	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
-	topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
+	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
+	topo "github.com/ClusterCockpit/cc-metric-collector/internal/ccTopology"

 	"github.com/PaesslerAG/gval"
 )

@@ -31,14 +31,14 @@ type metricAggregator struct {
 	functions []*MetricAggregatorIntervalConfig
 	constants map[string]interface{}
 	language  gval.Language
-	output    chan lp.CCMessage
+	output    chan lp.CCMetric
 }

 type MetricAggregator interface {
 	AddAggregation(name, function, condition string, tags, meta map[string]string) error
 	DeleteAggregation(name string) error
-	Init(output chan lp.CCMessage) error
-	Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage)
+	Init(output chan lp.CCMetric) error
+	Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric)
 }

 var metricCacheLanguage = gval.NewLanguage(

@@ -74,7 +74,7 @@ var evaluables = struct {
 	mapping: make(map[string]gval.Evaluable),
 }

-func (c *metricAggregator) Init(output chan lp.CCMessage) error {
+func (c *metricAggregator) Init(output chan lp.CCMetric) error {
 	c.output = output
 	c.functions = make([]*MetricAggregatorIntervalConfig, 0)
 	c.constants = make(map[string]interface{})

@@ -112,7 +112,7 @@ func (c *metricAggregator) Init(output chan lp.CCMessage) error {
 	return nil
 }

-func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage) {
+func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric) {
 	vars := make(map[string]interface{})
 	for k, v := range c.constants {
 		vars[k] = v

@@ -121,13 +121,8 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
 	vars["endtime"] = endtime
 	for _, f := range c.functions {
 		cclog.ComponentDebug("MetricCache", "COLLECT", f.Name, "COND", f.Condition)
-		var valuesFloat64 []float64
-		var valuesFloat32 []float32
-		var valuesInt []int
-		var valuesInt32 []int32
-		var valuesInt64 []int64
-		var valuesBool []bool
-		matches := make([]lp.CCMessage, 0)
+		values := make([]float64, 0)
+		matches := make([]lp.CCMetric, 0)
 		for _, m := range metrics {
 			vars["metric"] = m
 			//value, err := gval.Evaluate(f.Condition, vars, c.language)

@@ -141,17 +136,17 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
 			if valid {
 				switch x := v.(type) {
 				case float64:
-					valuesFloat64 = append(valuesFloat64, x)
+					values = append(values, x)
 				case float32:
-					valuesFloat32 = append(valuesFloat32, x)
 				case int:
-					valuesInt = append(valuesInt, x)
-				case int32:
-					valuesInt32 = append(valuesInt32, x)
 				case int64:
-					valuesInt64 = append(valuesInt64, x)
+					values = append(values, float64(x))
 				case bool:
-					valuesBool = append(valuesBool, x)
+					if x {
+						values = append(values, float64(1.0))
+					} else {
+						values = append(values, float64(0.0))
+					}
 				default:
 					cclog.ComponentError("MetricCache", "COLLECT ADD VALUE", v, "FAILED")
 				}

@@ -160,63 +155,17 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
 			}
 		}
 		delete(vars, "metric")
-
-		// Check, that only values of one type were collected
-		countValueTypes := 0
-		if len(valuesFloat64) > 0 {
-			countValueTypes += 1
-		}
-		if len(valuesFloat32) > 0 {
-			countValueTypes += 1
-		}
-		if len(valuesInt) > 0 {
-			countValueTypes += 1
-		}
-		if len(valuesInt32) > 0 {
-			countValueTypes += 1
-		}
-		if len(valuesInt64) > 0 {
-			countValueTypes += 1
-		}
-		if len(valuesBool) > 0 {
-			countValueTypes += 1
-		}
-		if countValueTypes > 1 {
-			cclog.ComponentError("MetricCache", "Collected values of different types")
-		}
-
-		var len_values int
-		switch {
-		case len(valuesFloat64) > 0:
-			vars["values"] = valuesFloat64
-			len_values = len(valuesFloat64)
-		case len(valuesFloat32) > 0:
-			vars["values"] = valuesFloat32
-			len_values = len(valuesFloat32)
-		case len(valuesInt) > 0:
-			vars["values"] = valuesInt
-			len_values = len(valuesInt)
-		case len(valuesInt32) > 0:
-			vars["values"] = valuesInt32
-			len_values = len(valuesInt32)
-		case len(valuesInt64) > 0:
-			vars["values"] = valuesInt64
-			len_values = len(valuesInt64)
-		case len(valuesBool) > 0:
-			vars["values"] = valuesBool
-			len_values = len(valuesBool)
-		}
-		cclog.ComponentDebug("MetricCache", "EVALUATE", f.Name, "METRICS", len_values, "CALC", f.Function)
+		cclog.ComponentDebug("MetricCache", "EVALUATE", f.Name, "METRICS", len(values), "CALC", f.Function)
+		vars["values"] = values
 		vars["metrics"] = matches
-		if len_values > 0 {
+		if len(values) > 0 {
 			value, err := gval.Evaluate(f.Function, vars, c.language)
 			if err != nil {
-				cclog.ComponentError("MetricCache", "EVALUATE", f.Name, "METRICS", len_values, "CALC", f.Function, ":", err.Error())
+				cclog.ComponentError("MetricCache", "EVALUATE", f.Name, "METRICS", len(values), "CALC", f.Function, ":", err.Error())
 				break
 			}

-			copy_tags := func(tags map[string]string, metrics []lp.CCMessage) map[string]string {
+			copy_tags := func(tags map[string]string, metrics []lp.CCMetric) map[string]string {
 				out := make(map[string]string)
 				for key, value := range tags {
 					switch value {

@@ -233,7 +182,7 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
 				}
 				return out
 			}
-			copy_meta := func(meta map[string]string, metrics []lp.CCMessage) map[string]string {
+			copy_meta := func(meta map[string]string, metrics []lp.CCMetric) map[string]string {
 				out := make(map[string]string)
 				for key, value := range meta {
 					switch value {

@@ -253,18 +202,18 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
 			tags := copy_tags(f.Tags, matches)
 			meta := copy_meta(f.Meta, matches)

-			var m lp.CCMessage
+			var m lp.CCMetric
 			switch t := value.(type) {
 			case float64:
-				m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
+				m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
 			case float32:
-				m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
+				m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
 			case int:
-				m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
+				m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
 			case int64:
-				m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
+				m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
 			case string:
-				m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
+				m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
 			default:
 				cclog.ComponentError("MetricCache", "Gval returned invalid type", t, "skipping metric", f.Name)
 			}

@@ -367,7 +316,7 @@ func EvalBoolCondition(condition string, params map[string]interface{}) (bool, e
 	return value, err
 }

-func EvalFloat64Condition(condition string, params map[string]float64) (float64, error) {
+func EvalFloat64Condition(condition string, params map[string]interface{}) (float64, error) {
 	evaluables.mutex.Lock()
 	evaluable, ok := evaluables.mapping[condition]
 	evaluables.mutex.Unlock()

@@ -389,7 +338,7 @@ func EvalFloat64Condition(condition string, params map[string]float64) (float64,
 	return value, err
 }

-func NewAggregator(output chan lp.CCMessage) (MetricAggregator, error) {
+func NewAggregator(output chan lp.CCMetric) (MetricAggregator, error) {
 	a := new(metricAggregator)
 	err := a.Init(output)
 	if err != nil {

View File

@@ -3,167 +3,162 @@ package metricAggregator

 import (
 	"errors"
 	"fmt"
+	"math"
 	"regexp"
+	"sort"
 	"strings"

-	"golang.org/x/exp/slices"
-
-	topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
+	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
+	topo "github.com/ClusterCockpit/cc-metric-collector/internal/ccTopology"
 )

 /*
  * Arithmetic functions on value arrays
  */

-func sumAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
-	if len(values) == 0 {
-		return 0.0, errors.New("sum function requires at least one argument")
-	}
-	var sum T
-	for _, value := range values {
-		sum += value
-	}
-	return sum, nil
-}
-
 // Sum up values
-func sumfunc(args interface{}) (interface{}, error) {
-	var err error
-	switch values := args.(type) {
-	case []float64:
-		return sumAnyType(values)
-	case []float32:
-		return sumAnyType(values)
-	case []int:
-		return sumAnyType(values)
-	case []int64:
-		return sumAnyType(values)
-	case []int32:
-		return sumAnyType(values)
-	default:
-		err = errors.New("function 'sum' only on list of values (float64, float32, int, int32, int64)")
-	}
-	return 0.0, err
-}
+func sumfunc(args ...interface{}) (interface{}, error) {
+	s := 0.0
+	values, ok := args[0].([]float64)
+	if ok {
+		cclog.ComponentDebug("MetricCache", "SUM FUNC START")
+		for _, x := range values {
+			s += x
+		}
+		cclog.ComponentDebug("MetricCache", "SUM FUNC END", s)
+	} else {
+		cclog.ComponentDebug("MetricCache", "SUM FUNC CAST FAILED")
+	}
+	return s, nil
+}

-func minAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
-	if len(values) == 0 {
-		return 0.0, errors.New("min function requires at least one argument")
-	}
-	return slices.Min(values), nil
-}
-
 // Get the minimum value
-func minfunc(args interface{}) (interface{}, error) {
-	switch values := args.(type) {
-	case []float64:
-		return minAnyType(values)
-	case []float32:
-		return minAnyType(values)
-	case []int:
-		return minAnyType(values)
-	case []int64:
-		return minAnyType(values)
-	case []int32:
-		return minAnyType(values)
-	default:
-		return 0.0, errors.New("function 'min' only on list of values (float64, float32, int, int32, int64)")
-	}
-}
+func minfunc(args ...interface{}) (interface{}, error) {
+	var err error = nil
+	switch values := args[0].(type) {
+	case []float64:
+		var s float64 = math.MaxFloat64
+		for _, x := range values {
+			if x < s {
+				s = x
+			}
+		}
+		return s, nil
+	case []float32:
+		var s float32 = math.MaxFloat32
+		for _, x := range values {
+			if x < s {
+				s = x
+			}
+		}
+		return s, nil
+	case []int:
+		var s int = int(math.MaxInt32)
+		for _, x := range values {
+			if x < s {
+				s = x
+			}
+		}
+		return s, nil
+	case []int64:
+		var s int64 = math.MaxInt64
+		for _, x := range values {
+			if x < s {
+				s = x
+			}
+		}
+		return s, nil
+	case []int32:
+		var s int32 = math.MaxInt32
+		for _, x := range values {
+			if x < s {
+				s = x
+			}
+		}
+		return s, nil
+	default:
+		err = errors.New("function 'min' only on list of values (float64, float32, int, int32, int64)")
+	}
+	return 0.0, err
+}

-func avgAnyType[T float64 | float32 | int | int32 | int64](values []T) (float64, error) {
-	if len(values) == 0 {
-		return 0.0, errors.New("average function requires at least one argument")
-	}
-	sum, err := sumAnyType[T](values)
-	return float64(sum) / float64(len(values)), err
-}
-
 // Get the average or mean value
-func avgfunc(args interface{}) (interface{}, error) {
-	switch values := args.(type) {
-	case []float64:
-		return avgAnyType(values)
-	case []float32:
-		return avgAnyType(values)
-	case []int:
-		return avgAnyType(values)
-	case []int64:
-		return avgAnyType(values)
-	case []int32:
-		return avgAnyType(values)
-	default:
-		return 0.0, errors.New("function 'average' only on list of values (float64, float32, int, int32, int64)")
-	}
-}
+func avgfunc(args ...interface{}) (interface{}, error) {
+	switch values := args[0].(type) {
+	case []float64:
+		var s float64 = 0
+		for _, x := range values {
+			s += x
+		}
+		return s / float64(len(values)), nil
+	case []float32:
+		var s float32 = 0
+		for _, x := range values {
+			s += x
+		}
+		return s / float32(len(values)), nil
+	case []int:
+		var s int = 0
+		for _, x := range values {
+			s += x
+		}
+		return s / len(values), nil
+	case []int64:
+		var s int64 = 0
+		for _, x := range values {
+			s += x
+		}
+		return s / int64(len(values)), nil
+	}
+	return 0.0, nil
+}

-func maxAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
-	if len(values) == 0 {
-		return 0.0, errors.New("max function requires at least one argument")
-	}
-	return slices.Max(values), nil
-}
-
 // Get the maximum value
-func maxfunc(args interface{}) (interface{}, error) {
-	switch values := args.(type) {
-	case []float64:
-		return maxAnyType(values)
-	case []float32:
-		return maxAnyType(values)
-	case []int:
-		return maxAnyType(values)
-	case []int64:
-		return maxAnyType(values)
-	case []int32:
-		return maxAnyType(values)
-	default:
-		return 0.0, errors.New("function 'max' only on list of values (float64, float32, int, int32, int64)")
-	}
-}
+func maxfunc(args ...interface{}) (interface{}, error) {
+	s := 0.0
+	values, ok := args[0].([]float64)
+	if ok {
+		for _, x := range values {
+			if x > s {
+				s = x
+			}
+		}
+	}
+	return s, nil
+}

-func medianAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
-	if len(values) == 0 {
-		return 0.0, errors.New("median function requires at least one argument")
-	}
-	slices.Sort(values)
-	var median T
-	if midPoint := len(values) % 2; midPoint == 0 {
-		median = (values[midPoint-1] + values[midPoint]) / 2
-	} else {
-		median = values[midPoint]
-	}
-	return median, nil
-}
-
 // Get the median value
-func medianfunc(args interface{}) (interface{}, error) {
-	switch values := args.(type) {
-	case []float64:
-		return medianAnyType(values)
-	case []float32:
-		return medianAnyType(values)
-	case []int:
-		return medianAnyType(values)
-	case []int64:
-		return medianAnyType(values)
-	case []int32:
-		return medianAnyType(values)
-	default:
-		return 0.0, errors.New("function 'median' only on list of values (float64, float32, int, int32, int64)")
-	}
-}
+func medianfunc(args ...interface{}) (interface{}, error) {
+	switch values := args[0].(type) {
+	case []float64:
+		sort.Float64s(values)
+		return values[len(values)/2], nil
+	// case []float32:
+	// 	sort.Float64s(values)
+	// 	return values[len(values)/2], nil
+	case []int:
+		sort.Ints(values)
+		return values[len(values)/2], nil
+	// case []int64:
+	// 	sort.Ints(values)
+	// 	return values[len(values)/2], nil
+	// case []int32:
+	// 	sort.Ints(values)
+	// 	return values[len(values)/2], nil
+	}
+	return 0.0, errors.New("function 'median()' only on lists of type float64 and int")
+}

 /*
  * Get number of values in list. Returns always an int
  */
-func lenfunc(args interface{}) (interface{}, error) {
+func lenfunc(args ...interface{}) (interface{}, error) {
 	var err error = nil
 	var length int = 0
-	switch values := args.(type) {
+	switch values := args[0].(type) {
 	case []float64:
 		length = len(values)
 	case []float32:

@@ -248,49 +243,49 @@ func matchfunc(args ...interface{}) (interface{}, error) {
  */

 // for a given cpuid, it returns the core id
-func getCpuCoreFunc(args interface{}) (interface{}, error) {
-	switch cpuid := args.(type) {
+func getCpuCoreFunc(args ...interface{}) (interface{}, error) {
+	switch cpuid := args[0].(type) {
 	case int:
-		return topo.GetHwthreadCore(cpuid), nil
+		return topo.GetCpuCore(cpuid), nil
 	}
 	return -1, errors.New("function 'getCpuCore' accepts only an 'int' cpuid")
 }

 // for a given cpuid, it returns the socket id
-func getCpuSocketFunc(args interface{}) (interface{}, error) {
-	switch cpuid := args.(type) {
+func getCpuSocketFunc(args ...interface{}) (interface{}, error) {
+	switch cpuid := args[0].(type) {
 	case int:
-		return topo.GetHwthreadSocket(cpuid), nil
+		return topo.GetCpuSocket(cpuid), nil
 	}
 	return -1, errors.New("function 'getCpuCore' accepts only an 'int' cpuid")
 }

 // for a given cpuid, it returns the id of the NUMA node
-func getCpuNumaDomainFunc(args interface{}) (interface{}, error) {
-	switch cpuid := args.(type) {
+func getCpuNumaDomainFunc(args ...interface{}) (interface{}, error) {
+	switch cpuid := args[0].(type) {
 	case int:
-		return topo.GetHwthreadNumaDomain(cpuid), nil
+		return topo.GetCpuNumaDomain(cpuid), nil
 	}
 	return -1, errors.New("function 'getCpuNuma' accepts only an 'int' cpuid")
 }

 // for a given cpuid, it returns the id of the CPU die
-func getCpuDieFunc(args interface{}) (interface{}, error) {
-	switch cpuid := args.(type) {
+func getCpuDieFunc(args ...interface{}) (interface{}, error) {
+	switch cpuid := args[0].(type) {
 	case int:
-		return topo.GetHwthreadDie(cpuid), nil
+		return topo.GetCpuDie(cpuid), nil
 	}
 	return -1, errors.New("function 'getCpuDie' accepts only an 'int' cpuid")
 }

 // for a given core id, it returns the list of cpuids
-func getCpuListOfCoreFunc(args interface{}) (interface{}, error) {
+func getCpuListOfCoreFunc(args ...interface{}) (interface{}, error) {
 	cpulist := make([]int, 0)
-	switch in := args.(type) {
+	switch in := args[0].(type) {
 	case int:
 		for _, c := range topo.CpuData() {
 			if c.Core == in {
-				cpulist = append(cpulist, c.CpuID)
+				cpulist = append(cpulist, c.Cpuid)
 			}
 		}
 	}

@@ -298,13 +293,13 @@ func getCpuListOfCoreFunc(args interface{}) (interface{}, error) {
 }

 // for a given socket id, it returns the list of cpuids
-func getCpuListOfSocketFunc(args interface{}) (interface{}, error) {
+func getCpuListOfSocketFunc(args ...interface{}) (interface{}, error) {
 	cpulist := make([]int, 0)
-	switch in := args.(type) {
+	switch in := args[0].(type) {
 	case int:
 		for _, c := range topo.CpuData() {
 			if c.Socket == in {
-				cpulist = append(cpulist, c.CpuID)
+				cpulist = append(cpulist, c.Cpuid)
 			}
 		}
 	}

@@ -312,13 +307,13 @@ func getCpuListOfSocketFunc(args interface{}) (interface{}, error) {
 }

 // for a given id of a NUMA domain, it returns the list of cpuids
-func getCpuListOfNumaDomainFunc(args interface{}) (interface{}, error) {
+func getCpuListOfNumaDomainFunc(args ...interface{}) (interface{}, error) {
 	cpulist := make([]int, 0)
-	switch in := args.(type) {
+	switch in := args[0].(type) {
 	case int:
 		for _, c := range topo.CpuData() {
-			if c.NumaDomain == in {
-				cpulist = append(cpulist, c.CpuID)
+			if c.Numadomain == in {
+				cpulist = append(cpulist, c.Cpuid)
 			}
 		}
 	}

@@ -326,13 +321,13 @@ func getCpuListOfNumaDomainFunc(args interface{}) (interface{}, error) {
 }

 // for a given CPU die id, it returns the list of cpuids
-func getCpuListOfDieFunc(args interface{}) (interface{}, error) {
+func getCpuListOfDieFunc(args ...interface{}) (interface{}, error) {
 	cpulist := make([]int, 0)
-	switch in := args.(type) {
+	switch in := args[0].(type) {
 	case int:
 		for _, c := range topo.CpuData() {
 			if c.Die == in {
-				cpulist = append(cpulist, c.CpuID)
+				cpulist = append(cpulist, c.Cpuid)
 			}
 		}
 	}

@@ -340,8 +335,8 @@ func getCpuListOfDieFunc(args interface{}) (interface{}, error) {
 }

 // wrapper function to get a list of all cpuids of the node
-func getCpuListOfNode() (interface{}, error) {
-	return topo.HwthreadList(), nil
+func getCpuListOfNode(args ...interface{}) (interface{}, error) {
+	return topo.CpuList(), nil
 }

 // helper function to get the cpuid list for a CCMetric type tag set (type and type-id)

@@ -353,14 +348,14 @@ func getCpuListOfType(args ...interface{}) (interface{}, error) {
 	case string:
 		switch typ {
 		case "node":
-			return topo.HwthreadList(), nil
+			return topo.CpuList(), nil
 		case "socket":
 			return getCpuListOfSocketFunc(args[1])
 		case "numadomain":
 			return getCpuListOfNumaDomainFunc(args[1])
 		case "core":
 			return getCpuListOfCoreFunc(args[1])
-		case "hwthread":
+		case "cpu":
 			var cpu int
 			switch id := args[1].(type) {
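
All aggregation callbacks above follow the variadic `func(args ...interface{}) (interface{}, error)` signature that gval invokes, with the value list passed as the first argument. A self-contained sketch of that calling contract (the body mirrors the simplified `sumfunc` above, minus the debug logging):

```go
package main

import "fmt"

// sum mirrors the callback contract used by the metric aggregator:
// a variadic interface{} argument list whose first entry is the value slice.
func sum(args ...interface{}) (interface{}, error) {
	s := 0.0
	if values, ok := args[0].([]float64); ok {
		for _, x := range values {
			s += x
		}
	}
	return s, nil
}

func main() {
	// This is roughly how the expression 'sum(values)' ends up calling the function.
	result, _ := sum([]float64{1.5, 2.5, 4.0})
	fmt.Println(result) // 8
}
```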

View File

@@ -1,21 +1,15 @@
 # CC Metric Router

-The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMessages](https://pkg.go.dev/github.com/ClusterCockpit/cc-energy-manager@v0.0.0-20240919152819-92a17f2da4f7/pkg/cc-message).
+The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMetrics](../ccMetric/README.md).

 # Configuration

-**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) with option `process_messages`.
-
 ```json
 {
     "num_cache_intervals" : 1,
     "interval_timestamp" : true,
     "hostname_tag" : "hostname",
     "max_forward" : 50,
-    "process_messages": {
-        "see": "pkg/messageProcessor/README.md"
-    },
     "add_tags" : [
         {
             "key" : "cluster",

@@ -58,19 +52,12 @@ The CCMetric router sits in between the collectors and the sinks and can be used
     ],
     "rename_metrics" : {
         "metric_12345" : "mymetric"
-    },
-    "normalize_units" : true,
-    "change_unit_prefix" : {
-        "mem_used" : "G",
-        "mem_total" : "G"
     }
 }
 ```

 There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.

-**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) (option `process_messages`) instead of `add_tags`, `delete_tags`, `drop_metrics`, `drop_metrics_if`, `rename_metrics`, `normalize_units` and `change_unit_prefix`. These options are deprecated and will be removed in future versions. Until then, they are added to the message processor.
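
For illustration, a single `add_tags` entry following this `key`/`value`/`if` format could look like this (using the same condition as the `temp_package_id_0` example further below):

```json
{
    "key" : "test",
    "value" : "testing",
    "if" : "name == 'temp_package_id_0'"
}
```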
 # Processing order in the router

 - Add the `hostname_tag` tag (if sent by collectors or cache)

@@ -104,8 +91,6 @@ Every time the router receives a metric through any of the channels, it tries to

 # The `rename_metrics` option

-__deprecated__
-
 In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, executables and libraries, they might change from system to system (or installation to installation, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`

 ```json

@@ -117,8 +102,6 @@ In the ClusterCockpit world we specified a set of standard metrics. Since some c

 # Conditional manipulation of tags (`add_tags` and `del_tags`)

-__deprecated__
-
 Common config format:

 ```json
 {

@@ -130,8 +113,6 @@ Common config format:

 ## The `del_tags` option

-__deprecated__
-
 The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.

 If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case.

@@ -143,8 +124,6 @@ Never delete tags:

 ## The `add_tags` option

-__deprecated__
-
 In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.

 If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.

@@ -186,8 +165,6 @@ In some cases, you want to drop a metric and don't get it forwarded to the sinks

 ## The `drop_metrics` section

-__deprecated__
-
 The argument is a list of metric names. No further checks are performed, only a comparison of the metric name

 ```json

@@ -203,8 +180,6 @@ The example drops all metrics with the name `drop_metric_1` and `drop_metric_2`.

 ## The `drop_metrics_if` section

-__deprecated__
-
 This option takes a list of evaluable conditions and performs them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).

 ```json

@@ -217,25 +192,10 @@ This option takes a list of evaluable conditions and performs them one after the
 ```

 The first line is comparable to the example in `drop_metrics`: it drops all metrics starting with `drop_metric_` and ending with a number. The second line drops all metrics of the first hardware thread (**not** recommended)

-# Manipulating the metric units
-
-## The `normalize_units` option
-
-__deprecated__
-
-The cc-metric-collector tries to read the data from the system as it is reported. If available, it tries to read the metric unit from the system as well (e.g. from `/proc/meminfo`). The problem is that, depending on the source, the metric units are named differently. Just think about `byte`, `Byte`, `B`, `bytes`, ...
-
-The [cc-units](https://github.com/ClusterCockpit/cc-units) package provides us a normalization option to use the same metric unit name for all metrics. If this option is set to true, all `unit` meta tags are normalized.
-
-## The `change_unit_prefix` section
-
-__deprecated__
-
-It is often the case that metrics are reported by the system using a rather outdated unit prefix (like `/proc/meminfo` still uses kByte despite current memory sizes being in the GByte range). If you want to change the prefix of a unit, you can do that with the help of [cc-units](https://github.com/ClusterCockpit/cc-units). The setting works on the metric name and requires the new prefix for the metric. The cc-units package determines the scaling factor.
-
 # Aggregate metric values of the current interval with the `interval_aggregates` option

-**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0 and is **experimental**
+**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0

 In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent in the next interval but have the timestamp of the previous interval beginning.

@@ -279,22 +239,3 @@ Use cases for `interval_aggregates`:
     }
 }
 ```
-
-# Order of operations
-
-The router performs the above mentioned options in a specific order. In order to get the logic you want for a specific metric, it is crucial to know the processing order:
-
-- Add the `hostname` tag (c)
-- Manipulate the timestamp to the interval timestamp (c,r)
-- Drop metrics based on `drop_metrics` and `drop_metrics_if` (c,r)
-- Add tags based on `add_tags` (c,r)
-- Delete tags based on `del_tags` (c,r)
-- Rename metric based on `rename_metric` (c,r)
-- Add tags based on `add_tags` to still work if the configuration uses the new name (c,r)
-- Delete tags based on `del_tags` to still work if the configuration uses the new name (c,r)
-- Normalize units when `normalize_units` is set (c,r)
-- Convert unit prefix based on `change_unit_prefix` (c,r)
-
-Legend:
-
-- 'c' if metric is coming from a collector
-- 'r' if metric is coming from a receiver

View File

@@ -0,0 +1,17 @@
# Stats API

The Stats API can be used for debugging. It publishes counters from the different components of the CC Metric Collector as JSON at an HTTP endpoint.

# Configuration

The Stats API has its own configuration file to specify the listen host and port. The defaults are `localhost` and `8080`.

```json
{
    "bindhost" : "",
    "port" : "8080",
    "publish_collectorstate" : true
}
```

The `bindhost` and `port` options specify the listen host and port. `publish_collectorstate` needs to be `true`, otherwise nothing is presented. This option exists for future use, in case more information has to be published under different paths.
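
With `publish_collectorstate` set to `true`, an HTTP GET on the server root returns the collected counters grouped by component, i.e. a JSON object of the form `{component: {counter: value}}`. Which components and counter names appear depends on the running collectors and sinks; the response below is only a hypothetical illustration:

```json
{
    "MemstatCollector" : {
        "metrics_sent" : 42
    },
    "HttpSink" : {
        "metrics_processed" : 126
    }
}
```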

View File

@@ -0,0 +1,232 @@
package metricRouter

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"sync"
	"time"

	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
	mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"

	"github.com/gorilla/mux"
)

type statsApiConfig struct {
	PublishCollectorState bool   `json:"publish_collectorstate"`
	Host                  string `json:"bindhost"`
	Port                  string `json:"port"`
}

// Stats API data structure
type statsApi struct {
	name     string
	input    chan lp.CCMetric
	indone   chan bool
	outdone  chan bool
	config   statsApiConfig
	wg       *sync.WaitGroup
	statsWg  sync.WaitGroup
	ticker   mct.MultiChanTicker
	tickchan chan time.Time
	server   *http.Server
	router   *mux.Router
	lock     sync.Mutex
	baseurl  string
	stats    map[string]map[string]int64
	outStats map[string]map[string]int64
}

type StatsApi interface {
	Start()
	Close()
	StatsFunc(w http.ResponseWriter, r *http.Request)
}

var statsApiServer *statsApi = nil

// updateStats merges the fields of a received '_stats' metric into the
// counter map of the component named in the metric's 'source' meta tag
func (a *statsApi) updateStats(point lp.CCMetric) {
	switch point.Name() {
	case "_stats":
		if name, nok := point.GetMeta("source"); nok {
			var compStats map[string]int64
			var ok bool

			if compStats, ok = a.stats[name]; !ok {
				a.stats[name] = make(map[string]int64)
				compStats = a.stats[name]
			}
			for k, v := range point.Fields() {
				switch value := v.(type) {
				case int:
					compStats[k] = int64(value)
				case uint:
					compStats[k] = int64(value)
				case int32:
					compStats[k] = int64(value)
				case uint32:
					compStats[k] = int64(value)
				case int64:
					compStats[k] = int64(value)
				case uint64:
					compStats[k] = int64(value)
				default:
					cclog.ComponentDebug(a.name, "Unusable stats for", k, ". Values should be int64")
				}
			}
			a.stats[name] = compStats
		}
	}
}

func (a *statsApi) Start() {
	a.ticker.AddChannel(a.tickchan)
	a.wg.Add(1)

	// Input goroutine: receive '_stats' metrics and update the counters
	a.statsWg.Add(1)
	go func() {
		a.stats = make(map[string]map[string]int64)
		defer a.statsWg.Done()
		for {
			select {
			case <-a.indone:
				cclog.ComponentDebug(a.name, "INPUT DONE")
				close(a.indone)
				return
			case p := <-a.input:
				a.lock.Lock()
				a.updateStats(p)
				a.lock.Unlock()
			}
		}
	}()

	// Output goroutine: copy the counters into the published map once per interval
	a.statsWg.Add(1)
	go func() {
		a.outStats = make(map[string]map[string]int64)
		defer a.statsWg.Done()
		a.lock.Lock()
		for comp, compData := range a.stats {
			var outData map[string]int64
			var ok bool
			if outData, ok = a.outStats[comp]; !ok {
				outData = make(map[string]int64)
			}
			for k, v := range compData {
				outData[k] = v
			}
			a.outStats[comp] = outData
		}
		a.lock.Unlock()
		for {
			select {
			case <-a.outdone:
				cclog.ComponentDebug(a.name, "OUTPUT DONE")
				close(a.outdone)
				return
			case <-a.tickchan:
				a.lock.Lock()
				for comp, compData := range a.stats {
					var outData map[string]int64
					var ok bool
					if outData, ok = a.outStats[comp]; !ok {
						outData = make(map[string]int64)
					}
					for k, v := range compData {
						outData[k] = v
					}
					a.outStats[comp] = outData
				}
				a.lock.Unlock()
			}
		}
	}()

	// Server goroutine: serve the published counters via HTTP
	a.statsWg.Add(1)
	go func() {
		defer a.statsWg.Done()
		err := a.server.ListenAndServe()
		if err != nil && err.Error() != "http: Server closed" {
			cclog.ComponentError(a.name, err.Error())
		}
		cclog.ComponentDebug(a.name, "SERVER DONE")
	}()
	cclog.ComponentDebug(a.name, "STARTED")
}

// StatsFunc is the HTTP handler that renders the published counters as JSON
func (a *statsApi) StatsFunc(w http.ResponseWriter, r *http.Request) {
	data, err := json.Marshal(a.outStats)
	if err == nil {
		w.Header().Set("Content-Type", "application/json")
		io.WriteString(w, string(data))
	}
}

// Close finishes / stops the stats API
func (a *statsApi) Close() {
	cclog.ComponentDebug(a.name, "CLOSE")
	a.indone <- true
	a.outdone <- true
	a.server.Shutdown(context.Background())
	// wait for the close of the channels indone and outdone
	<-a.indone
	<-a.outdone
	a.statsWg.Wait()
	a.wg.Done()
}

func NewStatsApi(ticker mct.MultiChanTicker, wg *sync.WaitGroup, statsApiConfigfile string) (StatsApi, error) {
	a := new(statsApi)
	a.name = "StatsApi"
	a.config.Host = "localhost"
	a.config.Port = "8080"
	configFile, err := os.Open(statsApiConfigfile)
	if err != nil {
		cclog.ComponentError(a.name, err.Error())
		return nil, err
	}
	defer configFile.Close()
	jsonParser := json.NewDecoder(configFile)
	err = jsonParser.Decode(&a.config)
	if err != nil {
		cclog.ComponentError(a.name, err.Error())
		return nil, err
	}
	a.input = make(chan lp.CCMetric)
	a.ticker = ticker
	a.tickchan = make(chan time.Time)
	a.wg = wg
	a.indone = make(chan bool)
	a.outdone = make(chan bool)
	a.router = mux.NewRouter()
	a.baseurl = fmt.Sprintf("%s:%s", a.config.Host, a.config.Port)
	a.server = &http.Server{Addr: a.baseurl, Handler: a.router}
	if a.config.PublishCollectorState {
		a.router.HandleFunc("/", a.StatsFunc)
	}
	statsApiServer = a
	return a, nil
}

// ComponentStatInt publishes an integer counter for the given component to the stats API
func ComponentStatInt(component string, key string, value int64) {
	if statsApiServer == nil {
		return
	}
	y, err := lp.New("_stats", map[string]string{}, map[string]string{"source": component}, map[string]interface{}{key: value}, time.Now())
	if err == nil {
		statsApiServer.input <- y
	}
}

// ComponentStatString publishes a string value for the given component to the stats API
func ComponentStatString(component string, key string, value string) {
	if statsApiServer == nil {
		return
	}
	y, err := lp.New("_stats", map[string]string{}, map[string]string{"source": component}, map[string]interface{}{key: value}, time.Now())
	if err == nil {
		statsApiServer.input <- y
	}
}
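
Components publish their counters through these two helpers; the collectors and sinks in this changeset do exactly that. The sketch below shows how the API could be started and fed; note that `mct.NewTicker`, the config file name and the component/counter names are assumptions for illustration and not taken from this changeset:

```go
package main

import (
	"sync"
	"time"

	mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
	mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
)

func main() {
	var wg sync.WaitGroup
	// Assumption: multiChanTicker exposes a NewTicker constructor
	ticker := mct.NewTicker(10 * time.Second)

	// "stats_api.json" is a made-up config file name
	stats, err := mr.NewStatsApi(ticker, &wg, "stats_api.json")
	if err != nil {
		panic(err)
	}
	stats.Start()

	// A component reports a counter under its own name;
	// both names below are purely illustrative.
	mr.ComponentStatInt("MyCollector", "metrics_read", 42)

	stats.Close()
	wg.Wait()
}
```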

View File

@@ -4,11 +4,11 @@ import (
 	"sync"
 	"time"

-	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
+	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
 	agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
-	mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
+	mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
 )

@@ -16,7 +16,7 @@ type metricCachePeriod struct {
 	stopstamp   time.Time
 	numMetrics  int
 	sizeMetrics int
-	metrics     []lp.CCMessage
+	metrics     []lp.CCMetric
 }

 // Metric cache data structure

@@ -29,21 +29,21 @@ type metricCache struct {
 	ticker    mct.MultiChanTicker
 	tickchan  chan time.Time
 	done      chan bool
-	output    chan lp.CCMessage
+	output    chan lp.CCMetric
 	aggEngine agg.MetricAggregator
 }

 type MetricCache interface {
-	Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
+	Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
 	Start()
-	Add(metric lp.CCMessage)
-	GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage)
+	Add(metric lp.CCMetric)
+	GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric)
 	AddAggregation(name, function, condition string, tags, meta map[string]string) error
 	DeleteAggregation(name string) error
 	Close()
 }

-func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
+func (c *metricCache) Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
 	var err error = nil
 	c.done = make(chan bool)
 	c.wg = wg

@@ -55,7 +55,7 @@ func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker,
 		p := new(metricCachePeriod)
 		p.numMetrics = 0
 		p.sizeMetrics = 0
-		p.metrics = make([]lp.CCMessage, 0)
+		p.metrics = make([]lp.CCMetric, 0)
 		c.intervals = append(c.intervals, p)
 	}

@@ -124,7 +124,7 @@ func (c *metricCache) Start() {
 // Add a metric to the cache. The interval is defined by the global timer (rotate() in Start()).
 // The intervals list is used as a round-robin buffer and the metric list grows dynamically
 // to avoid reallocations
-func (c *metricCache) Add(metric lp.CCMessage) {
+func (c *metricCache) Add(metric lp.CCMetric) {
 	if c.curPeriod >= 0 && c.curPeriod < c.numPeriods {
 		c.lock.Lock()
 		p := c.intervals[c.curPeriod]

@@ -153,10 +153,10 @@ func (c *metricCache) DeleteAggregation(name string) error {
 // Get all metrics of an interval. The index is the difference to the current interval, so index=0
 // is the current one, index=1 the last interval and so on. Returns an empty array if a wrong index
 // is given (negative index, index larger than configured number of total intervals, ...)
-func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage) {
+func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric) {
 	var start time.Time = time.Now()
 	var stop time.Time = time.Now()
-	var metrics []lp.CCMessage
+	var metrics []lp.CCMetric
 	if index >= 0 && index < c.numPeriods {
 		pindex := c.curPeriod - index
 		if pindex < 0 {

@@ -168,10 +168,10 @@ func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage
 			metrics = c.intervals[pindex].metrics
 			//return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
 		} else {
-			metrics = make([]lp.CCMessage, 0)
+			metrics = make([]lp.CCMetric, 0)
 		}
 	} else {
-		metrics = make([]lp.CCMessage, 0)
+		metrics = make([]lp.CCMetric, 0)
 	}
 	return start, stop, metrics
 }

@@ -182,7 +182,7 @@ func (c *metricCache) Close() {
 	c.done <- true
 }

-func NewCache(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
+func NewCache(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
 	c := new(metricCache)
 	err := c.Init(output, ticker, wg, numPeriods)
 	if err != nil {
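
`GetPeriod` counts backwards from the current interval: index 0 is the interval currently being filled, index 1 the previous one, and so on. A standalone usage sketch under the same assumptions as the Stats API example above (`mct.NewTicker` is assumed; no aggregations are registered, so the output channel stays silent):

```go
package main

import (
	"fmt"
	"sync"
	"time"

	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
	mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
	mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
)

func main() {
	var wg sync.WaitGroup
	ticker := mct.NewTicker(10 * time.Second)
	out := make(chan lp.CCMetric, 16) // receives metrics derived by aggregations

	cache, err := mr.NewCache(out, ticker, &wg, 2)
	if err != nil {
		panic(err)
	}
	cache.Start()

	m, _ := lp.New("load_one", map[string]string{"type": "node"},
		map[string]string{}, map[string]interface{}{"value": 1.23}, time.Now())
	cache.Add(m)

	// index 0 is the interval that is currently being filled
	start, stop, metrics := cache.GetPeriod(0)
	fmt.Println(start, stop, len(metrics))

	cache.Close()
}
```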

View File

@@ -2,18 +2,16 @@ package metricRouter

 import (
 	"encoding/json"
-	"fmt"
 	"os"
 	"strings"
 	"sync"
 	"time"

-	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
 	agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
-	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
-	mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
+	mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
 )

 const ROUTER_MAX_FORWARD = 50

@@ -37,36 +35,39 @@ type metricRouterConfig struct {
 	IntervalStamp     bool `json:"interval_timestamp"`  // Update timestamp periodically by ticker each interval?
 	NumCacheIntervals int  `json:"num_cache_intervals"` // Number of intervals of cached metrics for evaluation
 	MaxForward        int  `json:"max_forward"`         // Number of maximal forwarded metrics at one select
-	NormalizeUnits    bool `json:"normalize_units"`     // Check unit meta flag and normalize it using cc-units
-	ChangeUnitPrefix  map[string]string `json:"change_unit_prefix"` // Add prefix that should be applied to the metrics
-	// dropMetrics map[string]bool // Internal map for O(1) lookup
-	MessageProcessor json.RawMessage `json:"process_message,omitempty"`
+	dropMetrics       map[string]bool // Internal map for O(1) lookup
 }

 // Metric router data structure
 type metricRouter struct {
 	hostname    string              // Hostname used in tags
-	coll_input  chan lp.CCMessage   // Input channel from CollectorManager
-	recv_input  chan lp.CCMessage   // Input channel from ReceiveManager
-	cache_input chan lp.CCMessage   // Input channel from MetricCache
-	outputs     []chan lp.CCMessage // List of all output channels
+	coll_input  chan lp.CCMetric    // Input channel from CollectorManager
+	recv_input  chan lp.CCMetric    // Input channel from ReceiveManager
+	cache_input chan lp.CCMetric    // Input channel from MetricCache
+	outputs     []chan lp.CCMetric  // List of all output channels
 	done        chan bool           // channel to finish / stop metric router
 	wg          *sync.WaitGroup     // wait group for all goroutines in cc-metric-collector
 	timestamp   time.Time           // timestamp periodically updated by ticker each interval
+	timerdone   chan bool           // channel to finish / stop timestamp updater
 	ticker      mct.MultiChanTicker // periodically ticking once each interval
 	config      metricRouterConfig  // json encoded config for metric router
 	cache       MetricCache         // pointer to MetricCache
 	cachewg     sync.WaitGroup      // wait group for MetricCache
 	maxForward  int                 // number of metrics to forward maximally in one iteration
-	mp          mp.MessageProcessor
+	statsCollForward  int64
+	statsRecvForward  int64
+	statsCacheForward int64
+	statsTotalForward int64
+	statsDropped      int64
+	statsRenamed      int64
 }

 // MetricRouter access functions
 type MetricRouter interface {
 	Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error
-	AddCollectorInput(input chan lp.CCMessage)
-	AddReceiverInput(input chan lp.CCMessage)
-	AddOutput(output chan lp.CCMessage)
+	AddCollectorInput(input chan lp.CCMetric)
+	AddReceiverInput(input chan lp.CCMetric)
+	AddOutput(output chan lp.CCMetric)
 	Start()
 	Close()
 }

@@ -78,9 +79,9 @@ type MetricRouter interface {
 // * ticker (from variable ticker)
 // * configuration (read from config file in variable routerConfigFile)
 func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error {
-	r.outputs = make([]chan lp.CCMessage, 0)
+	r.outputs = make([]chan lp.CCMetric, 0)
 	r.done = make(chan bool)
-	r.cache_input = make(chan lp.CCMessage)
+	r.cache_input = make(chan lp.CCMetric)
 	r.wg = wg
 	r.ticker = ticker
 	r.config.MaxForward = ROUTER_MAX_FORWARD

@@ -122,56 +123,44 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
 			r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
 		}
 	}
-	p, err := mp.NewMessageProcessor()
-	if err != nil {
-		return fmt.Errorf("initialization of message processor failed: %v", err.Error())
-	}
-	r.mp = p
-
-	if len(r.config.MessageProcessor) > 0 {
-		err = r.mp.FromConfigJSON(r.config.MessageProcessor)
-		if err != nil {
-			return fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
-		}
-	}
+	r.config.dropMetrics = make(map[string]bool)
 	for _, mname := range r.config.DropMetrics {
-		r.mp.AddDropMessagesByName(mname)
+		r.config.dropMetrics[mname] = true
 	}
-	for _, cond := range r.config.DropMetricsIf {
-		r.mp.AddDropMessagesByCondition(cond)
-	}
-	for _, data := range r.config.AddTags {
-		cond := data.Condition
-		if cond == "*" {
-			cond = "true"
-		}
-		r.mp.AddAddTagsByCondition(cond, data.Key, data.Value)
-	}
-	for _, data := range r.config.DelTags {
-		cond := data.Condition
-		if cond == "*" {
-			cond = "true"
-		}
-		r.mp.AddDeleteTagsByCondition(cond, data.Key, data.Value)
-	}
-	for oldname, newname := range r.config.RenameMetrics {
-		r.mp.AddRenameMetricByName(oldname, newname)
-	}
-	for metricName, prefix := range r.config.ChangeUnitPrefix {
-		r.mp.AddChangeUnitPrefix(fmt.Sprintf("name == '%s'", metricName), prefix)
-	}
-	r.mp.SetNormalizeUnits(r.config.NormalizeUnits)
-
-	r.mp.AddAddTagsByCondition("true", r.config.HostnameTagName, r.hostname)
-
-	// r.config.dropMetrics = make(map[string]bool)
-	// for _, mname := range r.config.DropMetrics {
-	// 	r.config.dropMetrics[mname] = true
-	// }
+	r.statsCollForward = 0
+	r.statsRecvForward = 0
+	r.statsCacheForward = 0
+	r.statsTotalForward = 0
+	r.statsDropped = 0
+	r.statsRenamed = 0
 	return nil
 }

-func getParamMap(point lp.CCMessage) map[string]interface{} {
+// StartTimer starts a timer which updates timestamp periodically
+func (r *metricRouter) StartTimer() {
+	m := make(chan time.Time)
+	r.ticker.AddChannel(m)
+	r.timerdone = make(chan bool)
+
+	r.wg.Add(1)
+	go func() {
+		defer r.wg.Done()
+		for {
+			select {
+			case <-r.timerdone:
+				close(r.timerdone)
+				cclog.ComponentDebug("MetricRouter", "TIMER DONE")
+				return
+			case t := <-m:
+				cclog.ComponentDebug("MetricRouter", "INTERVAL_TICK", t.Unix())
+				r.timestamp = t
+			}
+		}
+	}()
+	cclog.ComponentDebug("MetricRouter", "TIMER START")
+}
+
+func getParamMap(point lp.CCMetric) map[string]interface{} {
 	params := make(map[string]interface{})
 	params["metric"] = point
 	params["name"] = point.Name()

@@ -189,7 +178,7 @@ func getParamMap(point lp.CCMessage) map[string]interface{} {
 }

 // DoAddTags adds a tag when condition is fulfilled
-func (r *metricRouter) DoAddTags(point lp.CCMessage) {
+func (r *metricRouter) DoAddTags(point lp.CCMetric) {
 	var conditionMatches bool
 	for _, m := range r.config.AddTags {
 		if m.Condition == "*" {

@@ -211,89 +200,56 @@ func (r *metricRouter) DoAddTags(point lp.CCMessage) {
 }

 // DoDelTags removes a tag when condition is fulfilled
-// func (r *metricRouter) DoDelTags(point lp.CCMessage) {
-// 	var conditionMatches bool
-// 	for _, m := range r.config.DelTags {
-// 		if m.Condition == "*" {
-// 			// Condition is always matched
-// 			conditionMatches = true
-// 		} else {
-// 			// Evaluate condition
-// 			var err error
-// 			conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
-// 			if err != nil {
-// 				cclog.ComponentError("MetricRouter", err.Error())
-// 				conditionMatches = false
-// 			}
-// 		}
-// 		if conditionMatches {
-// 			point.RemoveTag(m.Key)
-// 		}
-// 	}
-// }
+func (r *metricRouter) DoDelTags(point lp.CCMetric) {
+	var conditionMatches bool
+	for _, m := range r.config.DelTags {
+		if m.Condition == "*" {
+			// Condition is always matched
+			conditionMatches = true
+		} else {
+			// Evaluate condition
+			var err error
+			conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
+			if err != nil {
+				cclog.ComponentError("MetricRouter", err.Error())
+				conditionMatches = false
+			}
+		}
+		if conditionMatches {
+			point.RemoveTag(m.Key)
+		}
+	}
+}

 // Conditional test whether a metric should be dropped
-// func (r *metricRouter) dropMetric(point lp.CCMessage) bool {
-// 	// Simple drop check
-// 	if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
-// 		return conditionMatches
-// 	}
-// 	// Checking the dropping conditions
-// 	for _, m := range r.config.DropMetricsIf {
-// 		conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
-// 		if err != nil {
-// 			cclog.ComponentError("MetricRouter", err.Error())
-// 			conditionMatches = false
-// 		}
-// 		if conditionMatches {
-// 			return conditionMatches
-// 		}
-// 	}
-// 	// No dropping condition met
-// 	return false
-// }
-
-// func (r *metricRouter) prepareUnit(point lp.CCMessage) bool {
+func (r *metricRouter) dropMetric(point lp.CCMetric) bool {
+	// Simple drop check
+	if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
+		return conditionMatches
+	}
+	// Checking the dropping conditions
+	for _, m := range r.config.DropMetricsIf {
+		conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
+		if err != nil {
+			cclog.ComponentError("MetricRouter", err.Error())
+			conditionMatches = false
+		}
+		if conditionMatches {
+			return conditionMatches
+		}
+	}
+	// No dropping condition met
+	return false
+}
// if r.config.NormalizeUnits {
// if in_unit, ok := point.GetMeta("unit"); ok {
// u := units.NewUnit(in_unit)
// if u.Valid() {
// point.AddMeta("unit", u.Short())
// }
// }
// }
// if newP, ok := r.config.ChangeUnitPrefix[point.Name()]; ok {
// newPrefix := units.NewPrefix(newP)
// if in_unit, ok := point.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
// u := units.NewUnit(in_unit)
// if u.Valid() {
// cclog.ComponentDebug("MetricRouter", "Change prefix to", newP, "for metric", point.Name())
// conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
// if conv != nil && out_unit.Valid() {
// if val, ok := point.GetField("value"); ok {
// point.AddField("value", conv(val))
// point.AddMeta("unit", out_unit.Short())
// }
// }
// }
// }
// }
// return true
// }
// Start starts the metric router // Start starts the metric router
func (r *metricRouter) Start() { func (r *metricRouter) Start() {
// start timer if configured // start timer if configured
r.timestamp = time.Now() r.timestamp = time.Now()
timeChan := make(chan time.Time)
if r.config.IntervalStamp { if r.config.IntervalStamp {
r.ticker.AddChannel(timeChan) r.StartTimer()
} }
// Router manager is done // Router manager is done
@@ -304,75 +260,80 @@ func (r *metricRouter) Start() {
// Forward takes a received metric, adds or deletes tags // Forward takes a received metric, adds or deletes tags
// and forwards it to the output channels // and forwards it to the output channels
// forward := func(point lp.CCMessage) { forward := func(point lp.CCMetric) {
// cclog.ComponentDebug("MetricRouter", "FORWARD", point) cclog.ComponentDebug("MetricRouter", "FORWARD", point)
// r.DoAddTags(point) r.DoAddTags(point)
// r.DoDelTags(point) r.DoDelTags(point)
// name := point.Name() name := point.Name()
// if new, ok := r.config.RenameMetrics[name]; ok { if new, ok := r.config.RenameMetrics[name]; ok {
// point.SetName(new) r.statsRenamed++
// point.AddMeta("oldname", name) ComponentStatInt("MetricRouter", "renamed", r.statsRenamed)
// r.DoAddTags(point) point.SetName(new)
// r.DoDelTags(point) point.AddMeta("oldname", name)
// } }
r.DoAddTags(point)
r.DoDelTags(point)
// r.prepareUnit(point) for _, o := range r.outputs {
o <- point
// for _, o := range r.outputs { }
// o <- point }
// }
// }
// Foward message received from collector channel // Foward message received from collector channel
coll_forward := func(p lp.CCMessage) { coll_forward := func(p lp.CCMetric) {
// receive from metric collector // receive from metric collector
//p.AddTag(r.config.HostnameTagName, r.hostname) p.AddTag(r.config.HostnameTagName, r.hostname)
if r.config.IntervalStamp { if r.config.IntervalStamp {
p.SetTime(r.timestamp) p.SetTime(r.timestamp)
} }
m, err := r.mp.ProcessMessage(p) if !r.dropMetric(p) {
if err == nil && m != nil { r.statsCollForward++
for _, o := range r.outputs { r.statsTotalForward++
o <- m ComponentStatInt("MetricRouter", "collector_forward", r.statsCollForward)
} ComponentStatInt("MetricRouter", "total_forward", r.statsTotalForward)
forward(p)
} else {
r.statsDropped++
ComponentStatInt("MetricRouter", "dropped", r.statsDropped)
} }
// if !r.dropMetric(p) {
// for _, o := range r.outputs {
// o <- point
// }
// }
// even if the metric is dropped, it is stored in the cache for // even if the metric is dropped, it is stored in the cache for
// aggregations // aggregations
if r.config.NumCacheIntervals > 0 { if r.config.NumCacheIntervals > 0 {
r.cache.Add(m) r.cache.Add(p)
} }
} }
// Forward message received from receivers channel // Forward message received from receivers channel
recv_forward := func(p lp.CCMessage) { recv_forward := func(p lp.CCMetric) {
// receive from receive manager // receive from receive manager
if r.config.IntervalStamp { if r.config.IntervalStamp {
p.SetTime(r.timestamp) p.SetTime(r.timestamp)
} }
m, err := r.mp.ProcessMessage(p) if !r.dropMetric(p) {
if err == nil && m != nil { r.statsRecvForward++
for _, o := range r.outputs { r.statsTotalForward++
o <- m ComponentStatInt("MetricRouter", "receiver_forward", r.statsRecvForward)
} ComponentStatInt("MetricRouter", "total_forward", r.statsTotalForward)
forward(p)
} else {
r.statsDropped++
ComponentStatInt("MetricRouter", "dropped", r.statsDropped)
} }
// if !r.dropMetric(p) {
// forward(p)
// }
} }
// Forward message received from cache channel // Forward message received from cache channel
cache_forward := func(p lp.CCMessage) { cache_forward := func(p lp.CCMetric) {
// receive from metric collector // receive from metric collector
m, err := r.mp.ProcessMessage(p) if !r.dropMetric(p) {
if err == nil && m != nil { p.AddTag(r.config.HostnameTagName, r.hostname)
for _, o := range r.outputs { r.statsCacheForward++
o <- m r.statsTotalForward++
} ComponentStatInt("MetricRouter", "cache_forward", r.statsCacheForward)
ComponentStatInt("MetricRouter", "total_forward", r.statsTotalForward)
forward(p)
} else {
r.statsDropped++
ComponentStatInt("MetricRouter", "dropped", r.statsDropped)
} }
} }
@@ -391,10 +352,6 @@ func (r *metricRouter) Start() {
done() done()
return return
case timestamp := <-timeChan:
r.timestamp = timestamp
cclog.ComponentDebug("MetricRouter", "Update timestamp", r.timestamp.UnixNano())
case p := <-r.coll_input: case p := <-r.coll_input:
coll_forward(p) coll_forward(p)
for i := 0; len(r.coll_input) > 0 && i < (r.maxForward-1); i++ { for i := 0; len(r.coll_input) > 0 && i < (r.maxForward-1); i++ {
@@ -419,17 +376,17 @@ func (r *metricRouter) Start() {
} }
// AddCollectorInput adds a channel between metric collector and metric router // AddCollectorInput adds a channel between metric collector and metric router
func (r *metricRouter) AddCollectorInput(input chan lp.CCMessage) { func (r *metricRouter) AddCollectorInput(input chan lp.CCMetric) {
r.coll_input = input r.coll_input = input
} }
// AddReceiverInput adds a channel between metric receiver and metric router // AddReceiverInput adds a channel between metric receiver and metric router
func (r *metricRouter) AddReceiverInput(input chan lp.CCMessage) { func (r *metricRouter) AddReceiverInput(input chan lp.CCMetric) {
r.recv_input = input r.recv_input = input
} }
// AddOutput adds a output channel to the metric router // AddOutput adds a output channel to the metric router
func (r *metricRouter) AddOutput(output chan lp.CCMessage) { func (r *metricRouter) AddOutput(output chan lp.CCMetric) {
r.outputs = append(r.outputs, output) r.outputs = append(r.outputs, output)
} }
@@ -440,6 +397,14 @@ func (r *metricRouter) Close() {
// wait for close of channel r.done // wait for close of channel r.done
<-r.done <-r.done
// stop timer
if r.config.IntervalStamp {
cclog.ComponentDebug("MetricRouter", "TIMER CLOSE")
r.timerdone <- true
// wait for close of channel r.timerdone
<-r.timerdone
}
// stop metric cache // stop metric cache
if r.config.NumCacheIntervals > 0 { if r.config.NumCacheIntervals > 0 {
cclog.ComponentDebug("MetricRouter", "CACHE CLOSE") cclog.ComponentDebug("MetricRouter", "CACHE CLOSE")

View File

@@ -3,7 +3,7 @@ package multiChanTicker
 import (
 	"time"

-	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
+	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
 )

 type multiChanTicker struct {

View File

@@ -1,57 +0,0 @@
# ClusterCockpit metrics
As described in the [ClusterCockpit specifications](https://github.com/ClusterCockpit/cc-specifications), the whole ClusterCockpit stack uses metrics in the InfluxDB line protocol format. This is also the input and output format for the ClusterCockpit Metric Collector, but internally it uses an extended format, named CCMetric, while processing.
It is basically a copy of the [InfluxDB line protocol](https://github.com/influxdata/line-protocol) `MutableMetric` interface with one extension. Besides the tags and fields, it contains a list of meta information (re-using the `Tag` structure of the original protocol):
```golang
type ccMetric struct {
name string // Measurement name
meta map[string]string // map of meta data tags
tags map[string]string // map of tags
fields map[string]interface{} // map of fields
tm time.Time // timestamp
}
type CCMetric interface {
ToPoint(metaAsTags map[string]bool) *write.Point // Generate influxDB point for data type ccMetric
ToLineProtocol(metaAsTags map[string]bool) string // Generate influxDB line protocol for data type ccMetric
String() string // Return line-protocol like string
Name() string // Get metric name
SetName(name string) // Set metric name
Time() time.Time // Get timestamp
SetTime(t time.Time) // Set timestamp
Tags() map[string]string // Map of tags
AddTag(key, value string) // Add a tag
GetTag(key string) (value string, ok bool) // Get a tag by its key
HasTag(key string) (ok bool) // Check if a tag key is present
RemoveTag(key string) // Remove a tag by its key
Meta() map[string]string // Map of meta data tags
AddMeta(key, value string) // Add a meta data tag
GetMeta(key string) (value string, ok bool) // Get a meta data tag addressed by its key
HasMeta(key string) (ok bool) // Check if a meta data key is present
RemoveMeta(key string) // Remove a meta data tag by its key
Fields() map[string]interface{} // Map of fields
AddField(key string, value interface{}) // Add a field
GetField(key string) (value interface{}, ok bool) // Get a field addressed by its key
HasField(key string) (ok bool) // Check if a field key is present
RemoveField(key string) // Remove a field addressed by its key
}
func New(name string, tags map[string]string, meta map[string]string, fields map[string]interface{}, tm time.Time) (CCMetric, error)
func FromMetric(other CCMetric) CCMetric
func FromInfluxMetric(other lp.Metric) CCMetric
```
The `CCMetric` interface provides the same functions as the `MutableMetric` like `{Add, Get, Remove, Has}{Tag, Field}` and additionally provides `{Add, Get, Remove, Has}Meta`.
The InfluxDB protocol creates a new metric with `influx.New(name, tags, fields, time)` while CCMetric uses `ccMetric.New(name, tags, meta, fields, time)` where `tags` and `meta` are both of type `map[string]string`.
You can copy a CCMetric with `FromMetric(other CCMetric) CCMetric`. If you get an `influx.Metric` from a function, like the line protocol parser, you can use `FromInfluxMetric(other influx.Metric) CCMetric` to get a CCMetric out of it (see `NatsReceiver` for an example).
Although the [cc-specifications](https://github.com/ClusterCockpit/cc-specifications/blob/master/interfaces/lineprotocol/README.md) defines that there is only a `value` field for the metric value, a CCMetric can still have multiple values, similar to the InfluxDB line protocol.
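For illustration, a minimal sketch of creating and serializing such a metric (assuming the package is imported as `lp`, e.g. from `github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric`; all names and values here are made up):
```golang
m, err := lp.New(
	"flops_any",                           // measurement name
	map[string]string{"type": "node"},     // tags
	map[string]string{"unit": "GF/s"},     // meta information
	map[string]interface{}{"value": 42.0}, // fields
	time.Now(),                            // timestamp
)
if err == nil {
	m.AddMeta("source", "example")
	// Emit the meta key "unit" as a tag during serialization
	fmt.Println(m.ToLineProtocol(map[string]bool{"unit": true}))
}
```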

View File

@@ -1,463 +0,0 @@
package ccTopology
import (
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
cclogger "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
"golang.org/x/exp/slices"
)
const SYSFS_CPUBASE = `/sys/devices/system/cpu`
// Structure holding all information about a hardware thread
// See https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-devices-system-cpu
type HwthreadEntry struct {
// for each CPUx:
CpuID int // CPU / hardware thread ID
SMT int // Simultaneous Multithreading ID
CoreCPUsList []int // CPUs within the same core
Core int // Socket local core ID
Socket int // Sockets (physical) ID
Die int // Die ID
NumaDomain int // NUMA Domain
}
var cache struct {
HwthreadList []int // List of CPU hardware threads
SMTList []int // List of simultaneous multithreading (SMT) IDs
CoreList []int // List of CPU core IDs
SocketList []int // List of CPU sockets (physical) IDs
DieList []int // List of CPU Die IDs
NumaDomainList []int // List of NUMA Domains
CpuData []HwthreadEntry
}
// fileToInt reads an integer value from a sysfs file
// In case of an error -1 is returned
func fileToInt(path string) int {
buffer, err := os.ReadFile(path)
if err != nil {
log.Print(err)
cclogger.ComponentError("ccTopology", "fileToInt", "Reading", path, ":", err.Error())
return -1
}
stringBuffer := strings.TrimSpace(string(buffer))
id, err := strconv.Atoi(stringBuffer)
if err != nil {
cclogger.ComponentError("ccTopology", "fileToInt", "Parsing", path, ":", stringBuffer, err.Error())
return -1
}
return id
}
// fileToList reads a list from a sysfs file
// A list consists of value ranges separated by commas
// A range can be a single value or a range of values given by a startValue-endValue
// In case of an error nil is returned
func fileToList(path string) []int {
// Read list
buffer, err := os.ReadFile(path)
if err != nil {
log.Print(err)
cclogger.ComponentError("ccTopology", "fileToList", "Reading", path, ":", err.Error())
return nil
}
// Create list
list := make([]int, 0)
stringBuffer := strings.TrimSpace(string(buffer))
for _, valueRangeString := range strings.Split(stringBuffer, ",") {
valueRange := strings.Split(valueRangeString, "-")
switch len(valueRange) {
case 1:
singleValue, err := strconv.Atoi(valueRange[0])
if err != nil {
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[0], ":", err.Error())
return nil
}
list = append(list, singleValue)
case 2:
startValue, err := strconv.Atoi(valueRange[0])
if err != nil {
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[0], ":", err.Error())
return nil
}
endValue, err := strconv.Atoi(valueRange[1])
if err != nil {
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[1], ":", err.Error())
return nil
}
for value := startValue; value <= endValue; value++ {
list = append(list, value)
}
}
}
return list
}
// init initializes the cache structure
func init() {
getHWThreads :=
func() []int {
globPath := filepath.Join(SYSFS_CPUBASE, "cpu[0-9]*")
regexPath := filepath.Join(SYSFS_CPUBASE, "cpu([[:digit:]]+)")
regex := regexp.MustCompile(regexPath)
// File globbing for hardware threads
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "init:getHWThreads", err.Error())
return nil
}
hwThreadIDs := make([]int, len(files))
for i, file := range files {
// Extract hardware thread ID
matches := regex.FindStringSubmatch(file)
if len(matches) != 2 {
cclogger.ComponentError("CCTopology", "init:getHWThreads: Failed to extract hardware thread ID from ", file)
return nil
}
// Convert hardware thread ID to int
id, err := strconv.Atoi(matches[1])
if err != nil {
cclogger.ComponentError("CCTopology", "init:getHWThreads: Failed to convert to int hardware thread ID ", matches[1])
return nil
}
hwThreadIDs[i] = id
}
// Sort hardware thread IDs
slices.Sort(hwThreadIDs)
return hwThreadIDs
}
getNumaDomain :=
func(basePath string) int {
globPath := filepath.Join(basePath, "node*")
regexPath := filepath.Join(basePath, "node([[:digit:]]+)")
regex := regexp.MustCompile(regexPath)
// File globbing for NUMA node
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", err.Error())
return -1
}
// Check, that exactly one NUMA domain was found
if len(files) != 1 {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Number of NUMA domains != 1: ", len(files))
return -1
}
// Extract NUMA node ID
matches := regex.FindStringSubmatch(files[0])
if len(matches) != 2 {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Failed to extract NUMA node ID from: ", files[0])
return -1
}
id, err := strconv.Atoi(matches[1])
if err != nil {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Failed to parse NUMA node ID from: ", matches[1])
return -1
}
return id
}
cache.HwthreadList = getHWThreads()
cache.CoreList = make([]int, len(cache.HwthreadList))
cache.SocketList = make([]int, len(cache.HwthreadList))
cache.DieList = make([]int, len(cache.HwthreadList))
cache.SMTList = make([]int, len(cache.HwthreadList))
cache.NumaDomainList = make([]int, len(cache.HwthreadList))
cache.CpuData = make([]HwthreadEntry, len(cache.HwthreadList))
for i, c := range cache.HwthreadList {
// Set cpuBase directory for topology lookup
cpuBase := filepath.Join(SYSFS_CPUBASE, fmt.Sprintf("cpu%d", c))
topoBase := filepath.Join(cpuBase, "topology")
// Lookup Core ID
cache.CoreList[i] = fileToInt(filepath.Join(topoBase, "core_id"))
// Lookup socket / physical package ID
cache.SocketList[i] = fileToInt(filepath.Join(topoBase, "physical_package_id"))
// Lookup CPU die id
cache.DieList[i] = fileToInt(filepath.Join(topoBase, "die_id"))
if cache.DieList[i] < 0 {
cache.DieList[i] = cache.SocketList[i]
}
// Lookup List of CPUs within the same core
coreCPUsList := fileToList(filepath.Join(topoBase, "core_cpus_list"))
// Find index of CPU ID in List of CPUs within the same core
// if not found return -1
cache.SMTList[i] = slices.Index(coreCPUsList, c)
// Lookup NUMA domain id
cache.NumaDomainList[i] = getNumaDomain(cpuBase)
cache.CpuData[i] =
HwthreadEntry{
CpuID: cache.HwthreadList[i],
SMT: cache.SMTList[i],
CoreCPUsList: coreCPUsList,
Socket: cache.SocketList[i],
NumaDomain: cache.NumaDomainList[i],
Die: cache.DieList[i],
Core: cache.CoreList[i],
}
}
slices.Sort(cache.HwthreadList)
cache.HwthreadList = slices.Compact(cache.HwthreadList)
slices.Sort(cache.SMTList)
cache.SMTList = slices.Compact(cache.SMTList)
slices.Sort(cache.CoreList)
cache.CoreList = slices.Compact(cache.CoreList)
slices.Sort(cache.SocketList)
cache.SocketList = slices.Compact(cache.SocketList)
slices.Sort(cache.DieList)
cache.DieList = slices.Compact(cache.DieList)
slices.Sort(cache.NumaDomainList)
cache.NumaDomainList = slices.Compact(cache.NumaDomainList)
}
// SocketList gets the list of CPU socket IDs
func SocketList() []int {
return slices.Clone(cache.SocketList)
}
// HwthreadList gets the list of hardware thread IDs in the order of listing in /proc/cpuinfo
func HwthreadList() []int {
return slices.Clone(cache.HwthreadList)
}
// Get list of hardware thread IDs in the order of listing in /proc/cpuinfo
// Deprecated! Use HwthreadList()
func CpuList() []int {
return HwthreadList()
}
// CoreList gets the list of CPU core IDs in the order of listing in /proc/cpuinfo
func CoreList() []int {
return slices.Clone(cache.CoreList)
}
// Get list of NUMA node IDs
func NumaNodeList() []int {
return slices.Clone(cache.NumaDomainList)
}
// DieList gets the list of CPU die IDs
func DieList() []int {
if len(cache.DieList) > 0 {
return slices.Clone(cache.DieList)
}
return SocketList()
}
// GetTypeList gets the list of specified type using the naming format inside ClusterCockpit
func GetTypeList(topology_type string) []int {
switch topology_type {
case "node":
return []int{0}
case "socket":
return SocketList()
case "die":
return DieList()
case "memoryDomain":
return NumaNodeList()
case "core":
return CoreList()
case "hwthread":
return HwthreadList()
}
return []int{}
}
func GetTypeId(hwt HwthreadEntry, topology_type string) (int, error) {
var err error = nil
switch topology_type {
case "node":
return 0, err
case "socket":
return hwt.Socket, err
case "die":
return hwt.Die, err
case "memoryDomain":
return hwt.NumaDomain, err
case "core":
return hwt.Core, err
case "hwthread":
return hwt.CpuID, err
}
return -1, fmt.Errorf("unknown topology type '%s'", topology_type)
}
// CpuData returns CPU data for each hardware thread
func CpuData() []HwthreadEntry {
// return a deep copy to protect cache data
c := slices.Clone(cache.CpuData)
for i := range c {
c[i].CoreCPUsList = slices.Clone(cache.CpuData[i].CoreCPUsList)
}
return c
}
// Structure holding basic information about a CPU
type CpuInformation struct {
NumHWthreads int
SMTWidth int
NumSockets int
NumDies int
NumCores int
NumNumaDomains int
}
// CpuInformation reports basic information about the CPU
func CpuInfo() CpuInformation {
return CpuInformation{
NumNumaDomains: len(cache.NumaDomainList),
SMTWidth: len(cache.SMTList),
NumDies: len(cache.DieList),
NumCores: len(cache.CoreList),
NumSockets: len(cache.SocketList),
NumHWthreads: len(cache.HwthreadList),
}
}
// GetHwthreadSocket gets the CPU socket ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadSocket(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
return d.Socket
}
}
return -1
}
// GetHwthreadNumaDomain gets the NUMA domain ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadNumaDomain(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
return d.NumaDomain
}
}
return -1
}
// GetHwthreadDie gets the CPU die ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadDie(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
return d.Die
}
}
return -1
}
// GetHwthreadCore gets the CPU core ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadCore(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
return d.Core
}
}
return -1
}
// GetSocketHwthreads gets all hardware thread IDs associated with a CPU socket
func GetSocketHwthreads(socket int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.Socket == socket {
cpuList = append(cpuList, d.CpuID)
}
}
return cpuList
}
// GetNumaDomainHwthreads gets all hardware thread IDs associated with a NUMA domain
func GetNumaDomainHwthreads(numaDomain int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.NumaDomain == numaDomain {
cpuList = append(cpuList, d.CpuID)
}
}
return cpuList
}
// GetDieHwthreads gets all hardware thread IDs associated with a CPU die
func GetDieHwthreads(die int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.Die == die {
cpuList = append(cpuList, d.CpuID)
}
}
return cpuList
}
// GetCoreHwthreads get all hardware thread IDs associated with a CPU core
func GetCoreHwthreads(core int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.Core == core {
cpuList = append(cpuList, d.CpuID)
}
}
return cpuList
}
// GetTypeHwthreads gets all hardware thread IDs associated with the specified topology type and ID, using the naming format inside ClusterCockpit
func GetTypeHwthreads(topology_type string, id int) []int {
switch topology_type {
case "node":
return HwthreadList()
case "socket":
return GetSocketHwthreads(id)
case "die":
return GetDieHwthreads(id)
case "memoryDomain":
return GetNumaDomainHwthreads(id)
case "core":
return GetCoreHwthreads(id)
case "hwthread":
return []int{id}
}
return []int{}
}
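// Example (an illustrative sketch, not part of the original file): the
// accessor functions above can be combined, e.g. to list the hardware
// threads per socket:
//
//	info := CpuInfo()
//	for _, socket := range SocketList() {
//		hwts := GetSocketHwthreads(socket)
//		fmt.Printf("socket %d: %d of %d hardware threads\n", socket, len(hwts), info.NumHWthreads)
//	}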

View File

@@ -1,125 +0,0 @@
package hostlist
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
)
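// Expand resolves a hostlist expression consisting of prefix[ranges]suffix
// patterns into the sorted, de-duplicated list of host names.
//
// Illustrative sketch (behavior as exercised by the test file below):
//
//	hosts, err := Expand("n[01-03],m1")
//	// hosts == []string{"m1", "n01", "n02", "n03"}, err == nil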
func Expand(in string) (result []string, err error) {
// Create ranges regular expression
reStNumber := "[[:digit:]]+"
reStRange := reStNumber + "-" + reStNumber
reStOptionalNumberOrRange := "(" + reStNumber + ",|" + reStRange + ",)*"
reStNumberOrRange := "(" + reStNumber + "|" + reStRange + ")"
reStBraceLeft := "[[]"
reStBraceRight := "[]]"
reStRanges := reStBraceLeft +
reStOptionalNumberOrRange +
reStNumberOrRange +
reStBraceRight
reRanges := regexp.MustCompile(reStRanges)
// Create host list regular expression
reStDNSChars := "[a-zA-Z0-9-]+"
reStPrefix := "^(" + reStDNSChars + ")"
reStOptionalSuffix := "(" + reStDNSChars + ")?"
re := regexp.MustCompile(reStPrefix + "([[][0-9,-]+[]])?" + reStOptionalSuffix)
// Remove all delimiters from the input
in = strings.TrimLeft(in, ", ")
for len(in) > 0 {
if v := re.FindStringSubmatch(in); v != nil {
// Remove matched part from the input
lenPrefix := len(v[0])
in = in[lenPrefix:]
// Remove all delimiters from the input
in = strings.TrimLeft(in, ", ")
// matched prefix, range and suffix
hlPrefix := v[1]
hlRanges := v[2]
hlSuffix := v[3]
// Single node without ranges
if hlRanges == "" {
result = append(result, hlPrefix)
continue
}
// Node with ranges
if v := reRanges.FindStringSubmatch(hlRanges); v != nil {
// Remove braces
hlRanges = hlRanges[1 : len(hlRanges)-1]
// Split host ranges at ,
for _, hlRange := range strings.Split(hlRanges, ",") {
// Split host range at -
RangeStartEnd := strings.Split(hlRange, "-")
// Range is only a single number
if len(RangeStartEnd) == 1 {
result = append(result, hlPrefix+RangeStartEnd[0]+hlSuffix)
continue
}
// Range has a start and an end
widthRangeStart := len(RangeStartEnd[0])
widthRangeEnd := len(RangeStartEnd[1])
iStart, _ := strconv.ParseUint(RangeStartEnd[0], 10, 64)
iEnd, _ := strconv.ParseUint(RangeStartEnd[1], 10, 64)
if iStart > iEnd {
return nil, fmt.Errorf("single range start is greater than end: %s", hlRange)
}
// Create print format string for range numbers
doPadding := widthRangeStart == widthRangeEnd
widthPadding := widthRangeStart
var formatString string
if doPadding {
formatString = "%0" + fmt.Sprint(widthPadding) + "d"
} else {
formatString = "%d"
}
formatString = hlPrefix + formatString + hlSuffix
// Add nodes from this range
for i := iStart; i <= iEnd; i++ {
result = append(result, fmt.Sprintf(formatString, i))
}
}
} else {
return nil, fmt.Errorf("not a hostlist range: %s", hlRanges)
}
} else {
return nil, fmt.Errorf("not a hostlist: %s", in)
}
}
if result != nil {
// sort
sort.Strings(result)
// uniq
previous := 1
for current := 1; current < len(result); current++ {
if result[current-1] != result[current] {
if previous != current {
result[previous] = result[current]
}
previous++
}
}
result = result[:previous]
}
return
}

View File

@@ -1,126 +0,0 @@
package hostlist
import (
"testing"
)
func TestExpand(t *testing.T) {
// Compare two slices of strings
equal := func(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
type testDefinition struct {
input string
resultExpected []string
errorExpected bool
}
expandTests := []testDefinition{
{
// Single node
input: "n1",
resultExpected: []string{"n1"},
errorExpected: false,
},
{
// Single node, duplicated
input: "n1,n1",
resultExpected: []string{"n1"},
errorExpected: false,
},
{
// Single node with padding
input: "n[01]",
resultExpected: []string{"n01"},
errorExpected: false,
},
{
// Single node with suffix
input: "n[01]-p",
resultExpected: []string{"n01-p"},
errorExpected: false,
},
{
// Multiple nodes with a single range
input: "n[1-2]",
resultExpected: []string{"n1", "n2"},
errorExpected: false,
},
{
// Multiple nodes with a single range and a single index
input: "n[1-2,3]",
resultExpected: []string{"n1", "n2", "n3"},
errorExpected: false,
},
{
// Multiple nodes with different prefixes
input: "n[1-2],m[1,2]",
resultExpected: []string{"m1", "m2", "n1", "n2"},
errorExpected: false,
},
{
// Multiple nodes with different suffixes
input: "n[1-2]-p,n[1,2]-q",
resultExpected: []string{"n1-p", "n1-q", "n2-p", "n2-q"},
errorExpected: false,
},
{
// Multiple nodes with and without node ranges
input: " n09, n[01-04,06-07,09] , , n10,n04",
resultExpected: []string{"n01", "n02", "n03", "n04", "n06", "n07", "n09", "n10"},
errorExpected: false,
},
{
// Forbidden DNS character
input: "n@",
resultExpected: []string{},
errorExpected: true,
},
{
// Forbidden range
input: "n[1-2-2,3]",
resultExpected: []string{},
errorExpected: true,
},
{
// Forbidden range limits
input: "n[2-1]",
resultExpected: []string{},
errorExpected: true,
},
}
for _, expandTest := range expandTests {
result, err := Expand(expandTest.input)
hasError := err != nil
if hasError != expandTest.errorExpected && hasError {
t.Errorf("Expand('%s') failed: unexpected error '%v'",
expandTest.input, err)
continue
}
if hasError != expandTest.errorExpected && !hasError {
t.Errorf("Expand('%s') did not fail as expected: got result '%+v'",
expandTest.input, result)
continue
}
if !hasError && !equal(result, expandTest.resultExpected) {
t.Errorf("Expand('%s') failed: got result '%+v', expected result '%v'",
expandTest.input, result, expandTest.resultExpected)
continue
}
t.Logf("Checked hostlist.Expand('%s'): result = '%+v', err = '%v'",
expandTest.input, result, err)
}
}

View File

@@ -1,266 +0,0 @@
# Message Processor Component
Multiple parts of the ClusterCockpit ecosystem require the processing of CCMessages.
The main CC application using it is `cc-metric-collector`. The processing part there was originally in the metric router, the central
hub connecting collectors (reading local data), receivers (receiving remote data) and sinks (sending data). Already in early stages, the
lack of flexibility caused some trouble:
> The sysadmins wanted to keep operating their Ganglia based monitoring infrastructure while we developed the CC stack. Ganglia wants the core metrics with
> a specific name and resolution (right unit prefix), but there was no conversion of the data in the CC stack, while CC frontend developers wanted a different
> resolution for some metrics. The issue was basically the `mem_used` metric showing the currently used memory of the node. Ganglia wants it in `kByte` as provided
> by the Linux operating system but CC wanted it in `GByte`.
With the message processor, the Ganglia sinks can apply the unit prefix changes individually and name the metrics as required by Ganglia.
## For developers
Whenever you receive or are about to send a message out, you should provide some processing.
### Configuration of component
New operations can be added to the message processor at runtime; they can also be removed again. For the initial setup, it is common to provide a configuration file,
or some fields in a larger configuration file, for the processing.
The message processor uses the following configuration
```json
{
"drop_messages": [
"name_of_message_to_drop"
],
"drop_messages_if": [
"condition_when_to_drop_message",
"name == 'drop_this'",
"tag.hostname == 'this_host'",
"meta.unit != 'MB'"
],
"rename_messages" : {
"old_message_name" : "new_message_name"
},
"rename_messages_if": {
"condition_when_to_rename_message" : "new_name"
},
"add_tags_if": [
{
"if" : "condition_when_to_add_tag",
"key": "name_for_new_tag",
"value": "new_tag_value"
}
],
"delete_tags_if": [
{
"if" : "condition_when_to_delete_tag",
"key": "name_of_tag"
}
],
"add_meta_if": [
{
"if" : "condition_when_to_add_meta_info",
"key": "name_for_new_meta_info",
"value": "new_meta_info_value"
}
],
"delete_meta_if": [
{
"if" : "condition_when_to_delete_meta_info",
"key": "name_of_meta_info"
}
],
"add_field_if": [
{
"if" : "condition_when_to_add_field",
"key": "name_for_new_field",
"value": "new_field_value_but_only_string_at_the_moment"
}
],
"delete_field_if": [
{
"if" : "condition_when_to_delete_field",
"key": "name_of_field"
}
],
"move_tag_to_meta_if": [
{
"if" : "condition_when_to_move_tag_to_meta_info_including_its_value",
"key": "name_of_tag",
"value": "name_of_meta_info"
}
],
"move_tag_to_field_if": [
{
"if" : "condition_when_to_move_tag_to_fields_including_its_value",
"key": "name_of_tag",
"value": "name_of_field"
}
],
"move_meta_to_tag_if": [
{
"if" : "condition_when_to_move_meta_info_to_tags_including_its_value",
"key": "name_of_meta_info",
"value": "name_of_tag"
}
],
"move_meta_to_field_if": [
{
"if" : "condition_when_to_move_meta_info_to_fields_including_its_value",
"key": "name_of_tag",
"value": "name_of_meta_info"
}
],
"move_field_to_tag_if": [
{
"if" : "condition_when_to_move_field_to_tags_including_its_stringified_value",
"key": "name_of_field",
"value": "name_of_tag"
}
],
"move_field_to_meta_if": [
{
"if" : "condition_when_to_move_field_to_meta_info_including_its_stringified_value",
"key": "name_of_field",
"value": "name_of_meta_info"
}
],
"drop_by_message_type": [
"metric",
"event",
"log",
"control"
],
"change_unit_prefix": {
"name == 'metric_with_wrong_unit_prefix'" : "G",
"only_if_messagetype == 'metric'": "T"
},
"normalize_units": true,
"add_base_env": {
"MY_CONSTANT_FOR_CUSTOM_CONDITIONS": 1.0,
"output_value_for_test_metrics": 42.0,
},
"stage_order": [
"rename_messages_if",
"drop_messages"
]
}
```
The options `change_unit_prefix` and `normalize_units` are only applied to CCMetrics. It is not possible to delete the field related to each message type as defined in [cc-specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/interfaces/lineprotocol). In short:
- CCMetrics always have to have a field named `value`
- CCEvents always have to have a field named `event`
- CCLogs always have to have a field named `log`
- CCControl messages always have to have a field named `control`
With `add_base_env`, one can specify mykey=myvalue pairs that can be used in conditions like `tag.type == mykey`.
The order in which each message is processed can be specified with the `stage_order` option. The stage names are the keys in the JSON configuration, thus `change_unit_prefix`, `move_field_to_meta_if`, etc. Stages can be listed multiple times.
### Using the component
In order to load the configuration from a `json.RawMessage`:
```golang
mp, err := NewMessageProcessor()
if err != nil {
log.Error("failed to create new message processor")
}
mp.FromConfigJSON(configJson)
```
After initialization and adding the different operations, the `ProcessMessage()` function applies all operations and returns the processed message, or `nil` if the message was dropped.
```golang
m := lp2.EmptyMessage() // from the cc-message package (lp2 in the interface listing below)
x, err := mp.ProcessMessage(m)
if err != nil {
// handle error
}
if x != nil {
// process x further
} else {
// this message got dropped
}
```
Single operations can be added and removed at runtime
```golang
type MessageProcessor interface {
// Functions to set the execution order of the processing stages
SetStages([]string) error
DefaultStages() []string
// Function to add variables to the base evaluation environment
AddBaseEnv(env map[string]interface{}) error
// Functions to add and remove rules
AddDropMessagesByName(name string) error
RemoveDropMessagesByName(name string)
AddDropMessagesByCondition(condition string) error
RemoveDropMessagesByCondition(condition string)
AddRenameMetricByCondition(condition string, name string) error
RemoveRenameMetricByCondition(condition string)
AddRenameMetricByName(from, to string) error
RemoveRenameMetricByName(from string)
SetNormalizeUnits(settings bool)
AddChangeUnitPrefix(condition string, prefix string) error
RemoveChangeUnitPrefix(condition string)
AddAddTagsByCondition(condition, key, value string) error
RemoveAddTagsByCondition(condition string)
AddDeleteTagsByCondition(condition, key, value string) error
RemoveDeleteTagsByCondition(condition string)
AddAddMetaByCondition(condition, key, value string) error
RemoveAddMetaByCondition(condition string)
AddDeleteMetaByCondition(condition, key, value string) error
RemoveDeleteMetaByCondition(condition string)
AddMoveTagToMeta(condition, key, value string) error
RemoveMoveTagToMeta(condition string)
AddMoveTagToFields(condition, key, value string) error
RemoveMoveTagToFields(condition string)
AddMoveMetaToTags(condition, key, value string) error
RemoveMoveMetaToTags(condition string)
AddMoveMetaToFields(condition, key, value string) error
RemoveMoveMetaToFields(condition string)
AddMoveFieldToTags(condition, key, value string) error
RemoveMoveFieldToTags(condition string)
AddMoveFieldToMeta(condition, key, value string) error
RemoveMoveFieldToMeta(condition string)
// Read in a JSON configuration
FromConfigJSON(config json.RawMessage) error
ProcessMessage(m lp2.CCMessage) (lp2.CCMessage, error)
// Processing functions for legacy CCMetric and current CCMessage
ProcessMetric(m lp.CCMetric) (lp2.CCMessage, error)
}
```
### Syntax for evaluatable terms
The message processor uses `gval` for evaluating the terms. It provides a basic set of operators like string comparison and arithmetic operations.
Accessible for operations are
- `name` of the message
- `timestamp` or `time` of the message
- `type`, `type-id` of the message (also `tag_type`, `tag_type-id` and `tag_typeid`)
- `stype`, `stype-id` of the message (if the message has these tags; also `tag_stype`, `tag_stype-id` and `tag_stypeid`)
- `value` for a CCMetric message (also `field_value`)
- `event` for a CCEvent message (also `field_event`)
- `control` for a CCControl message (also `field_control`)
- `log` for a CCLog message (also `field_log`)
- `messagetype` or `msgtype`. Possible values `event`, `metric`, `log` and `control`.
Generally, all tags are accessible with `tag_<tagkey>`, `tags_<tagkey>` or `tags.<tagkey>`. Similarly for all fields with `field[s]?[_.]<fieldkey>`. For meta information `meta[_.]<metakey>` (there is no `metas[_.]<metakey>`).
The [syntax of `expr`](https://expr-lang.org/docs/language-definition) is accepted with some additions:
- Comparing strings: `==`, `!=`, `str matches regex` (use `%` instead of `\`!)
- Combining conditions: `&&`, `||`
- Comparing numbers: `==`, `!=`, `<`, `>`, `<=`, `>=`
- Test lists: `<value> in <list>`
- Topological tests: `tag_type-id in getCpuListOfType("socket", "1")` (test if the metric belongs to socket 1 in local node topology)
Often the operations are written in JSON files for loading them at startup. In JSON, some characters are not allowed. Therefore, the term syntax reflects that:
- use `''` instead of `""` for strings
- for the regexes, use `%` instead of `\`
For operations that should be applied on all messages, use the condition `true`.
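As a sketch of how such terms are passed to the rule functions of the interface shown above (metric name, tag key and values here are made up):
```golang
// Drop messages by an evaluatable condition; note the single-quoted string
if err := mp.AddDropMessagesByCondition("name == 'mem_used' && value > 100"); err != nil {
	log.Error("failed to compile drop condition")
}
// The condition 'true' applies an operation to every message
mp.AddAddTagsByCondition("true", "cluster", "testcluster")
```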
### Overhead
The conditions of the operations are pre-compiled, which is commonly the time-consuming part, but of course with each added operation the time to process a message
increases. Moreover, the processing creates a copy of the message.

View File

@@ -1,988 +0,0 @@
package messageprocessor
import (
"encoding/json"
"fmt"
"strings"
"sync"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lplegacy "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/expr-lang/expr"
"github.com/expr-lang/expr/vm"
)
// Message processor add/delete tag/meta configuration
type messageProcessorTagConfig struct {
Key string `json:"key"` // Tag name
Value string `json:"value,omitempty"` // Tag value
Condition string `json:"if"` // Condition for adding or removing corresponding tag
}
type messageProcessorConfig struct {
StageOrder []string `json:"stage_order,omitempty"` // List of stages to execute them in the specified order and to skip unrequired ones
DropMessages []string `json:"drop_messages,omitempty"` // List of metric names to drop. For fine-grained dropping use drop_messages_if
DropMessagesIf []string `json:"drop_messages_if,omitempty"` // List of evaluatable terms to drop messages
RenameMessages map[string]string `json:"rename_messages,omitempty"` // Map of metric names to rename
RenameMessagesIf map[string]string `json:"rename_messages_if,omitempty"` // Map to rename metric name based on a condition
NormalizeUnits bool `json:"normalize_units,omitempty"` // Check unit meta flag and normalize it using cc-units
ChangeUnitPrefix map[string]string `json:"change_unit_prefix,omitempty"` // Add prefix that should be applied to the messages
AddTagsIf []messageProcessorTagConfig `json:"add_tags_if"` // List of tags that are added when the condition is met
DelTagsIf []messageProcessorTagConfig `json:"delete_tags_if"` // List of tags that are removed when the condition is met
AddMetaIf []messageProcessorTagConfig `json:"add_meta_if"` // List of meta infos that are added when the condition is met
DelMetaIf []messageProcessorTagConfig `json:"delete_meta_if"` // List of meta infos that are removed when the condition is met
AddFieldIf []messageProcessorTagConfig `json:"add_field_if"` // List of fields that are added when the condition is met
DelFieldIf []messageProcessorTagConfig `json:"delete_field_if"` // List of fields that are removed when the condition is met
DropByType []string `json:"drop_by_message_type"` // List of message types that should be dropped
MoveTagToMeta []messageProcessorTagConfig `json:"move_tag_to_meta_if"`
MoveTagToField []messageProcessorTagConfig `json:"move_tag_to_field_if"`
MoveMetaToTag []messageProcessorTagConfig `json:"move_meta_to_tag_if"`
MoveMetaToField []messageProcessorTagConfig `json:"move_meta_to_field_if"`
MoveFieldToTag []messageProcessorTagConfig `json:"move_field_to_tag_if"`
MoveFieldToMeta []messageProcessorTagConfig `json:"move_field_to_meta_if"`
AddBaseEnv map[string]interface{} `json:"add_base_env"`
}
type messageProcessor struct {
// For thread-safety
mutex sync.RWMutex
// mapping contains all evalables as strings to gval.Evaluable
// because it is not possible to get the original string out of
// a gval.Evaluable
mapping map[string]*vm.Program
stages []string // order of stage execution
dropMessages map[string]struct{} // internal lookup map
dropTypes map[string]struct{} // internal lookup map
dropMessagesIf map[*vm.Program]struct{} // pre-processed dropMessagesIf
renameMessages map[string]string // internal lookup map
renameMessagesIf map[*vm.Program]string // pre-processed RenameMessagesIf
changeUnitPrefix map[*vm.Program]string // pre-processed ChangeUnitPrefix
normalizeUnits bool
addTagsIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddTagsIf
deleteTagsIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelTagsIf
addMetaIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddMetaIf
deleteMetaIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelMetaIf
addFieldIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddFieldIf
deleteFieldIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelFieldIf
moveTagToMeta map[*vm.Program]messageProcessorTagConfig // pre-processed MoveTagToMeta
moveTagToField map[*vm.Program]messageProcessorTagConfig // pre-processed MoveTagToField
moveMetaToTag map[*vm.Program]messageProcessorTagConfig // pre-processed MoveMetaToTag
moveMetaToField map[*vm.Program]messageProcessorTagConfig // pre-processed MoveMetaToField
moveFieldToTag map[*vm.Program]messageProcessorTagConfig // pre-processed MoveFieldToTag
moveFieldToMeta map[*vm.Program]messageProcessorTagConfig // pre-processed MoveFieldToMeta
}
type MessageProcessor interface {
// Functions to set the execution order of the processing stages
SetStages([]string) error
DefaultStages() []string
// Function to add variables to the base evaluation environment
AddBaseEnv(env map[string]interface{}) error
// Functions to add and remove rules
AddDropMessagesByName(name string) error
RemoveDropMessagesByName(name string)
AddDropMessagesByCondition(condition string) error
RemoveDropMessagesByCondition(condition string)
AddRenameMetricByCondition(condition string, name string) error
RemoveRenameMetricByCondition(condition string)
AddRenameMetricByName(from, to string) error
RemoveRenameMetricByName(from string)
SetNormalizeUnits(settings bool)
AddChangeUnitPrefix(condition string, prefix string) error
RemoveChangeUnitPrefix(condition string)
AddAddTagsByCondition(condition, key, value string) error
RemoveAddTagsByCondition(condition string)
AddDeleteTagsByCondition(condition, key, value string) error
RemoveDeleteTagsByCondition(condition string)
AddAddMetaByCondition(condition, key, value string) error
RemoveAddMetaByCondition(condition string)
AddDeleteMetaByCondition(condition, key, value string) error
RemoveDeleteMetaByCondition(condition string)
AddMoveTagToMeta(condition, key, value string) error
RemoveMoveTagToMeta(condition string)
AddMoveTagToFields(condition, key, value string) error
RemoveMoveTagToFields(condition string)
AddMoveMetaToTags(condition, key, value string) error
RemoveMoveMetaToTags(condition string)
AddMoveMetaToFields(condition, key, value string) error
RemoveMoveMetaToFields(condition string)
AddMoveFieldToTags(condition, key, value string) error
RemoveMoveFieldToTags(condition string)
AddMoveFieldToMeta(condition, key, value string) error
RemoveMoveFieldToMeta(condition string)
// Read in a JSON configuration
FromConfigJSON(config json.RawMessage) error
// Processing functions for legacy CCMetric and current CCMessage
ProcessMetric(m lplegacy.CCMetric) (lp.CCMessage, error)
ProcessMessage(m lp.CCMessage) (lp.CCMessage, error)
//EvalToBool(condition string, parameters map[string]interface{}) (bool, error)
//EvalToFloat64(condition string, parameters map[string]interface{}) (float64, error)
//EvalToString(condition string, parameters map[string]interface{}) (string, error)
}
const (
STAGENAME_DROP_BY_NAME string = "drop_by_name"
STAGENAME_DROP_BY_TYPE string = "drop_by_type"
STAGENAME_DROP_IF string = "drop_if"
STAGENAME_ADD_TAG string = "add_tag"
STAGENAME_DELETE_TAG string = "delete_tag"
STAGENAME_MOVE_TAG_META string = "move_tag_to_meta"
STAGENAME_MOVE_TAG_FIELD string = "move_tag_to_fields"
STAGENAME_ADD_META string = "add_meta"
STAGENAME_DELETE_META string = "delete_meta"
STAGENAME_MOVE_META_TAG string = "move_meta_to_tags"
STAGENAME_MOVE_META_FIELD string = "move_meta_to_fields"
STAGENAME_ADD_FIELD string = "add_field"
STAGENAME_DELETE_FIELD string = "delete_field"
STAGENAME_MOVE_FIELD_TAG string = "move_field_to_tags"
STAGENAME_MOVE_FIELD_META string = "move_field_to_meta"
STAGENAME_RENAME_BY_NAME string = "rename"
STAGENAME_RENAME_IF string = "rename_if"
STAGENAME_CHANGE_UNIT_PREFIX string = "change_unit_prefix"
STAGENAME_NORMALIZE_UNIT string = "normalize_unit"
)
var StageNames = []string{
STAGENAME_DROP_BY_NAME,
STAGENAME_DROP_BY_TYPE,
STAGENAME_DROP_IF,
STAGENAME_ADD_TAG,
STAGENAME_DELETE_TAG,
STAGENAME_MOVE_TAG_META,
STAGENAME_MOVE_TAG_FIELD,
STAGENAME_ADD_META,
STAGENAME_DELETE_META,
STAGENAME_MOVE_META_TAG,
STAGENAME_MOVE_META_FIELD,
STAGENAME_ADD_FIELD,
STAGENAME_DELETE_FIELD,
STAGENAME_MOVE_FIELD_TAG,
STAGENAME_MOVE_FIELD_META,
STAGENAME_RENAME_BY_NAME,
STAGENAME_RENAME_IF,
STAGENAME_CHANGE_UNIT_PREFIX,
STAGENAME_NORMALIZE_UNIT,
}
var paramMapPool = sync.Pool{
New: func() any {
return make(map[string]interface{})
},
}
func sanitizeExprString(key string) string {
return strings.ReplaceAll(key, "type-id", "typeid")
}
func getParamMap(point lp.CCMetric) map[string]interface{} {
params := paramMapPool.Get().(map[string]interface{})
params["message"] = point
params["msg"] = point
params["name"] = point.Name()
params["timestamp"] = point.Time().Unix()
params["time"] = params["timestamp"]
fields := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Fields() {
fields[key] = value
switch key {
case "value":
params["messagetype"] = "metric"
params["value"] = value
params["metric"] = value
case "event":
params["messagetype"] = "event"
params["event"] = value
case "control":
params["messagetype"] = "control"
params["control"] = value
case "log":
params["messagetype"] = "log"
params["log"] = value
default:
params["messagetype"] = "unknown"
}
}
params["msgtype"] = params["messagetype"]
params["fields"] = fields
params["field"] = fields
tags := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Tags() {
tags[sanitizeExprString(key)] = value
}
params["tags"] = tags
params["tag"] = tags
meta := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Meta() {
meta[sanitizeExprString(key)] = value
}
params["meta"] = meta
return params
}
var baseenv = map[string]interface{}{
"name": "",
"messagetype": "unknown",
"msgtype": "unknown",
"tag": map[string]interface{}{
"type": "unknown",
"typeid": "0",
"stype": "unknown",
"stypeid": "0",
"hostname": "localhost",
"cluster": "nocluster",
},
"tags": map[string]interface{}{
"type": "unknown",
"typeid": "0",
"stype": "unknown",
"stypeid": "0",
"hostname": "localhost",
"cluster": "nocluster",
},
"meta": map[string]interface{}{
"unit": "invalid",
"source": "unknown",
},
"fields": map[string]interface{}{
"value": 0,
"event": "",
"control": "",
"log": "",
},
"field": map[string]interface{}{
"value": 0,
"event": "",
"control": "",
"log": "",
},
"timestamp": 1234567890,
"msg": lp.EmptyMessage(),
"message": lp.EmptyMessage(),
}
func addBaseEnvWalker(values map[string]interface{}) map[string]interface{} {
out := make(map[string]interface{})
for k, v := range values {
switch value := v.(type) {
case int, int32, int64, uint, uint32, uint64, string, float32, float64:
out[k] = value
case map[string]interface{}:
if _, ok := baseenv[k]; !ok {
out[k] = addBaseEnvWalker(value)
}
}
}
return out
}
func (mp *messageProcessor) AddBaseEnv(env map[string]interface{}) error {
for k, v := range env {
switch value := v.(type) {
case int, int32, int64, uint, uint32, uint64, string, float32, float64:
baseenv[k] = value
case map[string]interface{}:
if _, ok := baseenv[k]; !ok {
baseenv[k] = addBaseEnvWalker(value)
}
}
}
return nil
}
func (mp *messageProcessor) init() error {
mp.stages = make([]string, 0)
mp.mapping = make(map[string]*vm.Program)
mp.dropMessages = make(map[string]struct{})
mp.dropTypes = make(map[string]struct{})
mp.dropMessagesIf = make(map[*vm.Program]struct{})
mp.renameMessages = make(map[string]string)
mp.renameMessagesIf = make(map[*vm.Program]string)
mp.changeUnitPrefix = make(map[*vm.Program]string)
mp.addTagsIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.addMetaIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.addFieldIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteTagsIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteMetaIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteFieldIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveFieldToMeta = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveFieldToTag = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveMetaToField = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveMetaToTag = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveTagToField = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveTagToMeta = make(map[*vm.Program]messageProcessorTagConfig)
mp.normalizeUnits = false
return nil
}
func (mp *messageProcessor) AddDropMessagesByName(name string) error {
mp.mutex.Lock()
if _, ok := mp.dropMessages[name]; !ok {
mp.dropMessages[name] = struct{}{}
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveDropMessagesByName(name string) {
mp.mutex.Lock()
delete(mp.dropMessages, name)
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddDropMessagesByType(typestring string) error {
valid := []string{"metric", "event", "control", "log"}
isValid := false
for _, t := range valid {
if t == typestring {
isValid = true
break
}
}
if isValid {
mp.mutex.Lock()
if _, ok := mp.dropTypes[typestring]; !ok {
cclog.ComponentDebug("MessageProcessor", "Adding type", typestring, "for dropping")
mp.dropTypes[typestring] = struct{}{}
}
mp.mutex.Unlock()
} else {
return fmt.Errorf("invalid message type %s", typestring)
}
return nil
}
func (mp *messageProcessor) RemoveDropMessagesByType(typestring string) {
mp.mutex.Lock()
delete(mp.dropTypes, typestring)
mp.mutex.Unlock()
}
func (mp *messageProcessor) addTagConfig(condition, key, value string, config *map[*vm.Program]messageProcessorTagConfig) error {
var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := (*config)[evaluable]; !ok {
mp.mapping[condition] = evaluable
(*config)[evaluable] = messageProcessorTagConfig{
Condition: condition,
Key: key,
Value: value,
}
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) removeTagConfig(condition string, config *map[*vm.Program]messageProcessorTagConfig) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(*config, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddAddTagsByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addTagsIf)
}
func (mp *messageProcessor) RemoveAddTagsByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addTagsIf)
}
func (mp *messageProcessor) AddDeleteTagsByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteTagsIf)
}
func (mp *messageProcessor) RemoveDeleteTagsByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteTagsIf)
}
func (mp *messageProcessor) AddAddMetaByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addMetaIf)
}
func (mp *messageProcessor) RemoveAddMetaByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addMetaIf)
}
func (mp *messageProcessor) AddDeleteMetaByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteMetaIf)
}
func (mp *messageProcessor) RemoveDeleteMetaByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteMetaIf)
}
func (mp *messageProcessor) AddAddFieldByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addFieldIf)
}
func (mp *messageProcessor) RemoveAddFieldByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addFieldIf)
}
func (mp *messageProcessor) AddDeleteFieldByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteFieldIf)
}
func (mp *messageProcessor) RemoveDeleteFieldByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteFieldIf)
}
func (mp *messageProcessor) AddDropMessagesByCondition(condition string) error {
var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := mp.dropMessagesIf[evaluable]; !ok {
mp.mapping[condition] = evaluable
mp.dropMessagesIf[evaluable] = struct{}{}
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveDropMessagesByCondition(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.dropMessagesIf, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddRenameMetricByCondition(condition string, name string) error {
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
mp.mapping[condition] = evaluable
mp.renameMessagesIf[evaluable] = name
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveRenameMetricByCondition(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.renameMessagesIf, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) SetNormalizeUnits(setting bool) {
mp.normalizeUnits = setting
}
func (mp *messageProcessor) AddChangeUnitPrefix(condition string, prefix string) error {
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
mp.mapping[condition] = evaluable
mp.changeUnitPrefix[evaluable] = prefix
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveChangeUnitPrefix(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.changeUnitPrefix, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddRenameMetricByName(from, to string) error {
mp.mutex.Lock()
if _, ok := mp.renameMessages[from]; !ok {
mp.renameMessages[from] = to
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveRenameMetricByName(from string) {
mp.mutex.Lock()
delete(mp.renameMessages, from)
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddMoveTagToMeta(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveTagToMeta)
}
func (mp *messageProcessor) RemoveMoveTagToMeta(condition string) {
mp.removeTagConfig(condition, &mp.moveTagToMeta)
}
func (mp *messageProcessor) AddMoveTagToFields(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveTagToField)
}
func (mp *messageProcessor) RemoveMoveTagToFields(condition string) {
mp.removeTagConfig(condition, &mp.moveTagToField)
}
func (mp *messageProcessor) AddMoveMetaToTags(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveMetaToTag)
}
func (mp *messageProcessor) RemoveMoveMetaToTags(condition string) {
mp.removeTagConfig(condition, &mp.moveMetaToTag)
}
func (mp *messageProcessor) AddMoveMetaToFields(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveMetaToField)
}
func (mp *messageProcessor) RemoveMoveMetaToFields(condition string) {
mp.removeTagConfig(condition, &mp.moveMetaToField)
}
func (mp *messageProcessor) AddMoveFieldToTags(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveFieldToTag)
}
func (mp *messageProcessor) RemoveMoveFieldToTags(condition string) {
mp.removeTagConfig(condition, &mp.moveFieldToTag)
}
func (mp *messageProcessor) AddMoveFieldToMeta(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveFieldToMeta)
}
func (mp *messageProcessor) RemoveMoveFieldToMeta(condition string) {
mp.removeTagConfig(condition, &mp.moveFieldToMeta)
}
func (mp *messageProcessor) SetStages(stages []string) error {
newstages := make([]string, 0)
if len(stages) == 0 {
mp.mutex.Lock()
mp.stages = newstages
mp.mutex.Unlock()
return nil
}
for i, s := range stages {
valid := false
for _, v := range StageNames {
if s == v {
valid = true
}
}
if valid {
newstages = append(newstages, s)
} else {
return fmt.Errorf("invalid stage %s at index %d", s, i)
}
}
mp.mutex.Lock()
mp.stages = newstages
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) DefaultStages() []string {
return StageNames
}
func (mp *messageProcessor) FromConfigJSON(config json.RawMessage) error {
var c messageProcessorConfig
err := json.Unmarshal(config, &c)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
if len(c.StageOrder) > 0 {
err = mp.SetStages(c.StageOrder)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
} else {
err = mp.SetStages(mp.DefaultStages())
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropMessages {
err = mp.AddDropMessagesByName(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropByType {
err = mp.AddDropMessagesByType(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropMessagesIf {
err = mp.AddDropMessagesByCondition(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.RenameMessagesIf {
err = mp.AddRenameMetricByCondition(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.RenameMessages {
err = mp.AddRenameMetricByName(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.ChangeUnitPrefix {
err = mp.AddChangeUnitPrefix(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddTagsIf {
err = mp.AddAddTagsByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddMetaIf {
err = mp.AddAddMetaByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddFieldIf {
err = mp.AddAddFieldByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelTagsIf {
err = mp.AddDeleteTagsByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelMetaIf {
err = mp.AddDeleteMetaByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelFieldIf {
err = mp.AddDeleteFieldByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveTagToMeta {
err = mp.AddMoveTagToMeta(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveTagToField {
err = mp.AddMoveTagToFields(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveMetaToTag {
err = mp.AddMoveMetaToTags(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveMetaToField {
err = mp.AddMoveMetaToFields(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveFieldToTag {
err = mp.AddMoveFieldToTags(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveFieldToMeta {
err = mp.AddMoveFieldToMeta(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
if len(c.AddBaseEnv) > 0 {
err = mp.AddBaseEnv(c.AddBaseEnv)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
mp.SetNormalizeUnits(c.NormalizeUnits)
return nil
}
func (mp *messageProcessor) ProcessMetric(metric lplegacy.CCMetric) (lp.CCMessage, error) {
m, err := lp.NewMessage(
metric.Name(),
metric.Tags(),
metric.Meta(),
metric.Fields(),
metric.Time(),
)
if err != nil {
return m, fmt.Errorf("failed to parse metric to message: %v", err.Error())
}
return mp.ProcessMessage(m)
}
func (mp *messageProcessor) ProcessMessage(m lp.CCMessage) (lp.CCMessage, error) {
var err error
var out lp.CCMessage = lp.FromMessage(m)
name := out.Name()
if len(mp.stages) == 0 {
mp.SetStages(mp.DefaultStages())
}
mp.mutex.RLock()
defer mp.mutex.RUnlock()
params := getParamMap(out)
defer func() {
params["field"] = nil
params["tag"] = nil
paramMapPool.Put(params["fields"])
paramMapPool.Put(params["tags"])
paramMapPool.Put(params["meta"])
paramMapPool.Put(params)
}()
for _, s := range mp.stages {
switch s {
case STAGENAME_DROP_BY_NAME:
if len(mp.dropMessages) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by message name ", name)
if _, ok := mp.dropMessages[name]; ok {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
}
}
case STAGENAME_DROP_BY_TYPE:
if len(mp.dropTypes) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by message type")
if _, ok := mp.dropTypes[params["messagetype"].(string)]; ok {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
}
}
case STAGENAME_DROP_IF:
if len(mp.dropMessagesIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by condition")
drop, err := dropMessagesIf(&params, &mp.dropMessagesIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if drop {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
}
}
case STAGENAME_RENAME_BY_NAME:
if len(mp.renameMessages) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Renaming by name match")
if newname, ok := mp.renameMessages[name]; ok {
//cclog.ComponentDebug("MessageProcessor", "Rename to", newname)
out.SetName(newname)
//cclog.ComponentDebug("MessageProcessor", "Add old name as 'oldname' to meta", name)
out.AddMeta("oldname", name)
}
}
case STAGENAME_RENAME_IF:
if len(mp.renameMessagesIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Renaming by condition")
_, err := renameMessagesIf(out, &params, &mp.renameMessagesIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_ADD_TAG:
if len(mp.addTagsIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Adding tags")
_, err = addTagIf(out, &params, &mp.addTagsIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_DELETE_TAG:
if len(mp.deleteTagsIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Delete tags")
_, err = deleteTagIf(out, &params, &mp.deleteTagsIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_ADD_META:
if len(mp.addMetaIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Adding meta information")
_, err = addMetaIf(out, &params, &mp.addMetaIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_DELETE_META:
if len(mp.deleteMetaIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Delete meta information")
_, err = deleteMetaIf(out, &params, &mp.deleteMetaIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_ADD_FIELD:
if len(mp.addFieldIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Adding fields")
_, err = addFieldIf(out, &params, &mp.addFieldIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_DELETE_FIELD:
if len(mp.deleteFieldIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Delete fields")
_, err = deleteFieldIf(out, &params, &mp.deleteFieldIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_TAG_META:
if len(mp.moveTagToMeta) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move tag to meta")
_, err := moveTagToMeta(out, &params, &mp.moveTagToMeta)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_TAG_FIELD:
if len(mp.moveTagToField) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move tag to fields")
_, err := moveTagToField(out, &params, &mp.moveTagToField)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_META_TAG:
if len(mp.moveMetaToTag) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move meta to tags")
_, err := moveMetaToTag(out, &params, &mp.moveMetaToTag)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_META_FIELD:
if len(mp.moveMetaToField) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move meta to fields")
_, err := moveMetaToField(out, &params, &mp.moveMetaToField)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_FIELD_META:
if len(mp.moveFieldToMeta) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move field to meta")
_, err := moveFieldToMeta(out, &params, &mp.moveFieldToMeta)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_FIELD_TAG:
if len(mp.moveFieldToTag) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move field to tags")
_, err := moveFieldToTag(out, &params, &mp.moveFieldToTag)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_NORMALIZE_UNIT:
if mp.normalizeUnits {
//cclog.ComponentDebug("MessageProcessor", "Normalize units")
if lp.IsMetric(out) {
_, err := normalizeUnits(out)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
} else {
cclog.ComponentDebug("MessageProcessor", "skipped, no metric")
}
}
case STAGENAME_CHANGE_UNIT_PREFIX:
if len(mp.changeUnitPrefix) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Change unit prefix")
if lp.IsMetric(out) {
_, err := changeUnitPrefix(out, &params, &mp.changeUnitPrefix)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
} else {
cclog.ComponentDebug("MessageProcessor", "skipped, no metric")
}
}
}
}
return out, nil
}
// Get a new instance of a message processor.
func NewMessageProcessor() (MessageProcessor, error) {
mp := new(messageProcessor)
err := mp.init()
if err != nil {
err := fmt.Errorf("failed to create MessageProcessor: %v", err.Error())
cclog.ComponentError("MessageProcessor", err.Error())
return nil, err
}
return mp, nil
}
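
Putting the API together, here is a minimal, hedged usage sketch: create a processor, load a pipeline configuration, and push one metric through it. The function name `usageSketch` and the config values are invented for illustration; it is written as if inside this package (the unit tests later in this diff follow the same pattern), with `encoding/json`, `fmt`, `time`, and the cc-message package (`lp`) assumed to be imported.

```go
// usageSketch is a hypothetical helper showing the intended call sequence.
func usageSketch() {
	mp, err := NewMessageProcessor()
	if err != nil {
		fmt.Println(err)
		return
	}
	// Drop all events; tag node-level messages with a cluster name.
	cfg := json.RawMessage(`{
		"drop_by_message_type": ["event"],
		"add_tags_if": [{"if": "tags.type == 'node'", "key": "cluster", "value": "mycluster"}]
	}`)
	if err := mp.FromConfigJSON(cfg); err != nil {
		fmt.Println(err)
		return
	}
	m, _ := lp.NewMetric("load_one", map[string]string{"type": "node"},
		map[string]string{"unit": "1"}, 1.23, time.Now())
	if out, err := mp.ProcessMessage(m); err == nil && out != nil {
		// A nil output with a nil error means the message was dropped.
		fmt.Println(out.ToLineProtocol(nil))
	}
}
```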

View File

@@ -1,262 +0,0 @@
package messageprocessor
import (
"errors"
"fmt"
lp2 "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
units "github.com/ClusterCockpit/cc-units"
"github.com/expr-lang/expr"
"github.com/expr-lang/expr/vm"
)
type MessageLocation int
const (
MESSAGE_LOCATION_TAGS MessageLocation = iota
MESSAGE_LOCATION_META
MESSAGE_LOCATION_FIELDS
)
// Abstract function to move entries from one location to another
func moveInMessage(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, from, to MessageLocation) (bool, error) {
for d, data := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return false, fmt.Errorf("failed to evaluate: %v", err.Error())
}
//cclog.ComponentDebug("MessageProcessor", "Move from", from, "to", to)
if value.(bool) {
var v string
var ok bool
switch from {
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Getting tag key", data.Key)
v, ok = message.GetTag(data.Key)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Getting meta key", data.Key)
//cclog.ComponentDebug("MessageProcessor", message.Meta())
v, ok = message.GetMeta(data.Key)
case MESSAGE_LOCATION_FIELDS:
var x interface{}
//cclog.ComponentDebug("MessageProcessor", "Getting field key", data.Key)
x, ok = message.GetField(data.Key)
v = fmt.Sprintf("%v", x)
}
if ok {
switch from {
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Removing tag key", data.Key)
message.RemoveTag(data.Key)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Removing meta key", data.Key)
message.RemoveMeta(data.Key)
case MESSAGE_LOCATION_FIELDS:
//cclog.ComponentDebug("MessageProcessor", "Removing field key", data.Key)
message.RemoveField(data.Key)
}
switch to {
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Adding tag", data.Value, "->", v)
message.AddTag(data.Value, v)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Adding meta", data.Value, "->", v)
message.AddMeta(data.Value, v)
case MESSAGE_LOCATION_FIELDS:
//cclog.ComponentDebug("MessageProcessor", "Adding field", data.Value, "->", v)
message.AddField(data.Value, v)
}
}
}
}
return false, nil
}
func deleteIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, location MessageLocation) (bool, error) {
for d, data := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return true, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
switch location {
case MESSAGE_LOCATION_FIELDS:
switch data.Key {
case "value", "event", "log", "control":
return false, errors.New("cannot delete protected fields")
default:
//cclog.ComponentDebug("MessageProcessor", "Removing field for", data.Key)
message.RemoveField(data.Key)
}
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Removing tag for", data.Key)
message.RemoveTag(data.Key)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Removing meta for", data.Key)
message.RemoveMeta(data.Key)
}
}
}
return false, nil
}
func addIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, location MessageLocation) (bool, error) {
for d, data := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return true, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
switch location {
case MESSAGE_LOCATION_FIELDS:
//cclog.ComponentDebug("MessageProcessor", "Adding field", data.Value, "->", data.Value)
message.AddField(data.Key, data.Value)
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Adding tag", data.Value, "->", data.Value)
message.AddTag(data.Key, data.Value)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Adding meta", data.Value, "->", data.Value)
message.AddMeta(data.Key, data.Value)
}
}
}
return false, nil
}
func deleteTagIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return deleteIf(message, params, checks, MESSAGE_LOCATION_TAGS)
}
func addTagIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return addIf(message, params, checks, MESSAGE_LOCATION_TAGS)
}
func moveTagToMeta(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_TAGS, MESSAGE_LOCATION_META)
}
func moveTagToField(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_TAGS, MESSAGE_LOCATION_FIELDS)
}
func deleteMetaIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return deleteIf(message, params, checks, MESSAGE_LOCATION_META)
}
func addMetaIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return addIf(message, params, checks, MESSAGE_LOCATION_META)
}
func moveMetaToTag(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_META, MESSAGE_LOCATION_TAGS)
}
func moveMetaToField(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_META, MESSAGE_LOCATION_FIELDS)
}
func deleteFieldIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return deleteIf(message, params, checks, MESSAGE_LOCATION_FIELDS)
}
func addFieldIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return addIf(message, params, checks, MESSAGE_LOCATION_FIELDS)
}
func moveFieldToTag(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_FIELDS, MESSAGE_LOCATION_TAGS)
}
func moveFieldToMeta(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_FIELDS, MESSAGE_LOCATION_META)
}
func dropMessagesIf(params *map[string]interface{}, checks *map[*vm.Program]struct{}) (bool, error) {
for d := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return false, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
return true, nil
}
}
return false, nil
}
func normalizeUnits(message lp2.CCMessage) (bool, error) {
if in_unit, ok := message.GetMeta("unit"); ok {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", u.Short())
message.AddMeta("unit", u.Short())
}
} else if in_unit, ok := message.GetTag("unit"); ok {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", u.Short())
message.AddTag("unit", u.Short())
}
}
return false, nil
}
func changeUnitPrefix(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]string) (bool, error) {
for r, n := range *checks {
value, err := expr.Run(r, *params)
if err != nil {
return false, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
newPrefix := units.NewPrefix(n)
//cclog.ComponentDebug("MessageProcessor", "Condition matches, change to prefix", newPrefix.String())
if in_unit, ok := message.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Input unit", u.Short())
conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
if conv != nil && out_unit.Valid() {
if val, ok := message.GetField("value"); ok {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", out_unit.Short())
message.AddField("value", conv(val))
message.AddMeta("unit", out_unit.Short())
}
}
}
} else if in_unit, ok := message.GetTag("unit"); ok && newPrefix != units.InvalidPrefix {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Input unit", u.Short())
conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
if conv != nil && out_unit.Valid() {
if val, ok := message.GetField("value"); ok {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", out_unit.Short())
message.AddField("value", conv(val))
message.AddTag("unit", out_unit.Short())
}
}
}
}
}
}
return false, nil
}
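
For reference, a small standalone sketch of the cc-units calls this function relies on, using the same unit/prefix pair as the `single_change_prefix_match` test case later in this diff. The signatures are taken from their use in this file; the exact scale factor and the conversion function's behavior come from cc-units, so treat the output as illustrative only.

```go
package main

import (
	"fmt"

	units "github.com/ClusterCockpit/cc-units"
)

func main() {
	// Same calls as changeUnitPrefix above: parse the unit, pick a target
	// prefix, and obtain a conversion function plus the resulting unit.
	u := units.NewUnit("Byte")
	p := units.NewPrefix("M")
	if u.Valid() && p != units.InvalidPrefix {
		conv, outUnit := units.GetUnitPrefixFactor(u, p)
		if conv != nil && outUnit.Valid() {
			// conv rescales a value into the new prefix; outUnit.Short()
			// yields "MB" for this pair (as the test case expects).
			fmt.Println(conv(1048576.0), outUnit.Short())
		}
	}
}
```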
func renameMessagesIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]string) (bool, error) {
for d, n := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return true, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
old := message.Name()
//cclog.ComponentDebug("MessageProcessor", "Rename to", n)
message.SetName(n)
//cclog.ComponentDebug("MessageProcessor", "Add old name as 'oldname' to meta", old)
message.AddMeta("oldname", old)
}
}
return false, nil
}
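
All of these helpers share one mechanism: each configured condition is compiled once with expr-lang and then evaluated per message against a map-shaped environment. The real environment is built by the internal `getParamMap`; the sketch below is a self-contained illustration of just that mechanism, with the `name` and `tags` keys mirroring the conditions used in the unit tests.

```go
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// Compile the condition once against a sample environment...
	env := map[string]interface{}{
		"name": "net_bytes_in",
		"tags": map[string]interface{}{"type": "node", "type-id": "0"},
	}
	prog, err := expr.Compile("name == 'net_bytes_in' && tags.type == 'node'",
		expr.Env(env), expr.AsBool())
	if err != nil {
		panic(err)
	}
	// ...then evaluate it per message, as the helpers above do in their loops.
	match, err := expr.Run(prog, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(match.(bool)) // true
}
```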

View File

@@ -1,396 +0,0 @@
package messageprocessor
import (
"encoding/json"
"errors"
"fmt"
"testing"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
)
func generate_message_lists(num_lists, num_entries int) ([][]lp.CCMessage, error) {
mlist := make([][]lp.CCMessage, 0)
for j := 0; j < num_lists; j++ {
out := make([]lp.CCMessage, 0)
for i := 0; i < num_entries; i++ {
var x lp.CCMessage
var err error
switch {
case i%4 == 0:
x, err = lp.NewEvent("myevent", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, "nothing happened", time.Now())
case i%4 == 1:
x, err = lp.NewMetric("mymetric", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{"unit": "kByte"}, 12.145, time.Now())
case i%4 == 2:
x, err = lp.NewLog("mylog", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, "disk status: OK", time.Now())
case i%4 == 3:
x, err = lp.NewGetControl("mycontrol", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, time.Now())
}
if err == nil {
x.AddTag("hostname", "myhost")
out = append(out, x)
} else {
return nil, errors.New("failed to create message")
}
}
mlist = append(mlist, out)
}
return mlist, nil
}
func TestNewMessageProcessor(t *testing.T) {
_, err := NewMessageProcessor()
if err != nil {
t.Error(err.Error())
}
}
type Configs struct {
name string
config json.RawMessage
drop bool
errors bool
pre func(msg lp.CCMessage) error
check func(msg lp.CCMessage) error
}
var test_configs = []Configs{
{
name: "single_dropif_nomatch",
config: json.RawMessage(`{"drop_messages_if": [ "name == 'testname' && tags.type == 'socket' && tags.typeid % 2 == 1"]}`),
},
{
name: "drop_by_name",
config: json.RawMessage(`{"drop_messages": [ "net_bytes_in"]}`),
drop: true,
},
{
name: "drop_by_type_match",
config: json.RawMessage(`{"drop_by_message_type": [ "metric"]}`),
drop: true,
},
{
name: "drop_by_type_nomatch",
config: json.RawMessage(`{"drop_by_message_type": [ "event"]}`),
},
{
name: "single_dropif_match",
config: json.RawMessage(`{"drop_messages_if": [ "name == 'net_bytes_in' && tags.type == 'node'"]}`),
drop: true,
},
{
name: "double_dropif_match_nomatch",
config: json.RawMessage(`{"drop_messages_if": [ "name == 'net_bytes_in' && tags.type == 'node'", "name == 'testname' && tags.type == 'socket' && tags.typeid % 2 == 1"]}`),
drop: true,
},
{
name: "rename_simple",
config: json.RawMessage(`{"rename_messages": { "net_bytes_in" : "net_bytes_out", "rapl_power": "cpu_power"}}`),
check: func(msg lp.CCMessage) error {
if msg.Name() != "net_bytes_out" {
return errors.New("expected name net_bytes_out but still have net_bytes_in")
}
return nil
},
},
{
name: "rename_match",
config: json.RawMessage(`{"rename_messages_if": { "name == 'net_bytes_in'" : "net_bytes_out", "name == 'rapl_power'": "cpu_power"}}`),
check: func(msg lp.CCMessage) error {
if msg.Name() != "net_bytes_out" {
return errors.New("expected name net_bytes_out but still have net_bytes_in")
}
return nil
},
},
{
name: "rename_nomatch",
config: json.RawMessage(`{"rename_messages_if": { "name == 'net_bytes_out'" : "net_bytes_in", "name == 'rapl_power'": "cpu_power"}}`),
check: func(msg lp.CCMessage) error {
if msg.Name() != "net_bytes_in" {
return errors.New("expected name net_bytes_in but still have net_bytes_out")
}
return nil
},
},
{
name: "add_tag",
config: json.RawMessage(`{"add_tags_if": [{"if": "name == 'net_bytes_in'", "key" : "cluster", "value" : "mycluster"}]}`),
check: func(msg lp.CCMessage) error {
if !msg.HasTag("cluster") {
return errors.New("expected new tag 'cluster' but not present")
}
return nil
},
},
{
name: "del_tag",
config: json.RawMessage(`{"delete_tags_if": [{"if": "name == 'net_bytes_in'", "key" : "type"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasTag("type") {
return errors.New("expected to have no 'type' but still present")
}
return nil
},
},
{
name: "add_meta",
config: json.RawMessage(`{"add_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "source", "value" : "example"}]}`),
check: func(msg lp.CCMessage) error {
if !msg.HasMeta("source") {
return errors.New("expected new tag 'source' but not present")
}
return nil
},
},
{
name: "del_meta",
config: json.RawMessage(`{"delete_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "unit"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasMeta("unit") {
return errors.New("expected to have no 'unit' but still present")
}
return nil
},
},
{
name: "add_field",
config: json.RawMessage(`{"add_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value" : "example"}]}`),
check: func(msg lp.CCMessage) error {
if !msg.HasField("myfield") {
return errors.New("expected new tag 'source' but not present")
}
return nil
},
},
{
name: "delete_fields_if_protected",
config: json.RawMessage(`{"delete_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "value"}]}`),
errors: true,
check: func(msg lp.CCMessage) error {
if !msg.HasField("value") {
return errors.New("expected to still have 'value' field because it is a protected field key")
}
return nil
},
},
{
name: "delete_fields_if_unprotected",
config: json.RawMessage(`{"delete_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "testfield"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasField("testfield") {
return errors.New("expected to still have 'testfield' field but should be deleted")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddField("testfield", 4.123)
return nil
},
},
{
name: "single_change_prefix_match",
config: json.RawMessage(`{"change_unit_prefix": {"name == 'net_bytes_in' && tags.type == 'node'": "M"}}`),
check: func(msg lp.CCMessage) error {
if u, ok := msg.GetMeta("unit"); ok {
if u != "MB" {
return fmt.Errorf("expected unit MB but have %s", u)
}
} else if u, ok := msg.GetTag("unit"); ok {
if u != "MB" {
return fmt.Errorf("expected unit MB but have %s", u)
}
}
return nil
},
},
{
name: "normalize_units",
config: json.RawMessage(`{"normalize_units": true}`),
check: func(msg lp.CCMessage) error {
if u, ok := msg.GetMeta("unit"); ok {
if u != "B" {
return fmt.Errorf("expected unit B but have %s", u)
}
} else if u, ok := msg.GetTag("unit"); ok {
if u != "B" {
return fmt.Errorf("expected unit B but have %s", u)
}
}
return nil
},
},
{
name: "move_tag_to_meta",
config: json.RawMessage(`{"move_tag_to_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "type-id", "value": "typeid"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasTag("type-id") || !msg.HasMeta("typeid") {
return errors.New("moving tag 'type-id' to meta 'typeid' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddTag("type-id", "0")
return nil
},
},
{
name: "move_tag_to_field",
config: json.RawMessage(`{"move_tag_to_field_if": [{"if": "name == 'net_bytes_in'", "key" : "type-id", "value": "typeid"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasTag("type-id") || !msg.HasField("typeid") {
return errors.New("moving tag 'type-id' to field 'typeid' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddTag("type-id", "0")
return nil
},
},
{
name: "move_meta_to_tag",
config: json.RawMessage(`{"move_meta_to_tag_if": [{"if": "name == 'net_bytes_in'", "key" : "unit", "value": "unit"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasMeta("unit") || !msg.HasTag("unit") {
return errors.New("moving meta 'unit' to tag 'unit' failed")
}
return nil
},
},
{
name: "move_meta_to_field",
config: json.RawMessage(`{"move_meta_to_field_if": [{"if": "name == 'net_bytes_in'", "key" : "unit", "value": "unit"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasMeta("unit") || !msg.HasField("unit") {
return errors.New("moving meta 'unit' to field 'unit' failed")
}
return nil
},
},
{
name: "move_field_to_tag",
config: json.RawMessage(`{"move_field_to_tag_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value": "field"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasField("myfield") || !msg.HasTag("field") {
return errors.New("moving meta 'myfield' to tag 'field' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddField("myfield", 12)
return nil
},
},
{
name: "move_field_to_meta",
config: json.RawMessage(`{"move_field_to_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value": "field"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasField("myfield") || !msg.HasMeta("field") {
return errors.New("moving meta 'myfield' to meta 'field' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddField("myfield", 12)
return nil
},
},
}
func TestConfigList(t *testing.T) {
for _, c := range test_configs {
t.Run(c.name, func(t *testing.T) {
m, err := lp.NewMetric("net_bytes_in", map[string]string{"type": "node", "type-id": "0"}, map[string]string{"unit": "Byte"}, float64(1024.0), time.Now())
if err != nil {
t.Error(err.Error())
return
}
if c.pre != nil {
if err = c.pre(m); err != nil {
t.Errorf("error running pre-test function: %v", err.Error())
return
}
}
mp, err := NewMessageProcessor()
if err != nil {
t.Error(err.Error())
return
}
err = mp.FromConfigJSON(c.config)
if err != nil {
t.Error(err.Error())
return
}
//t.Log(m.ToLineProtocol(nil))
out, err := mp.ProcessMessage(m)
if err != nil && !c.errors {
cclog.SetDebug()
mp.ProcessMessage(m)
t.Error(err.Error())
return
}
if out == nil && !c.drop {
t.Error("fail, message should NOT be dropped but processor signalled dropping")
return
} else if out != nil && c.drop {
t.Error("fail, message should be dropped but processor signalled NO dropping")
return
}
if c.check != nil {
if err := c.check(out); err != nil {
t.Errorf("check failed with %v", err.Error())
t.Log("Rerun with debugging")
cclog.SetDebug()
mp.ProcessMessage(m)
return
}
}
})
}
}
func BenchmarkProcessing(b *testing.B) {
mlist, err := generate_message_lists(b.N, 1000)
if err != nil {
b.Error(err.Error())
return
}
mp, err := NewMessageProcessor()
if err != nil {
b.Error(err.Error())
return
}
err = mp.FromConfigJSON(json.RawMessage(`{"move_meta_to_tag_if": [{"if" : "name == 'mymetric'", "key":"unit", "value":"unit"}]}`))
if err != nil {
b.Error(err.Error())
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, m := range mlist[i] {
if _, err := mp.ProcessMessage(m); err != nil {
b.Errorf("failed processing message '%s': %v", m.ToLineProtocol(nil), err.Error())
return
}
}
}
b.StopTimer()
b.ReportMetric(float64(b.Elapsed())/float64(b.N*1000), "ns/message") // b.N lists of 1000 messages each
}

View File

@@ -1,44 +1,8 @@
{ {
"natsrecv": { "natsrecv" : {
"type": "nats", "type": "nats",
"address": "nats://my-url", "address": "nats://my-url",
"port": "4222", "port" : "4222",
"database": "testcluster" "database": "testcluster"
},
"redfish_recv": {
"type": "redfish",
"endpoint": "https://%h-bmc",
"client_config": [
{
"host_list": "my-host-1-[1-2]",
"username": "username-1",
"password": "password-1"
},
{
"host_list": "my-host-2-[1,2]",
"username": "username-2",
"password": "password-2"
}
]
},
"ipmi_recv": {
"type": "ipmi",
"endpoint": "ipmi-sensors://%h-ipmi",
"exclude_metrics": [
"fan_speed",
"voltage"
],
"client_config": [
{
"username": "username-1",
"password": "password-1",
"host_list": "my-host-1-[1-2]"
},
{
"username": "username-2",
"password": "password-2",
"host_list": "my-host-2-[1,2]"
}
]
} }
} }

View File

@@ -2,7 +2,7 @@
This folder contains the ReceiveManager and receiver implementations for the cc-metric-collector. This folder contains the ReceiveManager and receiver implementations for the cc-metric-collector.
## Configuration # Configuration
The configuration file for the receivers is a list of configurations. The `type` field in each specifies which receiver to initialize. The configuration file for the receivers is a list of configurations. The `type` field in each specifies which receiver to initialize.
@@ -22,11 +22,8 @@ This allows specifying
- [`nats`](./natsReceiver.md): Receive metrics from the NATS network - [`nats`](./natsReceiver.md): Receive metrics from the NATS network
- [`prometheus`](./prometheusReceiver.md): Scrape data from a Prometheus client - [`prometheus`](./prometheusReceiver.md): Scrape data from a Prometheus client
- [`http`](./httpReceiver.md): Listen for HTTP Post requests transporting metrics in InfluxDB line protocol - [`http`](./httpReceiver.md): Listen for HTTP Post requests transporting metrics in InfluxDB line protocol
- [`ipmi`](./ipmiReceiver.md): Read IPMI sensor readings
- [`redfish`](redfishReceiver.md) Use the Redfish (specification) to query thermal and power metrics
## Contributing own receivers
# Contributing own receivers
A receiver contains a few functions and is derived from the type `Receiver` (in `metricReceiver.go`): A receiver contains a few functions and is derived from the type `Receiver` (in `metricReceiver.go`):
For an example, check the [sample receiver](./sampleReceiver.go) For an example, check the [sample receiver](./sampleReceiver.go)

View File

@@ -5,57 +5,40 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io/ioutil"
"net/http" "net/http"
"strings" "strings"
"sync" "sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor" "github.com/gorilla/mux"
influx "github.com/influxdata/line-protocol/v2/lineprotocol" influx "github.com/influxdata/line-protocol"
) )
const HTTP_RECEIVER_PORT = "8080" const HTTP_RECEIVER_PORT = "8080"
type HttpReceiverConfig struct { type HttpReceiverConfig struct {
defaultReceiverConfig Type string `json:"type"`
Addr string `json:"address"` Addr string `json:"address"`
Port string `json:"port"` Port string `json:"port"`
Path string `json:"path"` Path string `json:"path"`
// Maximum amount of time to wait for the next request when keep-alives are enabled
// should be larger than the measurement interval to keep the connection open
IdleTimeout string `json:"idle_timeout"`
idleTimeout time.Duration
// Controls whether HTTP keep-alives are enabled. By default, keep-alives are enabled
KeepAlivesEnabled bool `json:"keep_alives_enabled"`
// Basic authentication
Username string `json:"username"`
Password string `json:"password"`
useBasicAuth bool
} }
type HttpReceiver struct { type HttpReceiver struct {
receiver receiver
//meta map[string]string handler *influx.MetricHandler
config HttpReceiverConfig parser *influx.Parser
server *http.Server meta map[string]string
wg sync.WaitGroup config HttpReceiverConfig
router *mux.Router
server *http.Server
wg sync.WaitGroup
} }
func (r *HttpReceiver) Init(name string, config json.RawMessage) error { func (r *HttpReceiver) Init(name string, config json.RawMessage) error {
r.name = fmt.Sprintf("HttpReceiver(%s)", name) r.name = fmt.Sprintf("HttpReceiver(%s)", name)
// Set default values
r.config.Port = HTTP_RECEIVER_PORT r.config.Port = HTTP_RECEIVER_PORT
r.config.KeepAlivesEnabled = true
// should be larger than the measurement interval to keep the connection open
r.config.IdleTimeout = "120s"
// Read config
if len(config) > 0 { if len(config) > 0 {
err := json.Unmarshal(config, &r.config) err := json.Unmarshal(config, &r.config)
if err != nil { if err != nil {
@@ -66,59 +49,20 @@ func (r *HttpReceiver) Init(name string, config json.RawMessage) error {
if len(r.config.Port) == 0 { if len(r.config.Port) == 0 {
return errors.New("not all configuration variables set required by HttpReceiver") return errors.New("not all configuration variables set required by HttpReceiver")
} }
r.meta = map[string]string{"source": r.name}
// Check idle timeout config
if len(r.config.IdleTimeout) > 0 {
t, err := time.ParseDuration(r.config.IdleTimeout)
if err == nil {
cclog.ComponentDebug(r.name, "idleTimeout", t)
r.config.idleTimeout = t
}
}
// Check basic authentication config
if len(r.config.Username) > 0 || len(r.config.Password) > 0 {
r.config.useBasicAuth = true
}
if r.config.useBasicAuth && len(r.config.Username) == 0 {
return errors.New("basic authentication requires username")
}
if r.config.useBasicAuth && len(r.config.Password) == 0 {
return errors.New("basic authentication requires password")
}
msgp, err := mp.NewMessageProcessor()
if err != nil {
return fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = msgp
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
r.mp.AddAddMetaByCondition("true", "source", r.name)
//r.meta = map[string]string{"source": r.name}
p := r.config.Path p := r.config.Path
if !strings.HasPrefix(p, "/") { if !strings.HasPrefix(p, "/") {
p = "/" + p p = "/" + p
} }
addr := fmt.Sprintf("%s:%s", r.config.Addr, r.config.Port) uri := fmt.Sprintf("%s:%s%s", r.config.Addr, r.config.Port, p)
uri := addr + p cclog.ComponentDebug(r.name, "INIT", uri)
cclog.ComponentDebug(r.name, "INIT", "listen on:", uri) r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
// Register handler function r.ServerHttp for path p in the DefaultServeMux r.parser.SetTimeFunc(DefaultTime)
http.HandleFunc(p, r.ServerHttp)
// Create http server
r.server = &http.Server{
Addr: addr,
Handler: nil, // handler to invoke, http.DefaultServeMux if nil
IdleTimeout: r.config.idleTimeout,
}
r.server.SetKeepAlivesEnabled(r.config.KeepAlivesEnabled)
r.router = mux.NewRouter()
r.router.Path(p).HandlerFunc(r.ServerHttp)
r.server = &http.Server{Addr: uri, Handler: r.router}
return nil return nil
} }
@@ -135,96 +79,28 @@ func (r *HttpReceiver) Start() {
} }
func (r *HttpReceiver) ServerHttp(w http.ResponseWriter, req *http.Request) { func (r *HttpReceiver) ServerHttp(w http.ResponseWriter, req *http.Request) {
// Check request method, only post method is handled
if req.Method != http.MethodPost { if req.Method != http.MethodPost {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return return
} }
// Check basic authentication body, err := ioutil.ReadAll(req.Body)
if r.config.useBasicAuth { if err != nil {
username, password, ok := req.BasicAuth() http.Error(w, err.Error(), http.StatusInternalServerError)
if !ok || username != r.config.Username || password != r.config.Password { return
http.Error(w, "Unauthorized", http.StatusUnauthorized) }
return metrics, err := r.parser.Parse(body)
} if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
} }
if r.sink != nil {
d := influx.NewDecoder(req.Body)
for d.Next() {
// Decode measurement name
measurement, err := d.Measurement()
if err != nil {
msg := "ServerHttp: Failed to decode measurement: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
// Decode tags
tags := make(map[string]string)
for {
key, value, err := d.NextTag()
if err != nil {
msg := "ServerHttp: Failed to decode tag: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
if key == nil {
break
}
tags[string(key)] = string(value)
}
// Decode fields
fields := make(map[string]interface{})
for {
key, value, err := d.NextField()
if err != nil {
msg := "ServerHttp: Failed to decode field: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
if key == nil {
break
}
fields[string(key)] = value.Interface()
}
// Decode time stamp
t, err := d.Time(influx.Nanosecond, time.Time{})
if err != nil {
msg := "ServerHttp: Failed to decode time stamp: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
y, _ := lp.NewMessage(
string(measurement),
tags,
nil,
fields,
t,
)
m, err := r.mp.ProcessMessage(y)
if err == nil && m != nil {
r.sink <- m
}
for _, m := range metrics {
y := lp.FromInfluxMetric(m)
for k, v := range r.meta {
y.AddMeta(k, v)
} }
// Check for IO errors if r.sink != nil {
err := d.Err() r.sink <- y
if err != nil {
msg := "ServerHttp: Failed to decode: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
} }
} }

View File

@@ -10,10 +10,7 @@ The `http` receiver can be used to receive metrics through HTTP POST requests.
"type": "http", "type": "http",
"address" : "", "address" : "",
"port" : "8080", "port" : "8080",
"path" : "/write", "path" : "/write"
"idle_timeout": "120s",
"username": "myUser",
"password": "myPW"
} }
} }
``` ```
@@ -22,22 +19,5 @@ The `http` receiver can be used receive metrics through HTTP POST requests.
- `address`: Listen address - `address`: Listen address
- `port`: Listen port - `port`: Listen port
- `path`: URL path for the write endpoint - `path`: URL path for the write endpoint
- `idle_timeout`: Maximum amount of time to wait for the next request when keep-alives are enabled; should be larger than the measurement interval to keep the connection open
- `keep_alives_enabled`: Controls whether HTTP keep-alives are enabled. By default, keep-alives are enabled.
- `username`: username for basic authentication
- `password`: password for basic authentication
The HTTP endpoint listens to `http://<address>:<port>/<path>` The HTTP endpoint listens to `http://<address>:<port>/<path>`
### Debugging
- Install [curl](https://curl.se/)
- Use curl to send a message to the `http` receiver
```bash
curl http://localhost:8080/write \
--user "myUser:myPW" \
--data \
"myMetric,hostname=myHost,type=hwthread,type-id=0,unit=Hz value=400000i 1694777161164284635
myMetric,hostname=myHost,type=hwthread,type-id=1,unit=Hz value=400001i 1694777161164284635"
```

View File

@@ -1,571 +0,0 @@
package receivers
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"os/exec"
"regexp"
"strconv"
"strings"
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
"github.com/ClusterCockpit/cc-metric-collector/pkg/hostlist"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
)
type IPMIReceiverClientConfig struct {
// Hostname the IPMI service belongs to
Protocol string // Protocol / tool to use for IPMI sensor reading
DriverType string // Out of band IPMI driver
Fanout int // Maximum number of simultaneous IPMI connections
NumHosts int // Number of remote IPMI devices with the same configuration
IPMIHosts string // List of remote IPMI devices to communicate with
IPMI2HostMapping map[string]string // Mapping between IPMI device name and host name
Username string // User name to authenticate with
Password string // Password to use for authentication
CLIOptions []string // Additional command line options for ipmi-sensors
isExcluded map[string]bool // is metric excluded
mp mp.MessageProcessor
}
type IPMIReceiver struct {
receiver
config struct {
defaultReceiverConfig
Interval time.Duration
// Client config for each IPMI hosts
ClientConfigs []IPMIReceiverClientConfig
}
// Storage for static information
//meta map[string]string
done chan bool // channel to finish / stop IPMI receiver
wg sync.WaitGroup // wait group for IPMI receiver
mp mp.MessageProcessor
}
// doReadMetric reads metrics from all configured IPMI hosts.
func (r *IPMIReceiver) doReadMetric() {
for i := range r.config.ClientConfigs {
clientConfig := &r.config.ClientConfigs[i]
var cmd_options []string
if clientConfig.Protocol == "ipmi-sensors" {
cmd_options = append(cmd_options,
"--always-prefix",
"--sdr-cache-recreate",
// Attempt to interpret OEM data, such as event data, sensor readings, or general extra info
"--interpret-oem-data",
// Ignore not-available (i.e. N/A) sensors in output
"--ignore-not-available-sensors",
// Ignore unrecognized sensor events
"--ignore-unrecognized-events",
// Output fields in comma separated format
"--comma-separated-output",
// Do not output column headers
"--no-header-output",
// Output non-abbreviated units (e.g. 'Amps' instead of 'A').
// May aid in disambiguation of units (e.g. 'C' for Celsius or Coulombs).
"--non-abbreviated-units",
"--fanout", fmt.Sprint(clientConfig.Fanout),
"--driver-type", clientConfig.DriverType,
"--hostname", clientConfig.IPMIHosts,
"--username", clientConfig.Username,
"--password", clientConfig.Password,
)
cmd_options = append(cmd_options, clientConfig.CLIOptions...)
command := exec.Command("ipmi-sensors", cmd_options...)
stdout, _ := command.StdoutPipe()
errBuf := new(bytes.Buffer)
command.Stderr = errBuf
// start command
if err := command.Start(); err != nil {
cclog.ComponentError(
r.name,
fmt.Sprintf("doReadMetric(): Failed to start command \"%s\": %v", command.String(), err),
)
continue
}
// Read command output
const (
idxID = iota
idxName
idxType
idxReading
idxUnits
idxEvent
)
numPrefixRegex := regexp.MustCompile("^[[:digit:]][[:digit:]]-(.*)$")
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
// Read host
v1 := strings.Split(scanner.Text(), ": ")
if len(v1) != 2 {
continue
}
host, ok := clientConfig.IPMI2HostMapping[v1[0]]
if !ok {
continue
}
// Read sensors
v2 := strings.Split(v1[1], ",")
if len(v2) != 6 {
continue
}
// Skip sensors with non available sensor readings
if v2[idxReading] == "N/A" {
continue
}
metric := strings.ToLower(v2[idxType])
name := strings.ToLower(
strings.Replace(
strings.TrimSpace(
v2[idxName]), " ", "_", -1))
// remove prefix enumeration like 01-...
if v := numPrefixRegex.FindStringSubmatch(name); v != nil {
name = v[1]
}
unit := v2[idxUnits]
if unit == "Watts" {
// Power
metric = "power"
name = strings.TrimSuffix(name, "_power")
name = strings.TrimSuffix(name, "_pwr")
name = strings.TrimPrefix(name, "pwr_")
} else if metric == "voltage" &&
unit == "Volts" {
// Voltage
name = strings.TrimPrefix(name, "volt_")
} else if metric == "current" &&
unit == "Amps" {
// Current
unit = "Ampere"
} else if metric == "temperature" &&
unit == "degrees C" {
// Temperature
name = strings.TrimSuffix(name, "_temp")
unit = "degC"
} else if metric == "temperature" &&
unit == "degrees F" {
// Temperature
name = strings.TrimSuffix(name, "_temp")
unit = "degF"
} else if metric == "fan" && unit == "RPM" {
// Fan speed
metric = "fan_speed"
name = strings.TrimSuffix(name, "_tach")
name = strings.TrimPrefix(name, "spd_")
} else if (metric == "cooling device" ||
metric == "other units based sensor") &&
name == "system_air_flow" &&
unit == "CFM" {
// Air flow
metric = "air_flow"
name = strings.TrimSuffix(name, "_air_flow")
unit = "CubicFeetPerMinute"
} else if (metric == "processor" ||
metric == "other units based sensor") &&
(name == "cpu_utilization" ||
name == "io_utilization" ||
name == "mem_utilization" ||
name == "sys_utilization") &&
(unit == "unspecified" ||
unit == "%") {
// Utilization
metric = "utilization"
name = strings.TrimSuffix(name, "_utilization")
unit = "percent"
} else {
if false {
// Debug output for unprocessed metrics
fmt.Printf(
"host: '%s', metric: '%s', name: '%s', unit: '%s'\n",
host, metric, name, unit)
}
continue
}
// Skip excluded metrics
if clientConfig.isExcluded[metric] {
continue
}
// Parse sensor value
value, err := strconv.ParseFloat(v2[idxReading], 64)
if err != nil {
continue
}
y, err := lp.NewMessage(
metric,
map[string]string{
"hostname": host,
"type": "node",
"name": name,
},
map[string]string{
"source": r.name,
"group": "IPMI",
"unit": unit,
},
map[string]interface{}{
"value": value,
},
time.Now())
if err == nil {
mc, err := clientConfig.mp.ProcessMessage(y)
if err == nil && mc != nil {
m, err := r.mp.ProcessMessage(mc)
if err == nil && m != nil {
r.sink <- m
}
}
}
}
// Wait for command end
if err := command.Wait(); err != nil {
errMsg, _ := io.ReadAll(errBuf)
cclog.ComponentError(
r.name,
fmt.Sprintf("doReadMetric(): Failed to wait for the end of command \"%s\": %v\n",
strings.Replace(command.String(), clientConfig.Password, "<PW>", -1), err),
fmt.Sprintf("doReadMetric(): command stderr: \"%s\"\n", string(errMsg)),
)
}
}
}
}
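
To make the parsing above concrete, here is a hedged sketch of how one output line of `ipmi-sensors --always-prefix --comma-separated-output --no-header-output` is split; the host and sensor names are invented for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// One hypothetical output line: host prefix, then six comma-separated
	// fields (ID, Name, Type, Reading, Units, Event).
	line := "my-host-1-bmc: 42,01-CPU_Power,Power,120.00,Watts,'OK'"
	v1 := strings.Split(line, ": ") // -> [IPMI host, sensor record]
	v2 := strings.Split(v1[1], ",") // -> [ID, Name, Type, Reading, Units, Event]
	fmt.Println(v1[0], v2[1], v2[3], v2[4])
	// doReadMetric then lowercases the name, strips the "01-" enumeration
	// prefix, and, because the unit is "Watts", reports metric "power" with
	// name "cpu" and the parsed reading as the value.
}
```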
func (r *IPMIReceiver) Start() {
cclog.ComponentDebug(r.name, "START")
// Start IPMI receiver
r.wg.Add(1)
go func() {
defer r.wg.Done()
// Create ticker
ticker := time.NewTicker(r.config.Interval)
defer ticker.Stop()
for {
r.doReadMetric()
select {
case tickerTime := <-ticker.C:
// Check if we missed the ticker event
if since := time.Since(tickerTime); since > 5*time.Second {
cclog.ComponentInfo(r.name, "Missed ticker event for more than", since)
}
// process ticker event -> continue
continue
case <-r.done:
// process done event
return
}
}
}()
cclog.ComponentDebug(r.name, "STARTED")
}
// Close receiver: close network connection, close files, close libraries, ...
func (r *IPMIReceiver) Close() {
cclog.ComponentDebug(r.name, "CLOSE")
// Send the signal and wait
close(r.done)
r.wg.Wait()
cclog.ComponentDebug(r.name, "DONE")
}
// NewIPMIReceiver creates a new instance of the IPMI receiver
// Initialize the receiver by giving it a name and reading in the config JSON
func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
var err error
r := new(IPMIReceiver)
// Config options from config file
configJSON := struct {
defaultReceiverConfig
// How often the IPMI sensor metrics should be read and sent to the sink (default: 30 s)
IntervalString string `json:"interval,omitempty"`
// Maximum number of simultaneous IPMI connections (default: 64)
Fanout int `json:"fanout,omitempty"`
// Out of band IPMI driver (default: LAN_2_0)
DriverType string `json:"driver_type,omitempty"`
// Default client username, password and endpoint
Username *string `json:"username"` // User name to authenticate with
Password *string `json:"password"` // Password to use for authentication
Endpoint *string `json:"endpoint"` // URL of the IPMI device
// Globally excluded metrics
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ClientConfigs []struct {
Fanout int `json:"fanout,omitempty"` // Maximum number of simultaneous IPMI connections (default: 64)
DriverType string `json:"driver_type,omitempty"` // Out of band IPMI driver (default: LAN_2_0)
HostList string `json:"host_list"` // List of hosts with the same client configuration
Username *string `json:"username"` // User name to authenticate with
Password *string `json:"password"` // Password to use for authentication
Endpoint *string `json:"endpoint"` // URL of the IPMI service
// Per client excluded metrics
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
// Additional command line options for ipmi-sensors
CLIOptions []string `json:"cli_options,omitempty"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
} `json:"client_config"`
}{
// Set defaults values
// Allow overwriting these defaults by reading config JSON
Fanout: 64,
DriverType: "LAN_2_0",
IntervalString: "30s",
}
// Set name of IPMIReceiver
r.name = fmt.Sprintf("IPMIReceiver(%s)", name)
// Create done channel
r.done = make(chan bool)
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
// Set static information
//r.meta = map[string]string{"source": r.name}
r.mp.AddAddMetaByCondition("true", "source", r.name)
// Read the IPMI receiver specific JSON config
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&configJSON); err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return nil, err
}
}
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Convert interval string representation to duration
r.config.Interval, err = time.ParseDuration(configJSON.IntervalString)
if err != nil {
err := fmt.Errorf(
"failed to parse duration string interval='%s': %w",
configJSON.IntervalString,
err,
)
cclog.Error(r.name, err)
return nil, err
}
// Create client config from JSON config
totalNumHosts := 0
for i := range configJSON.ClientConfigs {
clientConfigJSON := &configJSON.ClientConfigs[i]
var endpoint string
if clientConfigJSON.Endpoint != nil {
endpoint = *clientConfigJSON.Endpoint
} else if configJSON.Endpoint != nil {
endpoint = *configJSON.Endpoint
} else {
err := fmt.Errorf("client config number %v requires endpoint", i)
cclog.ComponentError(r.name, err)
return nil, err
}
fanout := configJSON.Fanout
if clientConfigJSON.Fanout != 0 {
fanout = clientConfigJSON.Fanout
}
driverType := configJSON.DriverType
if clientConfigJSON.DriverType != "" {
driverType = clientConfigJSON.DriverType
}
if driverType != "LAN" && driverType != "LAN_2_0" {
err := fmt.Errorf("client config number %v has invalid driver type %s", i, driverType)
cclog.ComponentError(r.name, err)
return nil, err
}
var protocol string
var host_pattern string
if e := strings.Split(endpoint, "://"); len(e) == 2 {
protocol = e[0]
host_pattern = e[1]
} else {
err := fmt.Errorf("client config number %v has invalid endpoint %s", i, endpoint)
cclog.ComponentError(r.name, err)
return nil, err
}
var username string
if clientConfigJSON.Username != nil {
username = *clientConfigJSON.Username
} else if configJSON.Username != nil {
username = *configJSON.Username
} else {
err := fmt.Errorf("client config number %v requires username", i)
cclog.ComponentError(r.name, err)
return nil, err
}
var password string
if clientConfigJSON.Password != nil {
password = *clientConfigJSON.Password
} else if configJSON.Password != nil {
password = *configJSON.Password
} else {
err := fmt.Errorf("client config number %v requires password", i)
cclog.ComponentError(r.name, err)
return nil, err
}
// Create mapping between IPMI host name and node host name
// This also guarantees that all IPMI host names are unique
ipmi2HostMapping := make(map[string]string)
hostList, err := hostlist.Expand(clientConfigJSON.HostList)
if err != nil {
err := fmt.Errorf("client config number %d failed to parse host list %s: %v",
i, clientConfigJSON.HostList, err)
cclog.ComponentError(r.name, err)
return nil, err
}
for _, host := range hostList {
ipmiHost := strings.Replace(host_pattern, "%h", host, -1)
ipmi2HostMapping[ipmiHost] = host
}
numHosts := len(ipmi2HostMapping)
totalNumHosts += numHosts
ipmiHostList := make([]string, 0, numHosts)
for ipmiHost := range ipmi2HostMapping {
ipmiHostList = append(ipmiHostList, ipmiHost)
}
// Additional command line options
for _, v := range clientConfigJSON.CLIOptions {
switch {
case v == "-u" || strings.HasPrefix(v, "--username"):
err := fmt.Errorf("client config number %v: do not set username in cli_options. Use json config username instead", i)
cclog.ComponentError(r.name, err)
return nil, err
case v == "-p" || strings.HasPrefix(v, "--password"):
err := fmt.Errorf("client config number %v: do not set password in cli_options. Use json config password instead", i)
cclog.ComponentError(r.name, err)
return nil, err
case v == "-h" || strings.HasPrefix(v, "--hostname"):
err := fmt.Errorf("client config number %v: do not set hostname in cli_options. Use json config host_list instead", i)
cclog.ComponentError(r.name, err)
return nil, err
case v == "-D" || strings.HasPrefix(v, "--driver-type"):
err := fmt.Errorf("client config number %v: do not set driver type in cli_options. Use json config driver_type instead", i)
cclog.ComponentError(r.name, err)
return nil, err
case v == "-F" || strings.HasPrefix(v, " --fanout"):
err := fmt.Errorf("client config number %v: do not set fanout in cli_options. Use json config fanout instead", i)
cclog.ComponentError(r.name, err)
return nil, err
case v == "--always-prefix" ||
v == "--sdr-cache-recreate" ||
v == "--interpret-oem-data" ||
v == "--ignore-not-available-sensors" ||
v == "--ignore-unrecognized-events" ||
v == "--comma-separated-output" ||
v == "--no-header-output" ||
v == "--non-abbreviated-units":
err := fmt.Errorf("client config number %v: Do not use option %s in cli_options, it is used internally", i, v)
cclog.ComponentError(r.name, err)
return nil, err
}
}
cliOptions := make([]string, 0)
cliOptions = append(cliOptions, clientConfigJSON.CLIOptions...)
// Check whether a metric is excluded globally or per client
isExcluded := make(map[string]bool)
for _, key := range clientConfigJSON.ExcludeMetrics {
isExcluded[key] = true
}
for _, key := range configJSON.ExcludeMetrics {
isExcluded[key] = true
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
if len(clientConfigJSON.MessageProcessor) > 0 {
err = p.FromConfigJSON(clientConfigJSON.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
r.config.ClientConfigs = append(
r.config.ClientConfigs,
IPMIReceiverClientConfig{
Protocol: protocol,
Fanout: fanout,
DriverType: driverType,
NumHosts: numHosts,
IPMIHosts: strings.Join(ipmiHostList, ","),
IPMI2HostMapping: ipmi2HostMapping,
Username: username,
Password: password,
CLIOptions: cliOptions,
isExcluded: isExcluded,
mp: p,
})
}
if totalNumHosts == 0 {
err := fmt.Errorf("at least one IPMI host config is required")
cclog.ComponentError(r.name, err)
return nil, err
}
cclog.ComponentInfo(r.name, "monitoring", totalNumHosts, "IPMI hosts")
return r, nil
}
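The endpoint handling above is easiest to see with concrete values. Below is a minimal sketch, assuming the `hostlist` package that the `hostlist.Expand` call above comes from is the one shipped in this repository under `pkg/hostlist`; the endpoint pattern and host list are the example values from the README further down:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ClusterCockpit/cc-metric-collector/pkg/hostlist"
)

func main() {
	// Endpoint pattern and host list as they would appear in the receiver config
	endpoint := "ipmi-sensors://%h-bmc"
	hostPattern := strings.SplitN(endpoint, "://", 2)[1] // "%h-bmc"

	hosts, err := hostlist.Expand("n[1,2-4]") // -> [n1 n2 n3 n4]
	if err != nil {
		panic(err)
	}

	// The same mapping the receiver builds: IPMI host name -> node host name
	ipmi2Host := make(map[string]string)
	for _, host := range hosts {
		ipmi2Host[strings.ReplaceAll(hostPattern, "%h", host)] = host
	}
	fmt.Println(ipmi2Host) // map[n1-bmc:n1 n2-bmc:n2 n3-bmc:n3 n4-bmc:n4]
}
```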

View File

@@ -1,48 +0,0 @@
## IPMI Receiver
The IPMI Receiver uses `ipmi-sensors` from the [FreeIPMI](https://www.gnu.org/software/freeipmi/) project to read IPMI sensor readings and sensor data repository (SDR) information. The available metrics depend on the sensors provided by the hardware vendor but typically contain temperature, fan speed, voltage and power metrics.
### Configuration structure
```json
{
"<IPMI receiver name>": {
"type": "ipmi",
"interval": "30s",
"fanout": 256,
"username": "<Username>",
"password": "<Password>",
"endpoint": "ipmi-sensors://%h-bmc",
"exclude_metrics": [ "fan_speed", "voltage" ],
"client_config": [
{
"host_list": "n[1,2-4]"
},
{
"host_list": "n[5-6]",
"driver_type": "LAN",
"cli_options": [ "--workaround-flags=..." ],
"password": "<Password 2>"
}
]
}
}
```
Global settings:
- `interval`: How often the IPMI sensor metrics should be read and sent to the sink (default: 30 s)
Global and per IPMI device settings (per IPMI device settings overwrite the global settings):
- `exclude_metrics`: list of excluded metrics e.g. fan_speed, power, temperature, utilization, voltage
- `fanout`: Maximum number of simultaneous IPMI connections (default: 64)
- `driver_type`: Out of band IPMI driver (default: LAN_2_0)
- `username`: User name to authenticate with
- `password`: Password to use for authentication
- `endpoint`: URL of the IPMI device (placeholder `%h` gets replaced by the hostname)
Per IPMI device settings:
- `host_list`: List of hosts with the same client configuration
- `cli_options`: Additional command line options for ipmi-sensors

View File

@@ -1,15 +1,11 @@
 package receivers
 
 import (
-	"encoding/json"
-
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
-	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
+	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
 )
 
 type defaultReceiverConfig struct {
 	Type string `json:"type"`
-	MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
 }
 
 // Receiver configuration: Listen address, port
@@ -23,15 +19,14 @@ type ReceiverConfig struct {
 type receiver struct {
 	name string
-	sink chan lp.CCMessage
-	mp   mp.MessageProcessor
+	sink chan lp.CCMetric
 }
 
 type Receiver interface {
 	Start()
 	Close()                         // Close / finish metric receiver
 	Name() string                   // Name of the metric receiver
-	SetSink(sink chan lp.CCMessage) // Set sink channel
+	SetSink(sink chan lp.CCMetric)  // Set sink channel
 }
 
 // Name returns the name of the metric receiver
@@ -40,6 +35,6 @@ func (r *receiver) Name() string {
 }
 
 // SetSink set the sink channel
-func (r *receiver) SetSink(sink chan lp.CCMessage) {
+func (r *receiver) SetSink(sink chan lp.CCMetric) {
 	r.sink = sink
 }

View File

@@ -4,110 +4,54 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"os"
"time" "time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor" influx "github.com/influxdata/line-protocol"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
nats "github.com/nats-io/nats.go" nats "github.com/nats-io/nats.go"
) )
type NatsReceiverConfig struct { type NatsReceiverConfig struct {
defaultReceiverConfig Type string `json:"type"`
Addr string `json:"address"` Addr string `json:"address"`
Port string `json:"port"` Port string `json:"port"`
Subject string `json:"subject"` Subject string `json:"subject"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
NkeyFile string `json:"nkey_file,omitempty"`
} }
type NatsReceiver struct { type NatsReceiver struct {
receiver receiver
nc *nats.Conn nc *nats.Conn
//meta map[string]string handler *influx.MetricHandler
config NatsReceiverConfig parser *influx.Parser
meta map[string]string
config NatsReceiverConfig
}
var DefaultTime = func() time.Time {
return time.Unix(42, 0)
} }
// Start subscribes to the configured NATS subject
// Messages wil be handled by r._NatsReceive
func (r *NatsReceiver) Start() { func (r *NatsReceiver) Start() {
cclog.ComponentDebug(r.name, "START") cclog.ComponentDebug(r.name, "START")
r.nc.Subscribe(r.config.Subject, r._NatsReceive) r.nc.Subscribe(r.config.Subject, r._NatsReceive)
} }
// _NatsReceive receives subscribed messages from the NATS server
func (r *NatsReceiver) _NatsReceive(m *nats.Msg) { func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
metrics, err := r.parser.Parse(m.Data)
if r.sink != nil { if err == nil {
d := influx.NewDecoderWithBytes(m.Data) for _, m := range metrics {
for d.Next() { y := lp.FromInfluxMetric(m)
for k, v := range r.meta {
// Decode measurement name y.AddMeta(k, v)
measurement, err := d.Measurement()
if err != nil {
msg := "_NatsReceive: Failed to decode measurement: " + err.Error()
cclog.ComponentError(r.name, msg)
return
} }
if r.sink != nil {
// Decode tags r.sink <- y
tags := make(map[string]string)
for {
key, value, err := d.NextTag()
if err != nil {
msg := "_NatsReceive: Failed to decode tag: " + err.Error()
cclog.ComponentError(r.name, msg)
return
}
if key == nil {
break
}
tags[string(key)] = string(value)
}
// Decode fields
fields := make(map[string]interface{})
for {
key, value, err := d.NextField()
if err != nil {
msg := "_NatsReceive: Failed to decode field: " + err.Error()
cclog.ComponentError(r.name, msg)
return
}
if key == nil {
break
}
fields[string(key)] = value.Interface()
}
// Decode time stamp
t, err := d.Time(influx.Nanosecond, time.Time{})
if err != nil {
msg := "_NatsReceive: Failed to decode time: " + err.Error()
cclog.ComponentError(r.name, msg)
return
}
y, _ := lp.NewMessage(
string(measurement),
tags,
nil,
fields,
t,
)
m, err := r.mp.ProcessMessage(y)
if err == nil && m != nil {
r.sink <- m
} }
} }
} }
} }
// Close closes the connection to the NATS server
func (r *NatsReceiver) Close() { func (r *NatsReceiver) Close() {
if r.nc != nil { if r.nc != nil {
cclog.ComponentDebug(r.name, "CLOSE") cclog.ComponentDebug(r.name, "CLOSE")
@@ -115,14 +59,10 @@ func (r *NatsReceiver) Close() {
} }
} }
// NewNatsReceiver creates a new Receiver which subscribes to messages from a NATS server
func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) { func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
var uinfo nats.Option = nil
r := new(NatsReceiver) r := new(NatsReceiver)
r.name = fmt.Sprintf("NatsReceiver(%s)", name) r.name = fmt.Sprintf("NatsReceiver(%s)", name)
r.config.Addr = nats.DefaultURL
// Read configuration file, allow overwriting default config
r.config.Addr = "localhost"
r.config.Port = "4222" r.config.Port = "4222"
if len(config) > 0 { if len(config) > 0 {
err := json.Unmarshal(config, &r.config) err := json.Unmarshal(config, &r.config)
@@ -136,45 +76,17 @@ func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
len(r.config.Subject) == 0 { len(r.config.Subject) == 0 {
return nil, errors.New("not all configuration variables set required by NatsReceiver") return nil, errors.New("not all configuration variables set required by NatsReceiver")
} }
p, err := mp.NewMessageProcessor() r.meta = map[string]string{"source": r.name}
if err != nil { uri := fmt.Sprintf("%s:%s", r.config.Addr, r.config.Port)
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error()) cclog.ComponentDebug(r.name, "NewNatsReceiver", uri, "Subject", r.config.Subject)
} if nc, err := nats.Connect(uri); err == nil {
r.mp = p
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Set metadata
// r.meta = map[string]string{
// "source": r.name,
// }
r.mp.AddAddMetaByCondition("true", "source", r.name)
if len(r.config.User) > 0 && len(r.config.Password) > 0 {
uinfo = nats.UserInfo(r.config.User, r.config.Password)
} else if len(r.config.NkeyFile) > 0 {
_, err := os.Stat(r.config.NkeyFile)
if err == nil {
uinfo = nats.UserCredentials(r.config.NkeyFile)
} else {
cclog.ComponentError(r.name, "NKEY file", r.config.NkeyFile, "does not exist: %v", err.Error())
return nil, err
}
}
// Connect to NATS server
url := fmt.Sprintf("nats://%s:%s", r.config.Addr, r.config.Port)
cclog.ComponentDebug(r.name, "NewNatsReceiver", url, "Subject", r.config.Subject)
if nc, err := nats.Connect(url, uinfo); err == nil {
r.nc = nc r.nc = nc
} else { } else {
r.nc = nil r.nc = nil
return nil, err return nil, err
} }
r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
r.parser.SetTimeFunc(DefaultTime)
return r, nil return r, nil
} }
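For context, the right-hand (re-added) code path delegates all decoding to the v1 `line-protocol` parser instead of decoding field by field. A standalone sketch of that usage, independent of NATS and assuming the v1 API as it appears above (`NewMetricHandler`, `NewParser`, `Parse`); the sample line is invented:

```go
package main

import (
	"fmt"

	influx "github.com/influxdata/line-protocol"
)

func main() {
	handler := influx.NewMetricHandler()
	parser := influx.NewParser(handler)

	// A NATS message payload is plain influx line protocol text
	payload := []byte("temp,hostname=n1,type=node value=42.0 1694777161164284635\n")

	metrics, err := parser.Parse(payload)
	if err != nil {
		panic(err)
	}
	for _, m := range metrics {
		// Name, tags and fields are available through the Metric interface
		fmt.Println(m.Name(), m.Time())
		for _, f := range m.FieldList() {
			fmt.Printf("  %s = %v\n", f.Key, f.Value)
		}
	}
}
```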

View File

@@ -10,10 +10,7 @@ The `nats` receiver can be used receive metrics from the NATS network.
     "type": "nats",
     "address" : "nats-server.example.org",
     "port" : "4222",
-    "subject" : "subject",
-    "user": "natsuser",
-    "password": "natssecret",
-    "nkey_file": "/path/to/nkey_file"
+    "subject" : "subject"
   }
 }
 ```
@@ -22,35 +19,3 @@ The `nats` receiver can be used receive metrics from the NATS network.
 - `address`: Address of the NATS control server
 - `port`: Port of the NATS control server
 - `subject`: Subscribes to this subject and receive metrics
-- `user`: Connect to nats using this user
-- `password`: Connect to nats using this password
-- `nkey_file`: Path to credentials file with NKEY
-
-### Debugging
-
-- Install NATS server and command line client
-- Start NATS server
-
-  ```bash
-  nats-server --net nats-server.example.org --port 4222
-  ```
-
-- Check NATS server works as expected
-
-  ```bash
-  nats --server=nats-server-db.example.org:4222 server check
-  ```
-
-- Use NATS command line client to subscribe to all messages
-
-  ```bash
-  nats --server=nats-server-db.example.org:4222 sub ">"
-  ```
-
-- Use NATS command line client to send message to NATS receiver
-
-  ```bash
-  nats --server=nats-server-db.example.org:4222 pub subject \
-  "myMetric,hostname=myHost,type=hwthread,type-id=0,unit=Hz value=400000i 1694777161164284635
-  myMetric,hostname=myHost,type=hwthread,type-id=1,unit=Hz value=400001i 1694777161164284635"
-  ```
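To watch messages arrive programmatically rather than through the `nats` CLI shown in the removed debugging section, a minimal Go subscriber could look like the sketch below. The server address and subject are the example values from above, not real endpoints:

```go
package main

import (
	"fmt"

	nats "github.com/nats-io/nats.go"
)

func main() {
	// Connect the same way the receiver builds its URI: "<address>:<port>"
	nc, err := nats.Connect("nats-server.example.org:4222")
	if err != nil {
		panic(err)
	}
	defer nc.Close()

	// Each message on the subject carries influx line protocol text
	if _, err := nc.Subscribe("subject", func(m *nats.Msg) {
		fmt.Printf("received %d bytes: %s", len(m.Data), string(m.Data))
	}); err != nil {
		panic(err)
	}

	select {} // block until interrupted
}
```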

View File

@@ -12,8 +12,8 @@ import (
"sync" "sync"
"time" "time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message" lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
) )
type PrometheusReceiverConfig struct { type PrometheusReceiverConfig struct {
@@ -74,7 +74,7 @@ func (r *PrometheusReceiver) Start() {
} }
value, err := strconv.ParseFloat(lineSplit[1], 64) value, err := strconv.ParseFloat(lineSplit[1], 64)
if err == nil { if err == nil {
y, err := lp.NewMessage(name, tags, r.meta, map[string]interface{}{"value": value}, t) y, err := lp.New(name, tags, r.meta, map[string]interface{}{"value": value}, t)
if err == nil { if err == nil {
r.sink <- y r.sink <- y
} }
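The hunk above shows the core of the scrape loop: each line of the Prometheus text exposition is split on whitespace and the second token is parsed as the value. A simplified, self-contained sketch of that step follows; the sample line is invented, and real parsing also has to turn the label block into tags:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// One line of Prometheus text exposition format, e.g. from a /metrics endpoint
	line := `node_load1{instance="n1"} 0.42`

	lineSplit := strings.Fields(line)
	name := lineSplit[0]
	// Strip a label block if present
	if idx := strings.Index(name, "{"); idx >= 0 {
		name = name[:idx]
	}
	value, err := strconv.ParseFloat(lineSplit[1], 64)
	if err == nil {
		fmt.Printf("name=%s value=%f\n", name, value)
	}
}
```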

View File

@@ -2,31 +2,29 @@ package receivers
 import (
 	"encoding/json"
-	"fmt"
 	"os"
 	"sync"
 
-	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
 )
 
 var AvailableReceivers = map[string]func(name string, config json.RawMessage) (Receiver, error){
-	"http":    NewHttpReceiver,
-	"ipmi":    NewIPMIReceiver,
-	"nats":    NewNatsReceiver,
-	"redfish": NewRedfishReceiver,
+	"nats": NewNatsReceiver,
 }
 
 type receiveManager struct {
 	inputs []Receiver
-	output chan lp.CCMessage
+	output chan lp.CCMetric
+	done   chan bool
+	wg     *sync.WaitGroup
 	config []json.RawMessage
 }
 
 type ReceiveManager interface {
 	Init(wg *sync.WaitGroup, receiverConfigFile string) error
 	AddInput(name string, rawConfig json.RawMessage) error
-	AddOutput(output chan lp.CCMessage)
+	AddOutput(output chan lp.CCMetric)
 	Start()
 	Close()
 }
@@ -35,6 +33,8 @@ func (rm *receiveManager) Init(wg *sync.WaitGroup, receiverConfigFile string) er
 	// Initialize struct fields
 	rm.inputs = make([]Receiver, 0)
 	rm.output = nil
+	rm.done = make(chan bool)
+	rm.wg = wg
 	rm.config = make([]json.RawMessage, 0)
 
 	configFile, err := os.Open(receiverConfigFile)
@@ -58,7 +58,7 @@ func (rm *receiveManager) Init(wg *sync.WaitGroup, receiverConfigFile string) er
 }
 
 func (rm *receiveManager) Start() {
-	cclog.ComponentDebug("ReceiveManager", "START")
+	rm.wg.Add(1)
 
 	for _, r := range rm.inputs {
 		cclog.ComponentDebug("ReceiveManager", "START", r.Name())
@@ -74,13 +74,9 @@ func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error
 		cclog.ComponentError("ReceiveManager", "SKIP", config.Type, "JSON config error:", err.Error())
 		return err
 	}
-	if config.Type == "" {
-		cclog.ComponentError("ReceiveManager", "SKIP", "JSON config for receiver", name, "does not contain a receiver type")
-		return fmt.Errorf("JSON config for receiver %s does not contain a receiver type", name)
-	}
 	if _, found := AvailableReceivers[config.Type]; !found {
-		cclog.ComponentError("ReceiveManager", "SKIP", "unknown receiver type:", config.Type)
-		return fmt.Errorf("unknown receiver type: %s", config.Type)
+		cclog.ComponentError("ReceiveManager", "SKIP", config.Type, "unknown receiver:", err.Error())
+		return err
 	}
 	r, err := AvailableReceivers[config.Type](name, rawConfig)
 	if err != nil {
@@ -93,7 +89,7 @@ func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error
 	return nil
 }
 
-func (rm *receiveManager) AddOutput(output chan lp.CCMessage) {
+func (rm *receiveManager) AddOutput(output chan lp.CCMetric) {
 	rm.output = output
 	for _, r := range rm.inputs {
 		r.SetSink(rm.output)
@@ -101,19 +97,16 @@ func (rm *receiveManager) AddOutput(output chan lp.CCMessage) {
 }
 
 func (rm *receiveManager) Close() {
-	cclog.ComponentDebug("ReceiveManager", "CLOSE")
-
-	// Close all receivers
 	for _, r := range rm.inputs {
 		cclog.ComponentDebug("ReceiveManager", "CLOSE", r.Name())
 		r.Close()
 	}
+	rm.wg.Done()
 
-	cclog.ComponentDebug("ReceiveManager", "DONE")
+	cclog.ComponentDebug("ReceiveManager", "CLOSE")
 }
 
 func New(wg *sync.WaitGroup, receiverConfigFile string) (ReceiveManager, error) {
-	r := new(receiveManager)
+	r := &receiveManager{}
 	err := r.Init(wg, receiverConfigFile)
 	if err != nil {
 		return nil, err
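A sketch of how the manager's lifecycle fits together under the right-hand (WaitGroup-based) variant: build it from a config file, attach a sink channel, start, drain, close. This is hypothetical wiring, and note that `internal/...` packages are only importable from within the collector module itself:

```go
package main

import (
	"log"
	"sync"

	lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
	"github.com/ClusterCockpit/cc-metric-collector/receivers"
)

func main() {
	wg := new(sync.WaitGroup)

	// Build the manager from a receiver config file (see the examples above)
	rm, err := receivers.New(wg, "receivers.json")
	if err != nil {
		log.Fatal(err)
	}

	// All receivers forward their metrics into this channel
	sink := make(chan lp.CCMetric, 1024)
	rm.AddOutput(sink)
	rm.Start()

	// Drain a few metrics, then shut down
	for i := 0; i < 10; i++ {
		log.Println(<-sink)
	}
	rm.Close()
	wg.Wait()
}
```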

File diff suppressed because it is too large

View File

@@ -1,65 +0,0 @@
## Redfish receiver
The Redfish receiver uses the [Redfish (specification)](https://www.dmtf.org/standards/redfish) to query thermal and power metrics. Thermal metrics may include various fan speeds and temperatures. Power metrics may include the current power consumption of various hardware components. It may also include the minimum, maximum and average power consumption of these components in a given time interval. The receiver will poll each configured redfish device once in a given interval. Multiple devices can be accessed in parallel to increase throughput.
### Configuration structure
```json
{
"<redfish receiver name>": {
"type": "redfish",
"username": "<Username>",
"password": "<Password>",
"endpoint": "https://%h-bmc",
"exclude_metrics": [ "min_consumed_watts" ],
"client_config": [
{
"host_list": "n[1,2-4]"
},
{
"host_list": "n5",
"disable_power_metrics": true,
"disable_processor_metrics": true,
"disable_thermal_metrics": true
},
{
"host_list": "n6" ],
"username": "<Username 2>",
"password": "<Password 2>",
"endpoint": "https://%h-BMC",
"disable_sensor_metrics": true
}
]
}
}
```
Global settings:
- `fanout`: Maximum number of simultaneous redfish connections (default: 64)
- `interval`: How often the redfish power metrics should be read and sent to the sink (default: 30 s)
- `http_insecure`: Control whether a client verifies the server's certificate (default: true == do not verify server's certificate)
- `http_timeout`: Time limit for requests made by this HTTP client (default: 10 s)
Global and per redfish device settings (per redfish device settings overwrite the global settings):
- `disable_power_metrics`:
disable collection of power metrics
(`/redfish/v1/Chassis/{ChassisId}/Power`)
- `disable_processor_metrics`:
disable collection of processor metrics
(`/redfish/v1/Systems/{ComputerSystemId}/Processors/{ProcessorId}/ProcessorMetrics`)
- `disable_sensors`:
disable collection of fan, power and thermal sensor metrics
(`/redfish/v1/Chassis/{ChassisId}/Sensors/{SensorId}`)
- `disable_thermal_metrics`:
disable collection of thermal metrics
(`/redfish/v1/Chassis/{ChassisId}/Thermal`)
- `exclude_metrics`: list of excluded metrics
- `username`: User name to authenticate with
- `password`: Password to use for authentication
- `endpoint`: URL of the redfish service (placeholder `%h` gets replaced by the hostname)
Per redfish device settings:
- `host_list`: List of hosts with the same client configuration
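
The receiver itself drives these endpoints through a Redfish client library; purely for illustration, the same Power query can be made with the standard library alone. The host `n1-bmc`, chassis id `1` and the credentials are placeholders, while `PowerControl`/`PowerConsumedWatts` are standard Redfish schema fields:

```go
package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// Minimal subset of the Redfish Power resource: the PowerControl entries
// carry the current consumption as PowerConsumedWatts.
type power struct {
	PowerControl []struct {
		PowerConsumedWatts float64 `json:"PowerConsumedWatts"`
	} `json:"PowerControl"`
}

func main() {
	// http_insecure=true corresponds to skipping certificate verification,
	// http_timeout to the client timeout
	client := &http.Client{
		Timeout:   10 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}

	// Endpoint with %h already substituted
	req, err := http.NewRequest("GET", "https://n1-bmc/redfish/v1/Chassis/1/Power", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("user", "password")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var p power
	if err := json.NewDecoder(resp.Body).Decode(&p); err != nil {
		panic(err)
	}
	for _, pc := range p.PowerControl {
		fmt.Println("consumed_watts:", pc.PowerConsumedWatts)
	}
}
```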

View File

@@ -4,14 +4,12 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger" cclog "github.com/ClusterCockpit/cc-metric-collector/internal/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
) )
// SampleReceiver configuration: receiver type, listen address, port // SampleReceiver configuration: receiver type, listen address, port
// The defaultReceiverConfig contains the keys 'type' and 'process_messages'
type SampleReceiverConfig struct { type SampleReceiverConfig struct {
defaultReceiverConfig Type string `json:"type"`
Addr string `json:"address"` Addr string `json:"address"`
Port string `json:"port"` Port string `json:"port"`
} }
@@ -21,6 +19,7 @@ type SampleReceiver struct {
config SampleReceiverConfig config SampleReceiverConfig
// Storage for static information // Storage for static information
meta map[string]string
// Use in case of own go routine // Use in case of own go routine
// done chan bool // done chan bool
// wg sync.WaitGroup // wg sync.WaitGroup
@@ -37,26 +36,16 @@ func (r *SampleReceiver) Start() {
// or use own go routine but always make sure it exits // or use own go routine but always make sure it exits
// as soon as it gets the signal of the r.done channel // as soon as it gets the signal of the r.done channel
//
// r.done = make(chan bool)
// r.wg.Add(1) // r.wg.Add(1)
// go func() { // go func() {
// defer r.wg.Done() // for {
// // select {
// // Create ticker // case <-r.done:
// ticker := time.NewTicker(30 * time.Second) // r.wg.Done()
// defer ticker.Stop() // return
// // }
// for { // }
// readMetric() // r.wg.Done()
// select {
// case <-ticker.C:
// // process ticker event -> continue
// continue
// case <-r.done:
// return
// }
// }
// }() // }()
} }
@@ -80,19 +69,8 @@ func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
// The name should be chosen in such a way that different instances of SampleReceiver can be distinguished // The name should be chosen in such a way that different instances of SampleReceiver can be distinguished
r.name = fmt.Sprintf("SampleReceiver(%s)", name) r.name = fmt.Sprintf("SampleReceiver(%s)", name)
// create new message processor
p, err := mp.NewMessageProcessor()
if err != nil {
cclog.ComponentError(r.name, "Initialization of message processor failed:", err.Error())
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
// Set static information // Set static information
err = r.mp.AddAddMetaByCondition("true", "source", r.name) r.meta = map[string]string{"source": r.name}
if err != nil {
cclog.ComponentError(r.name, fmt.Sprintf("Failed to add static information source=%s:", r.name), err.Error())
return nil, fmt.Errorf("failed to add static information source=%s: %v", r.name, err.Error())
}
// Set defaults in r.config // Set defaults in r.config
// Allow overwriting these defaults by reading config JSON // Allow overwriting these defaults by reading config JSON
@@ -106,15 +84,6 @@ func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
} }
} }
// Add message processor config
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
cclog.ComponentError(r.name, "Failed parsing JSON for message processor:", err.Error())
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Check that all required fields in the configuration are set // Check that all required fields in the configuration are set
// Use 'if len(r.config.Option) > 0' for strings // Use 'if len(r.config.Option) > 0' for strings
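The commented skeleton on the left-hand side sketches the usual ticker-plus-done-channel loop for a polling receiver. Extracted into a runnable form below; the interval and the printed stand-in for `readMetric()` are arbitrary:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// poller demonstrates the ticker/done pattern from the sample receiver skeleton.
type poller struct {
	done chan bool
	wg   sync.WaitGroup
}

func (p *poller) start() {
	p.done = make(chan bool)
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		ticker := time.NewTicker(500 * time.Millisecond)
		defer ticker.Stop()
		for {
			fmt.Println("read metrics ...") // stands in for readMetric()
			select {
			case <-ticker.C:
				continue // next polling round
			case <-p.done:
				return // shutdown requested
			}
		}
	}()
}

func (p *poller) close() {
	close(p.done)
	p.wg.Wait()
}

func main() {
	var p poller
	p.start()
	time.Sleep(2 * time.Second)
	p.close()
}
```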

View File

@@ -1,23 +1,22 @@
 {
-    "process_messages" : {
-        "add_tag_if": [
-            {
-                "key" : "cluster",
-                "value" : "testcluster",
-                "if" : "true"
-            },
-            {
-                "key" : "test",
-                "value" : "testing",
-                "if" : "name == 'temp_package_id_0'"
-            }
-        ],
-        "delete_tag_if": [
-            {
-                "key" : "unit",
-                "if" : "true"
-            }
-        ]
-    },
+    "add_tags" : [
+        {
+            "key" : "cluster",
+            "value" : "testcluster",
+            "if" : "*"
+        },
+        {
+            "key" : "test",
+            "value" : "testing",
+            "if" : "name == 'temp_package_id_0'"
+        }
+    ],
+    "delete_tags" : [
+        {
+            "key" : "unit",
+            "value" : "*",
+            "if" : "*"
+        }
+    ],
     "interval_timestamp" : true
 }
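The `if` strings on the left-hand side are boolean expressions rather than the older `*` wildcard. To the best of my knowledge the collector evaluates such conditions with the gval expression library, rewriting the single-quoted string form from the config into double quotes first. A small sketch of such an evaluation; the parameter map is a stand-in for one metric's fields:

```go
package main

import (
	"fmt"

	"github.com/PaesslerAG/gval"
)

func main() {
	// Stand-in for the fields of one metric
	params := map[string]interface{}{"name": "temp_package_id_0"}

	// "true" always matches; the second condition matches this metric by name
	for _, cond := range []string{"true", `name == "temp_package_id_0"`} {
		match, err := gval.Evaluate(cond, params)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-30s -> %v\n", cond, match)
	}
}
```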

View File

@@ -6,7 +6,7 @@ CC_HOME=/tmp
 LOG_DIR=/var/log
 
-DATA_DIR=/var/lib/cc-metric-collector
+DATA_DIR=/var/lib/grafana
 
 MAX_OPEN_FILES=10000
@@ -15,9 +15,3 @@ CONF_DIR=/etc/cc-metric-collector
 CONF_FILE=/etc/cc-metric-collector/cc-metric-collector.json
 
 RESTART_ON_UPGRADE=true
-
-# Golang runtime debugging. (see: https://pkg.go.dev/runtime)
-# GODEBUG=gctrace=1
-
-# Golang garbage collection target percentage
-# GOGC=100

View File

@@ -1,16 +0,0 @@
#!/usr/bin/make -f
# You must remove unused comment lines for the released package.
#export DH_VERBOSE = 1
#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
%:
dh $@
override_dh_auto_build:
make
override_dh_auto_install:
make PREFIX=/usr install

Some files were not shown because too many files have changed in this diff