Mirror of https://github.com/ClusterCockpit/cc-metric-collector.git (synced 2025-07-19 19:31:41 +02:00)

Compare commits (13 commits): rapl_colle... → likwid_col...
Commits (SHA1):

9ca73a9f50
0186dce521
16c796a2b8
b6c4769db3
7bbee70c14
902f4349b6
6aada60d97
06ca37e705
9b671ce68f
226e8425cb
a37f6603c8
78902305e8
e7b77f7721
.github/workflows/Release.yml (vendored, 337 lines changed)
@@ -15,7 +15,7 @@ jobs:
   #
   # Build on AlmaLinux 8 using go-toolset
   #
-  AlmaLinux8-RPM-build:
+  AlmaLinux-RPM-build:
     runs-on: ubuntu-latest
     # See: https://hub.docker.com/_/almalinux
     container: almalinux:8
@@ -41,17 +41,14 @@ jobs:
           submodules: recursive
           fetch-depth: 0

-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
+      # Use dnf to install build dependencies
+      - name: Install build dependencies
         run: |
-          dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
+          dnf --assumeyes install \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.21.7-1.module_el8+960+4060efbe.noarch.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm

       - name: RPM build MetricCollector
         id: rpmbuild
@@ -73,101 +70,20 @@ jobs:
           NEW_SRPM=${OLD_SRPM/el8/alma8}
           mv "${OLD_RPM}" "${NEW_RPM}"
           mv "${OLD_SRPM}" "${NEW_SRPM}"
-          echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT
-          echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT
+          echo "::set-output name=SRPM::${NEW_SRPM}"
+          echo "::set-output name=RPM::${NEW_RPM}"

       # See: https://github.com/actions/upload-artifact
       - name: Save RPM as artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v2
         with:
           name: cc-metric-collector RPM for AlmaLinux 8
           path: ${{ steps.rpmrename.outputs.RPM }}
-          overwrite: true
       - name: Save SRPM as artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v2
         with:
           name: cc-metric-collector SRPM for AlmaLinux 8
           path: ${{ steps.rpmrename.outputs.SRPM }}
-          overwrite: true
-
-  #
-  # Build on AlmaLinux 9 using go-toolset
-  #
-  AlmaLinux9-RPM-build:
-    runs-on: ubuntu-latest
-    # See: https://hub.docker.com/_/almalinux
-    container: almalinux:9
-    # The job outputs link to the outputs of the 'rpmrename' step
-    # Only job outputs can be used in child jobs
-    outputs:
-      rpm : ${{steps.rpmrename.outputs.RPM}}
-      srpm : ${{steps.rpmrename.outputs.SRPM}}
-    steps:
-
-      # Use dnf to install development packages
-      - name: Install development packages
-        run: |
-          dnf --assumeyes group install "Development Tools" "RPM Development Tools"
-          dnf --assumeyes install wget openssl-devel diffutils delve which
-
-      # Checkout git repository and submodules
-      # fetch-depth must be 0 to use git describe
-      # See: https://github.com/marketplace/actions/checkout
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-          fetch-depth: 0
-
-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
-        run: |
-          dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
-
-      - name: RPM build MetricCollector
-        id: rpmbuild
-        run: |
-          git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
-          make RPM
-
-      # AlmaLinux 9 is a derivate of RedHat Enterprise Linux 8 (UBI8),
-      # so the created RPM both contain the substring 'el9' in the RPM file names
-      # This step replaces the substring 'el8' to 'alma8'. It uses the move operation
-      # because it is unclear whether the default AlmaLinux 8 container contains the
-      # 'rename' command. This way we also get the new names for output.
-      - name: Rename RPMs (s/el9/alma9/)
-        id: rpmrename
-        run: |
-          OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
-          OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
-          NEW_RPM="${OLD_RPM/el9/alma9}"
-          NEW_SRPM=${OLD_SRPM/el9/alma9}
-          mv "${OLD_RPM}" "${NEW_RPM}"
-          mv "${OLD_SRPM}" "${NEW_SRPM}"
-          echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT
-          echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT
-
-      # See: https://github.com/actions/upload-artifact
-      - name: Save RPM as artifact
-        uses: actions/upload-artifact@v4
-        with:
-          name: cc-metric-collector RPM for AlmaLinux 9
-          path: ${{ steps.rpmrename.outputs.RPM }}
-          overwrite: true
-      - name: Save SRPM as artifact
-        uses: actions/upload-artifact@v4
-        with:
-          name: cc-metric-collector SRPM for AlmaLinux 9
-          path: ${{ steps.rpmrename.outputs.SRPM }}
-          overwrite: true

   #
   # Build on UBI 8 using go-toolset
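The rename steps above rely on bash parameter expansion rather than an external `rename` tool: `${OLD_RPM/el8/alma8}` expands to the value of `OLD_RPM` with the first occurrence of `el8` replaced by `alma8`. A minimal sketch of the idiom; the file name below is invented for illustration:

```bash
#!/usr/bin/env bash
# ${var/pattern/replacement} replaces the FIRST match;
# ${var//pattern/replacement} would replace ALL matches.
OLD_RPM="cc-metric-collector-0.6-1.el8.x86_64.rpm"   # hypothetical name
NEW_RPM="${OLD_RPM/el8/alma8}"
echo "${NEW_RPM}"   # -> cc-metric-collector-0.6-1.alma8.x86_64.rpm
# The workflow then performs: mv "${OLD_RPM}" "${NEW_RPM}"
```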
@@ -195,17 +111,14 @@ jobs:
           submodules: recursive
           fetch-depth: 0

-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
+      # Use dnf to install build dependencies
+      - name: Install build dependencies
         run: |
           dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.21.7-1.module_el8+960+4060efbe.noarch.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm

       - name: RPM build MetricCollector
         id: rpmbuild
@@ -215,78 +128,15 @@ jobs:

       # See: https://github.com/actions/upload-artifact
       - name: Save RPM as artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v2
         with:
           name: cc-metric-collector RPM for UBI 8
           path: ${{ steps.rpmbuild.outputs.RPM }}
-          overwrite: true
       - name: Save SRPM as artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v2
         with:
           name: cc-metric-collector SRPM for UBI 8
           path: ${{ steps.rpmbuild.outputs.SRPM }}
-          overwrite: true
-
-  #
-  # Build on UBI 9 using go-toolset
-  #
-  UBI-9-RPM-build:
-    runs-on: ubuntu-latest
-    # See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
-    container: redhat/ubi9
-    # The job outputs link to the outputs of the 'rpmbuild' step
-    outputs:
-      rpm : ${{steps.rpmbuild.outputs.RPM}}
-      srpm : ${{steps.rpmbuild.outputs.SRPM}}
-    steps:
-
-      # Use dnf to install development packages
-      - name: Install development packages
-        run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
-
-      # Checkout git repository and submodules
-      # fetch-depth must be 0 to use git describe
-      # See: https://github.com/marketplace/actions/checkout
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-          fetch-depth: 0
-
-      # See: https://github.com/marketplace/actions/setup-go-environment
-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
-        run: |
-          dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
-
-      - name: RPM build MetricCollector
-        id: rpmbuild
-        run: |
-          git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
-          make RPM
-
-      # See: https://github.com/actions/upload-artifact
-      - name: Save RPM as artifact
-        uses: actions/upload-artifact@v4
-        with:
-          name: cc-metric-collector RPM for UBI 9
-          path: ${{ steps.rpmbuild.outputs.RPM }}
-          overwrite: true
-      - name: Save SRPM as artifact
-        uses: actions/upload-artifact@v4
-        with:
-          name: cc-metric-collector SRPM for UBI 9
-          path: ${{ steps.rpmbuild.outputs.SRPM }}
-          overwrite: true

   #
   # Build on Ubuntu 22.04 using official go package
@@ -312,15 +162,17 @@ jobs:
         with:
           submodules: recursive
           fetch-depth: 0

-      - name: Setup Golang
-        uses: actions/setup-go@v5
-        with:
-          go-version: 'stable'
+      # Use official golang package
+      - name: Install Golang
+        run: |
+          wget -q https://go.dev/dl/go1.21.1.linux-amd64.tar.gz --output-document=- | \
+            tar --directory=/usr/local --extract --gzip
+          export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
+          go version

       - name: DEB build MetricCollector
         id: dpkg-build
         run: |
+          export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
           git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
           make DEB
       - name: Rename DEB (add '_ubuntu22.04')
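The `Install Golang` step on one side of this diff fetches the upstream Go toolchain tarball instead of using `actions/setup-go`. A minimal standalone sketch of the same pattern, using the version pinned in the workflow and assuming `wget` is already installed in the container:

```bash
#!/usr/bin/env bash
set -euo pipefail
# Stream the tarball straight into tar; only /usr/local/go is written.
wget -q https://go.dev/dl/go1.21.1.linux-amd64.tar.gz --output-document=- | \
  tar --directory=/usr/local --extract --gzip
# An export only affects the current shell, i.e. the current CI step,
# which is why the DEB build step above has to repeat it.
export PATH=/usr/local/go/bin:$PATH
go version
```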
@@ -329,65 +181,13 @@ jobs:
           OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
           NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb"
           mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
-          echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
+          echo "::set-output name=DEB::${NEW_DEB_FILE}"
       # See: https://github.com/actions/upload-artifact
       - name: Save DEB as artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v2
         with:
           name: cc-metric-collector DEB for Ubuntu 22.04
           path: ${{ steps.debrename.outputs.DEB }}
-          overwrite: true
-
-  #
-  # Build on Ubuntu 24.04 using official go package
-  #
-  Ubuntu-noblenumbat-build:
-    runs-on: ubuntu-latest
-    container: ubuntu:24.04
-    # The job outputs link to the outputs of the 'debrename' step
-    # Only job outputs can be used in child jobs
-    outputs:
-      deb : ${{steps.debrename.outputs.DEB}}
-    steps:
-      # Use apt to install development packages
-      - name: Install development packages
-        run: |
-          apt update && apt --assume-yes upgrade
-          apt --assume-yes install build-essential sed git wget bash
-      # Checkout git repository and submodules
-      # fetch-depth must be 0 to use git describe
-      # See: https://github.com/marketplace/actions/checkout
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-          fetch-depth: 0
-
-      - name: Setup Golang
-        uses: actions/setup-go@v5
-        with:
-          go-version: 'stable'
-
-      - name: DEB build MetricCollector
-        id: dpkg-build
-        run: |
-          git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
-          make DEB
-      - name: Rename DEB (add '_ubuntu24.04')
-        id: debrename
-        run: |
-          OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
-          NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu24.04.deb"
-          mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
-          echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
-      # See: https://github.com/actions/upload-artifact
-      - name: Save DEB as artifact
-        uses: actions/upload-artifact@v4
-        with:
-          name: cc-metric-collector DEB for Ubuntu 24.04
-          path: ${{ steps.debrename.outputs.DEB }}
-          overwrite: true

   #
   # Create release with fresh RPMs

@@ -395,56 +195,33 @@ jobs:
   Release:
     runs-on: ubuntu-latest
     # We need the RPMs, so add dependency
-    needs: [AlmaLinux8-RPM-build, AlmaLinux9-RPM-build, UBI-8-RPM-build, UBI-9-RPM-build, Ubuntu-jammy-build, Ubuntu-noblenumbat-build]
+    needs: [AlmaLinux-RPM-build, UBI-8-RPM-build, Ubuntu-focal-build]

     steps:
       # See: https://github.com/actions/download-artifact
       - name: Download AlmaLinux 8 RPM
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v2
         with:
           name: cc-metric-collector RPM for AlmaLinux 8
       - name: Download AlmaLinux 8 SRPM
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v2
         with:
           name: cc-metric-collector SRPM for AlmaLinux 8
-
-      - name: Download AlmaLinux 9 RPM
-        uses: actions/download-artifact@v4
-        with:
-          name: cc-metric-collector RPM for AlmaLinux 9
-      - name: Download AlmaLinux 9 SRPM
-        uses: actions/download-artifact@v4
-        with:
-          name: cc-metric-collector SRPM for AlmaLinux 9

       - name: Download UBI 8 RPM
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v2
         with:
           name: cc-metric-collector RPM for UBI 8
       - name: Download UBI 8 SRPM
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v2
         with:
           name: cc-metric-collector SRPM for UBI 8
-
-      - name: Download UBI 9 RPM
-        uses: actions/download-artifact@v4
-        with:
-          name: cc-metric-collector RPM for UBI 9
-      - name: Download UBI 9 SRPM
-        uses: actions/download-artifact@v4
-        with:
-          name: cc-metric-collector SRPM for UBI 9

       - name: Download Ubuntu 22.04 DEB
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v2
         with:
           name: cc-metric-collector DEB for Ubuntu 22.04
-
-      - name: Download Ubuntu 24.04 DEB
-        uses: actions/download-artifact@v4
-        with:
-          name: cc-metric-collector DEB for Ubuntu 24.04

       # The download actions do not publish the name of the downloaded file,
       # so we re-use the job outputs of the parent jobs. The files are all
       # downloaded to the current folder.

@@ -454,51 +231,31 @@ jobs:
       - name: Set RPM variables
         id: files
         run: |
-          ALMA_8_RPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.rpm}}")
-          ALMA_8_SRPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.srpm}}")
-          ALMA_9_RPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.rpm}}")
-          ALMA_9_SRPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.srpm}}")
+          ALMA_8_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}")
+          ALMA_8_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}")
           UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
           UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
-          UBI_9_RPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.rpm}}")
-          UBI_9_SRPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.srpm}}")
-          U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}")
-          U_2404_DEB=$(basename "${{ needs.Ubuntu-noblenumbat-build.outputs.deb}}")
+          U_2004_DEB=$(basename "${{ needs.Ubuntu-focal-build.outputs.deb}}")
           echo "ALMA_8_RPM::${ALMA_8_RPM}"
           echo "ALMA_8_SRPM::${ALMA_8_SRPM}"
-          echo "ALMA_9_RPM::${ALMA_9_RPM}"
-          echo "ALMA_9_SRPM::${ALMA_9_SRPM}"
          echo "UBI_8_RPM::${UBI_8_RPM}"
           echo "UBI_8_SRPM::${UBI_8_SRPM}"
-          echo "UBI_9_RPM::${UBI_9_RPM}"
-          echo "UBI_9_SRPM::${UBI_9_SRPM}"
-          echo "U_2204_DEB::${U_2204_DEB}"
-          echo "U_2404_DEB::${U_2404_DEB}"
-          echo "ALMA_8_RPM=${ALMA_8_RPM}" >> $GITHUB_OUTPUT
-          echo "ALMA_8_SRPM=${ALMA_8_SRPM}" >> $GITHUB_OUTPUT
-          echo "ALMA_9_RPM=${ALMA_9_RPM}" >> $GITHUB_OUTPUT
-          echo "ALMA_9_SRPM=${ALMA_9_SRPM}" >> $GITHUB_OUTPUT
-          echo "UBI_8_RPM=${UBI_8_RPM}" >> $GITHUB_OUTPUT
-          echo "UBI_8_SRPM=${UBI_8_SRPM}" >> $GITHUB_OUTPUT
-          echo "UBI_9_RPM=${UBI_9_RPM}" >> $GITHUB_OUTPUT
-          echo "UBI_9_SRPM=${UBI_9_SRPM}" >> $GITHUB_OUTPUT
-          echo "U_2204_DEB=${U_2204_DEB}" >> $GITHUB_OUTPUT
-          echo "U_2404_DEB=${U_2404_DEB}" >> $GITHUB_OUTPUT
+          echo "U_2004_DEB::${U_2004_DEB}"
+          echo "::set-output name=ALMA_8_RPM::${ALMA_8_RPM}"
+          echo "::set-output name=ALMA_8_SRPM::${ALMA_8_SRPM}"
+          echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}"
+          echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}"
+          echo "::set-output name=U_2004_DEB::${U_2004_DEB}"

       # See: https://github.com/softprops/action-gh-release
       - name: Release
-        uses: softprops/action-gh-release@v2
+        uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')
         with:
           name: cc-metric-collector-${{github.ref_name}}
           files: |
             ${{ steps.files.outputs.ALMA_8_RPM }}
             ${{ steps.files.outputs.ALMA_8_SRPM }}
-            ${{ steps.files.outputs.ALMA_9_RPM }}
-            ${{ steps.files.outputs.ALMA_9_SRPM }}
             ${{ steps.files.outputs.UBI_8_RPM }}
             ${{ steps.files.outputs.UBI_8_SRPM }}
-            ${{ steps.files.outputs.UBI_9_RPM }}
-            ${{ steps.files.outputs.UBI_9_SRPM }}
-            ${{ steps.files.outputs.U_2204_DEB }}
-            ${{ steps.files.outputs.U_2404_DEB }}
+            ${{ steps.files.outputs.U_2004_DEB }}
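Two shell idioms recur in the steps above. First, the step-output handoff: one side of this diff appends `name=value` pairs to the file referenced by `$GITHUB_OUTPUT`, while the other side prints the deprecated `::set-output` workflow command. A minimal sketch of both forms, using the `files` step id from this workflow:

```bash
# Current mechanism: append key=value to the file GitHub Actions provides.
echo "ALMA_8_RPM=${ALMA_8_RPM}" >> "$GITHUB_OUTPUT"

# Deprecated mechanism: emit a workflow command on stdout.
echo "::set-output name=ALMA_8_RPM::${ALMA_8_RPM}"

# Either way, later steps and dependent jobs read the value as:
#   ${{ steps.files.outputs.ALMA_8_RPM }}
```

Second, the `Rename DEB` steps strip the file extension with `rev | cut -d '.' -f 2- | rev`: reversing the string turns the last dot-separated field into the first, so `cut -f 2-` drops the extension no matter how many dots the name contains. For example (file name invented):

```bash
DEB="cc-metric-collector_0.6_amd64.deb"
echo "$DEB" | rev | cut -d '.' -f 2- | rev
# -> cc-metric-collector_0.6_amd64
```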
.github/workflows/runonce.yml (vendored, 207 lines changed)
@@ -26,7 +26,7 @@ jobs:

       # See: https://github.com/marketplace/actions/setup-go-environment
       - name: Setup Golang
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v4
         with:
           go-version: '1.21'
@@ -52,7 +52,7 @@ jobs:

       # See: https://github.com/marketplace/actions/setup-go-environment
       - name: Setup Golang
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v4
         with:
           go-version: '1.22'
@@ -63,35 +63,9 @@ jobs:
         run: ./cc-metric-collector --once --config .github/ci-config.json

   #
-  # Job build-1-23
-  # Build on latest Ubuntu using golang version 1.23
+  # Build on AlmaLinux 8 using go-toolset
   #
-  build-1-23:
-    runs-on: ubuntu-latest
-    steps:
-      # See: https://github.com/marketplace/actions/checkout
-      # Checkout git repository and submodules
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-
-      # See: https://github.com/marketplace/actions/setup-go-environment
-      - name: Setup Golang
-        uses: actions/setup-go@v5
-        with:
-          go-version: '1.23'
-
-      - name: Build MetricCollector
-        run: make
-
-      - name: Run MetricCollector once
-        run: ./cc-metric-collector --once --config .github/ci-config.json
-
-  #
-  # Build on AlmaLinux 8
-  #
-  AlmaLinux8-RPM-build:
+  AlmaLinux-RPM-build:
     runs-on: ubuntu-latest
     # See: https://hub.docker.com/_/almalinux
     container: almalinux:8
@@ -114,18 +88,14 @@ jobs:
           submodules: recursive
           fetch-depth: 0

-      # See: https://github.com/marketplace/actions/setup-go-environment
-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
+      # Use dnf to install build dependencies
+      - name: Install build dependencies
         run: |
-          dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
+          dnf --assumeyes install \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.21.7-1.module_el8+960+4060efbe.noarch.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm

       - name: RPM build MetricCollector
         id: rpmbuild
@@ -133,60 +103,13 @@ jobs:
           git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
           make RPM

-  #
-  # Build on AlmaLinux 9
-  #
-  AlmaLinux9-RPM-build:
-    runs-on: ubuntu-latest
-    # See: https://hub.docker.com/_/almalinux
-    container: almalinux:9
-    # The job outputs link to the outputs of the 'rpmrename' step
-    # Only job outputs can be used in child jobs
-    steps:
-
-      # Use dnf to install development packages
-      - name: Install development packages
-        run: |
-          dnf --assumeyes group install "Development Tools" "RPM Development Tools"
-          dnf --assumeyes install wget openssl-devel diffutils delve which
-
-      # Checkout git repository and submodules
-      # fetch-depth must be 0 to use git describe
-      # See: https://github.com/marketplace/actions/checkout
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-          fetch-depth: 0
-
-      # See: https://github.com/marketplace/actions/setup-go-environment
-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
-        run: |
-          dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
-
-      - name: RPM build MetricCollector
-        id: rpmbuild
-        run: |
-          git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
-          make RPM

   #
   # Build on UBI 8 using go-toolset
   #
   UBI-8-RPM-build:
     runs-on: ubuntu-latest
     # See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
-    container: redhat/ubi8
+    container: registry.access.redhat.com/ubi8/ubi:8.8-1032.1692772289
     # The job outputs link to the outputs of the 'rpmbuild' step
     steps:
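The `git config --global --add safe.directory ...` line that precedes every `make RPM` and `make DEB` call works around git's ownership check: inside the job container the checkout under `/__w/` is owned by a different user than the one running the build, and since git 2.35.2 such repositories are refused unless explicitly marked safe. A minimal sketch of the symptom and the fix, with the path taken from the runner layout shown above:

```bash
# Without the exception, any git command in the bind-mounted checkout fails:
#   fatal: detected dubious ownership in repository at
#   '/__w/cc-metric-collector/cc-metric-collector'
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
git describe --tags   # now works, e.g. for versioning the packages
```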
@@ -203,61 +126,14 @@ jobs:
           submodules: recursive
           fetch-depth: 0

-      # See: https://github.com/marketplace/actions/setup-go-environment
-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
+      # Use dnf to install build dependencies
+      - name: Install build dependencies
         run: |
           dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
-
-      - name: RPM build MetricCollector
-        id: rpmbuild
-        run: |
-          git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
-          make RPM
-
-  #
-  # Build on UBI 9 using go-toolset
-  #
-  UBI-9-RPM-build:
-    runs-on: ubuntu-latest
-    # See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
-    container: redhat/ubi9
-    # The job outputs link to the outputs of the 'rpmbuild' step
-    steps:
-
-      # Use dnf to install development packages
-      - name: Install development packages
-        run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
-
-      # Checkout git repository and submodules
-      # fetch-depth must be 0 to use git describe
-      # See: https://github.com/marketplace/actions/checkout
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-          fetch-depth: 0
-
-      # See: https://github.com/marketplace/actions/setup-go-environment
-      # - name: Setup Golang
-      #   uses: actions/setup-go@v5
-      #   with:
-      #     go-version: 'stable'
-      - name: Setup Golang
-        run: |
-          dnf --assumeyes --disableplugin=subscription-manager install \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
-            https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.21.7-1.module_el8+960+4060efbe.noarch.rpm \
+            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.21.7-1.module_el8+960+4060efbe.x86_64.rpm

       - name: RPM build MetricCollector
         id: rpmbuild
@@ -287,48 +163,15 @@ jobs:
           submodules: recursive
           fetch-depth: 0
       # Use official golang package
-      # See: https://github.com/marketplace/actions/setup-go-environment
-      - name: Setup Golang
-        uses: actions/setup-go@v5
-        with:
-          go-version: 'stable'
+      - name: Install Golang
+        run: |
+          wget -q https://go.dev/dl/go1.21.1.linux-amd64.tar.gz --output-document=- | \
+            tar --directory=/usr/local --extract --gzip
+          export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
+          go version
       - name: DEB build MetricCollector
         id: dpkg-build
         run: |
+          export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
           git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
           make DEB
-
-  #
-  # Build on Ubuntu 24.04 using official go package
-  #
-  Ubuntu-noblenumbat-build:
-    runs-on: ubuntu-latest
-    container: ubuntu:24.04
-
-    steps:
-      # Use apt to install development packages
-      - name: Install development packages
-        run: |
-          apt update && apt --assume-yes upgrade
-          apt --assume-yes install build-essential sed git wget bash
-      # Checkout git repository and submodules
-      # fetch-depth must be 0 to use git describe
-      # See: https://github.com/marketplace/actions/checkout
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-          fetch-depth: 0
-      # Use official golang package
-      # See: https://github.com/marketplace/actions/setup-go-environment
-      - name: Setup Golang
-        uses: actions/setup-go@v5
-        with:
-          go-version: 'stable'
-
-      - name: DEB build MetricCollector
-        id: dpkg-build
-        run: |
-          export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
-          make DEB
@@ -17,7 +17,7 @@ import (

 	mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
 )

@@ -54,7 +54,7 @@ type RuntimeConfig struct {
 	ReceiveManager receivers.ReceiveManager
 	MultiChanTicker mct.MultiChanTicker

-	Channels []chan lp.CCMessage
+	Channels []chan lp.CCMetric
 	Sync sync.WaitGroup
 }

@@ -242,7 +242,7 @@ func mainFunc() int {
 	}

 	// Connect metric router to sink manager
-	RouterToSinksChannel := make(chan lp.CCMessage, 200)
+	RouterToSinksChannel := make(chan lp.CCMetric, 200)
 	rcfg.SinkManager.AddInput(RouterToSinksChannel)
 	rcfg.MetricRouter.AddOutput(RouterToSinksChannel)

@@ -254,7 +254,7 @@ func mainFunc() int {
 	}

 	// Connect collector manager to metric router
-	CollectToRouterChannel := make(chan lp.CCMessage, 200)
+	CollectToRouterChannel := make(chan lp.CCMetric, 200)
 	rcfg.CollectManager.AddOutput(CollectToRouterChannel)
 	rcfg.MetricRouter.AddCollectorInput(CollectToRouterChannel)

@@ -267,7 +267,7 @@ func mainFunc() int {
 	}

 	// Connect receive manager to metric router
-	ReceiveToRouterChannel := make(chan lp.CCMessage, 200)
+	ReceiveToRouterChannel := make(chan lp.CCMetric, 200)
 	rcfg.ReceiveManager.AddOutput(ReceiveToRouterChannel)
 	rcfg.MetricRouter.AddReceiverInput(ReceiveToRouterChannel)
 	use_recv = true
@@ -1,5 +1,5 @@
 # LIKWID version
-LIKWID_VERSION := 5.4.1
+LIKWID_VERSION := 5.2.2
 LIKWID_INSTALLED_FOLDER := $(shell dirname $$(which likwid-topology 2>/dev/null) 2>/dev/null)

 LIKWID_FOLDER := $(CURDIR)/likwid

@@ -23,7 +23,7 @@ likwid:
 		mkdir --parents --verbose "$${BUILD_FOLDER}"
 		wget --output-document=- http://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz |
 			tar --directory="$${BUILD_FOLDER}" --extract --gz
-		install -D --verbose --preserve-timestamps --mode=0644 --target-directory="$(LIKWID_FOLDER)" "$${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes"/likwid*.h
+		install -D --verbose --preserve-timestamps --mode=0644 --target-directory="$(LIKWID_FOLDER)" "$${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes"/likwid*.h "$${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes"/bstrlib.h
 		rm --recursive "$${BUILD_FOLDER}"
 	fi
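The `likwid` Makefile target above fetches the LIKWID release tarball and copies only the headers the collector needs into a local folder. Stripped of Makefile escaping, the recipe amounts to the following shell sequence; the temporary build directory is a stand-in for the Makefile's own variable:

```bash
#!/usr/bin/env bash
set -euo pipefail
LIKWID_VERSION="5.4.1"        # the version pinned on one side of this diff
BUILD_FOLDER="$(mktemp -d)"   # stand-in for the Makefile's build folder
LIKWID_FOLDER="$PWD/likwid"

# Stream the tarball straight into tar instead of saving it to disk.
wget --output-document=- \
  "http://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-${LIKWID_VERSION}.tar.gz" |
  tar --directory="$BUILD_FOLDER" --extract --gzip

# Install the public headers (one side of the diff also copies bstrlib.h).
install -D --verbose --preserve-timestamps --mode=0644 \
  --target-directory="$LIKWID_FOLDER" \
  "$BUILD_FOLDER/likwid-$LIKWID_VERSION/src/includes"/likwid*.h

rm --recursive "$BUILD_FOLDER"
```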
@@ -15,7 +15,7 @@ import (
 	"time"

 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 )

 const DEFAULT_BEEGFS_CMD = "beegfs-ctl"

@@ -110,7 +110,7 @@ func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
 	return nil
 }

-func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	if !m.init {
 		return
 	}

@@ -216,7 +216,7 @@ func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMess
 	for key, data := range m.matches {
 		value, _ := strconv.ParseFloat(data, 32)
-		y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
+		y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
 		if err == nil {
 			output <- y
 		}
@@ -15,7 +15,7 @@ import (
 	"time"

 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 )

 // Struct for the collector-specific JSON config

@@ -103,7 +103,7 @@ func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
 	return nil
 }

-func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	if !m.init {
 		return
 	}

@@ -208,7 +208,7 @@ func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCM
 	for key, data := range m.matches {
 		value, _ := strconv.ParseFloat(data, 32)
-		y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
+		y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
 		if err == nil {
 			output <- y
 		}
@@ -6,8 +6,8 @@ import (
 	"sync"
 	"time"

-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
 )

@@ -47,7 +47,7 @@ var AvailableCollectors = map[string]MetricCollector{
 type collectorManager struct {
 	collectors []MetricCollector // List of metric collectors to read in parallel
 	serial     []MetricCollector // List of metric collectors to read serially
-	output     chan lp.CCMessage // Output channels
+	output     chan lp.CCMetric  // Output channels
 	done       chan bool         // channel to finish / stop metric collector manager
 	ticker     mct.MultiChanTicker // periodically ticking once each interval
 	duration   time.Duration     // duration (for metrics that measure over a given duration)

@@ -60,7 +60,7 @@ type collectorManager struct {
 // Metric collector manager access functions
 type CollectorManager interface {
 	Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error
-	AddOutput(output chan lp.CCMessage)
+	AddOutput(output chan lp.CCMetric)
 	Start()
 	Close()
 }

@@ -187,7 +187,7 @@ func (cm *collectorManager) Start() {
 }

 // AddOutput adds the output channel to the metric collector manager
-func (cm *collectorManager) AddOutput(output chan lp.CCMessage) {
+func (cm *collectorManager) AddOutput(output chan lp.CCMetric) {
 	cm.output = output
 }
@@ -10,8 +10,8 @@ import (
 	"strings"
 	"time"

-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 )

 // CPUFreqCollector

@@ -112,14 +112,14 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {

 	// Check if at least one CPU with frequency information was detected
 	if len(m.topology) == 0 {
-		return fmt.Errorf("no CPU frequency info found in %s", cpuInfoFile)
+		return fmt.Errorf("No CPU frequency info found in %s", cpuInfoFile)
 	}

 	m.init = true
 	return nil
 }

-func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	// Check if already initialized
 	if !m.init {
 		return

@@ -154,7 +154,7 @@ func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CC
 				fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err))
 			return
 		}
-		if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
+		if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
 			output <- y
 		}
 	}
@@ -10,7 +10,7 @@ import (
 	"time"

 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	"github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
 	"golang.org/x/sys/unix"
 )

@@ -91,7 +91,7 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
 	return nil
 }

-func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	// Check if already initialized
 	if !m.init {
 		return

@@ -117,7 +117,7 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage
 			continue
 		}

-		if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
+		if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
 			output <- y
 		}
 	}
@@ -9,8 +9,8 @@ import (
 	"strings"
 	"time"

-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	sysconf "github.com/tklauser/go-sysconf"
 )

@@ -34,7 +34,7 @@ func (m *CpustatCollector) Init(config json.RawMessage) error {
 	m.name = "CpustatCollector"
 	m.setup()
 	m.parallel = true
-	m.meta = map[string]string{"source": m.name, "group": "CPU"}
+	m.meta = map[string]string{"source": m.name, "group": "CPU", "unit": "Percent"}
 	m.nodetags = map[string]string{"type": "node"}
 	if len(config) > 0 {
 		err := json.Unmarshal(config, &m.config)

@@ -105,7 +105,7 @@ func (m *CpustatCollector) Init(config json.RawMessage) error {
 	return nil
 }

-func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) {
+func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMetric, now time.Time, tsdelta time.Duration) {
 	values := make(map[string]float64)
 	clktck, _ := sysconf.Sysconf(sysconf.SC_CLK_TCK)
 	for match, index := range m.matches {

@@ -122,23 +122,21 @@ func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]st
 	sum := float64(0)
 	for name, value := range values {
 		sum += value
-		y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value * 100}, now)
+		y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value * 100}, now)
 		if err == nil {
-			y.AddTag("unit", "Percent")
 			output <- y
 		}
 	}
 	if v, ok := values["cpu_idle"]; ok {
 		sum -= v
-		y, err := lp.NewMessage("cpu_used", tags, m.meta, map[string]interface{}{"value": sum * 100}, now)
+		y, err := lp.New("cpu_used", tags, m.meta, map[string]interface{}{"value": sum * 100}, now)
 		if err == nil {
-			y.AddTag("unit", "Percent")
 			output <- y
 		}
 	}
 }

-func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	if !m.init {
 		return
 	}

@@ -164,7 +162,7 @@ func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage
 		}
 	}

-	num_cpus_metric, err := lp.NewMessage("num_cpus",
+	num_cpus_metric, err := lp.New("num_cpus",
 		m.nodetags,
 		m.meta,
 		map[string]interface{}{"value": int(num_cpus)},
@@ -13,15 +13,14 @@ The `cpustat` collector reads data from `/proc/stat` and outputs a handful **nod

 Metrics:

-* `cpu_user` with `unit=Percent`
-* `cpu_nice` with `unit=Percent`
-* `cpu_system` with `unit=Percent`
-* `cpu_idle` with `unit=Percent`
-* `cpu_iowait` with `unit=Percent`
-* `cpu_irq` with `unit=Percent`
-* `cpu_softirq` with `unit=Percent`
-* `cpu_steal` with `unit=Percent`
-* `cpu_guest` with `unit=Percent`
-* `cpu_guest_nice` with `unit=Percent`
-* `cpu_used` = `cpu_* - cpu_idle` with `unit=Percent`
-* `num_cpus`
+* `cpu_user`
+* `cpu_nice`
+* `cpu_system`
+* `cpu_idle`
+* `cpu_iowait`
+* `cpu_irq`
+* `cpu_softirq`
+* `cpu_steal`
+* `cpu_guest`
+* `cpu_guest_nice`
+* `cpu_used` = `cpu_* - cpu_idle`
@@ -9,7 +9,7 @@ import (
 	"strings"
 	"time"

-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	influx "github.com/influxdata/line-protocol"
 )

@@ -75,7 +75,7 @@ var DefaultTime = func() time.Time {
 	return time.Unix(42, 0)
 }

-func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	if !m.init {
 		return
 	}
@@ -8,21 +8,23 @@ import (
 	"syscall"
 	"time"

-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 )

+// "log"

 const MOUNTFILE = `/proc/self/mounts`

 type DiskstatCollectorConfig struct {
 	ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
-	ExcludeMounts  []string `json:"exclude_mounts,omitempty"`
 }

 type DiskstatCollector struct {
 	metricCollector
-	config         DiskstatCollectorConfig
-	allowedMetrics map[string]bool
+	//matches map[string]int
+	config IOstatCollectorConfig
+	//devices map[string]IOstatCollectorEntry
 }

 func (m *DiskstatCollector) Init(config json.RawMessage) error {

@@ -31,21 +33,12 @@ func (m *DiskstatCollector) Init(config json.RawMessage) error {
 	m.meta = map[string]string{"source": m.name, "group": "Disk"}
 	m.setup()
 	if len(config) > 0 {
-		if err := json.Unmarshal(config, &m.config); err != nil {
+		err := json.Unmarshal(config, &m.config)
+		if err != nil {
 			return err
 		}
 	}
-	m.allowedMetrics = map[string]bool{
-		"disk_total":    true,
-		"disk_free":     true,
-		"part_max_used": true,
-	}
-	for _, excl := range m.config.ExcludeMetrics {
-		if _, ok := m.allowedMetrics[excl]; ok {
-			m.allowedMetrics[excl] = false
-		}
-	}
-	file, err := os.Open(MOUNTFILE)
+	file, err := os.Open(string(MOUNTFILE))
 	if err != nil {
 		cclog.ComponentError(m.name, err.Error())
 		return err

@@ -55,12 +48,12 @@ func (m *DiskstatCollector) Init(config json.RawMessage) error {
 	return nil
 }

-func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
+func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
 	if !m.init {
 		return
 	}

-	file, err := os.Open(MOUNTFILE)
+	file, err := os.Open(string(MOUNTFILE))
 	if err != nil {
 		cclog.ComponentError(m.name, err.Error())
 		return

@@ -69,7 +62,6 @@ func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessag
 	part_max_used := uint64(0)
 	scanner := bufio.NewScanner(file)
-mountLoop:
 	for scanner.Scan() {
 		line := scanner.Text()
 		if len(line) == 0 {

@@ -85,17 +77,13 @@ mountLoop:
 		if strings.Contains(linefields[1], "boot") {
 			continue
 		}
-
-		mountPath := strings.Replace(linefields[1], `\040`, " ", -1)
-
-		for _, excl := range m.config.ExcludeMounts {
-			if strings.Contains(mountPath, excl) {
-				continue mountLoop
-			}
-		}
+		path := strings.Replace(linefields[1], `\040`, " ", -1)
+		stat := syscall.Statfs_t{
+			Blocks: 0,
+			Bsize:  0,
+			Bfree:  0,
+		}
-
-		stat := syscall.Statfs_t{}
-		err := syscall.Statfs(mountPath, &stat)
+		err := syscall.Statfs(path, &stat)
 		if err != nil {
 			continue
 		}

@@ -104,20 +92,16 @@ mountLoop:
 		}
 		tags := map[string]string{"type": "node", "device": linefields[0]}
 		total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000)
-		if m.allowedMetrics["disk_total"] {
-			y, err := lp.NewMessage("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
-			if err == nil {
-				y.AddMeta("unit", "GBytes")
-				output <- y
-			}
-		}
+		y, err := lp.New("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
+		if err == nil {
+			y.AddMeta("unit", "GBytes")
+			output <- y
+		}
 		free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000)
-		if m.allowedMetrics["disk_free"] {
-			y, err := lp.NewMessage("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
-			if err == nil {
-				y.AddMeta("unit", "GBytes")
-				output <- y
-			}
-		}
+		y, err = lp.New("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
+		if err == nil {
+			y.AddMeta("unit", "GBytes")
+			output <- y
+		}
 		if total > 0 {
 			perc := (100 * (total - free)) / total

@@ -126,12 +110,10 @@ mountLoop:
 			}
 		}
 	}
-	if m.allowedMetrics["part_max_used"] {
-		y, err := lp.NewMessage("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
-		if err == nil {
-			y.AddMeta("unit", "percent")
-			output <- y
-		}
-	}
+	y, err := lp.New("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
+	if err == nil {
+		y.AddMeta("unit", "percent")
+		output <- y
+	}
 }
@@ -6,13 +6,10 @@
   "exclude_metrics": [
     "disk_total"
   ],
-  "exclude_mounts": [
-    "slurm-tmpfs"
-  ]
 }
```

-The `diskstat` collector reads data from `/proc/self/mounts` and outputs a handful **node** metrics. If a metric is not required, it can be excluded from forwarding it to the sink. Additionally, any mount point containing one of the strings specified in `exclude_mounts` will be skipped during metric collection.
+The `diskstat` collector reads data from `/proc/self/mounts` and outputs a handful **node** metrics. If a metric is not required, it can be excluded from forwarding it to the sink.

 Metrics per device (with `device` tag):
 * `disk_total` (unit `GBytes`)
@@ -14,7 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
|
||||
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||
)
|
||||
|
||||
const DEFAULT_GPFS_CMD = "mmpmon"
|
||||
@@ -94,7 +94,7 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
||||
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||
// Check if already initialized
|
||||
if !m.init {
|
||||
return
|
||||
@@ -218,7 +218,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
||||
continue
|
||||
}
|
||||
if y, err :=
|
||||
lp.NewMessage(
|
||||
lp.New(
|
||||
"gpfs_bytes_read",
|
||||
m.tags,
|
||||
m.meta,
|
||||
@@ -234,7 +234,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
||||
if lastBytesRead := m.lastState[filesystem].bytesRead; lastBytesRead >= 0 {
|
||||
bwRead := float64(bytesRead-lastBytesRead) / timeDiff
|
||||
if y, err :=
|
||||
lp.NewMessage(
|
||||
lp.New(
|
||||
"gpfs_bw_read",
|
||||
m.tags,
|
||||
m.meta,
|
||||
@@ -258,7 +258,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
||||
continue
|
||||
}
|
||||
if y, err :=
|
||||
lp.NewMessage(
|
||||
lp.New(
|
||||
"gpfs_bytes_written",
|
||||
m.tags,
|
||||
m.meta,
|
||||
@@ -274,7 +274,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
||||
if lastBytesWritten := m.lastState[filesystem].bytesRead; lastBytesWritten >= 0 {
|
||||
bwWrite := float64(bytesWritten-lastBytesWritten) / timeDiff
|
||||
if y, err :=
|
||||
lp.NewMessage(
|
||||
lp.New(
|
||||
"gpfs_bw_write",
|
||||
m.tags,
|
||||
m.meta,
|
||||
@@ -304,7 +304,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
output <- y
}

@@ -316,7 +316,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
output <- y
}

@@ -328,7 +328,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
output <- y
}

@@ -340,7 +340,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
output <- y
}

@@ -352,7 +352,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
output <- y
}

@@ -364,7 +364,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
output <- y
}

@@ -372,7 +372,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
if m.config.SendTotalValues {
bytesTotal := bytesRead + bytesWritten
if y, err :=
lp.NewMessage("gpfs_bytes_total",
lp.New("gpfs_bytes_total",
m.tags,
m.meta,
map[string]interface{}{

@@ -385,7 +385,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
}
iops := numReads + numWrites
if y, err :=
lp.NewMessage("gpfs_iops",
lp.New("gpfs_iops",
m.tags,
m.meta,
map[string]interface{}{

@@ -397,7 +397,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
}
metaops := numInodeUpdates + numCloses + numOpens + numReaddirs
if y, err :=
lp.NewMessage("gpfs_metaops",
lp.New("gpfs_metaops",
m.tags,
m.meta,
map[string]interface{}{
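Editor's note: nearly every hunk in this compare toggles a single call between `lp.NewMessage(...)` (from the cc-message package) and `lp.New(...)` (from the older ccMetric package); judging by the diff lines, both take the same `(name, tags, meta, fields, timestamp)` arguments. The following is a minimal, self-contained Go sketch of the shared emit pattern; the `Message` type and `newMessage` helper are stand-ins invented for illustration, not taken from the repository.

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// Message stands in for lp.CCMessage / lp.CCMetric.
type Message struct {
	Name   string
	Tags   map[string]string
	Meta   map[string]string
	Fields map[string]interface{}
	TS     time.Time
}

// newMessage mirrors the constructor signature both APIs appear to share.
func newMessage(name string, tags, meta map[string]string, fields map[string]interface{}, ts time.Time) (Message, error) {
	if len(fields) == 0 {
		return Message{}, fmt.Errorf("no fields for %s", name)
	}
	return Message{name, tags, meta, fields, ts}, nil
}

func main() {
	keyValue := map[string]string{"_oc_": "42"} // parsed mmpmon output, as in the hunks
	output := make(chan Message, 1)

	numOpens, err := strconv.ParseInt(keyValue["_oc_"], 10, 64)
	if err != nil {
		fmt.Printf("Read(): Failed to convert number of opens '%s' to int64: %v\n", keyValue["_oc_"], err)
		return
	}
	// The pattern every hunk repeats: build the message, send it on success.
	if y, err := newMessage("gpfs_num_opens", map[string]string{"type": "node"}, nil,
		map[string]interface{}{"value": numOpens}, time.Now()); err == nil {
		output <- y
	}
	fmt.Println(<-output)
}
```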
@@ -4,8 +4,8 @@ import (
"fmt"
"os"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"golang.org/x/sys/unix"
"encoding/json"

@@ -182,7 +182,7 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
}

// Read reads Infiniband counter files below IB_BASEPATH
func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {

@@ -230,7 +230,7 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
// Send absolut values
if m.config.SendAbsoluteValues {
if y, err :=
lp.NewMessage(
lp.New(
counterDef.name,
info.tagSet,
m.meta,

@@ -248,7 +248,7 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
if counterDef.lastState >= 0 {
rate := float64((counterDef.currentState - counterDef.lastState)) / timeDiff
if y, err :=
lp.NewMessage(
lp.New(
counterDef.name+"_bw",
info.tagSet,
m.meta,

@@ -278,7 +278,7 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
// Send total values
if m.config.SendTotalValues {
if y, err :=
lp.NewMessage(
lp.New(
"ib_total",
info.tagSet,
m.meta,

@@ -291,7 +291,7 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
}
if y, err :=
lp.NewMessage(
lp.New(
"ib_total_pkts",
info.tagSet,
m.meta,
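Editor's note: the `_bw` hunk above derives a bandwidth from two successive counter readings, guarded by `lastState >= 0` so no rate is emitted before a first reading exists. A minimal sketch of that derivation, with a synthetic counter in place of the sysfs file read:

```go
package main

import (
	"fmt"
	"time"
)

// counter mirrors the relevant fields of the collector's counterDef entries.
type counter struct {
	name         string
	lastState    int64 // -1 until the first reading has been taken
	currentState int64
}

func main() {
	c := counter{name: "ib_recv", lastState: -1}
	last := time.Now()

	for i := 0; i < 3; i++ {
		time.Sleep(100 * time.Millisecond)
		now := time.Now()
		timeDiff := now.Sub(last).Seconds()
		last = now

		c.currentState += 5000 // stand-in for reading the sysfs counter file

		// Only derive a rate once a previous reading exists, exactly as the
		// `if counterDef.lastState >= 0` guard in the hunk does.
		if c.lastState >= 0 {
			rate := float64(c.currentState-c.lastState) / timeDiff
			fmt.Printf("%s_bw = %.1f bytes/sec\n", c.name, rate)
		}
		c.lastState = c.currentState
	}
}
```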
@@ -2,24 +2,24 @@ package collectors

import (
"bufio"
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
// "log"
"encoding/json"
"errors"
"os"
"strconv"
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
)

// Constant for the path to /proc/diskstats
const IOSTATFILE = `/proc/diskstats`
const IOSTAT_SYSFSPATH = `/sys/block`

type IOstatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
// New field for excluding devices via the JSON configuration
ExcludeDevices []string `json:"exclude_devices,omitempty"`
}

type IOstatCollectorEntry struct {

@@ -76,7 +76,7 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
if len(m.matches) == 0 {
return errors.New("no metrics to collect")
}
file, err := os.Open(IOSTATFILE)
file, err := os.Open(string(IOSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return err

@@ -87,24 +87,17 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
for scanner.Scan() {
line := scanner.Text()
linefields := strings.Fields(line)
if len(linefields) < 3 {
continue
}
device := linefields[2]
if strings.Contains(device, "loop") {
continue
}
if _, skip := stringArrayContains(m.config.ExcludeDevices, device); skip {
continue
}
values := make(map[string]int64)
for m := range m.matches {
values[m] = 0
}
m.devices[device] = IOstatCollectorEntry{
tags: map[string]string{
"device": device,
"device": linefields[2],
"type": "node",
},
lastValues: values,

@@ -114,12 +107,12 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
return err
}

func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
file, err := os.Open(IOSTATFILE)
file, err := os.Open(string(IOSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return

@@ -133,16 +126,10 @@ func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage)
continue
}
linefields := strings.Fields(line)
if len(linefields) < 3 {
continue
}
device := linefields[2]
if strings.Contains(device, "loop") {
continue
}
if _, skip := stringArrayContains(m.config.ExcludeDevices, device); skip {
continue
}
if _, ok := m.devices[device]; !ok {
continue
}

@@ -152,7 +139,7 @@ func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage)
x, err := strconv.ParseInt(linefields[idx], 0, 64)
if err == nil {
diff := x - entry.lastValues[name]
y, err := lp.NewMessage(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
y, err := lp.New(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
if err == nil {
output <- y
}
@@ -4,17 +4,12 @@

```json
"iostat": {
"exclude_metrics": [
"io_read_ms"
"read_ms"
],
"exclude_devices": [
"nvme0n1p1",
"nvme0n1p2",
"md127"
]
}
```

The `iostat` collector reads data from `/proc/diskstats` and outputs a handful **node** metrics. If a metric or device is not required, it can be excluded from forwarding it to the sink.
The `iostat` collector reads data from `/proc/diskstats` and outputs a handful **node** metrics. If a metric is not required, it can be excluded from forwarding it to the sink.

Metrics:
* `io_reads`
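Editor's note: a minimal sketch of the parsing and filtering logic the `iostat` hunks above revolve around: read `/proc/diskstats`, take the device name from the third column, and skip loop devices plus anything on the exclude list. The exclude set content is illustrative.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	exclude := map[string]bool{"md127": true} // from "exclude_devices"

	file, err := os.Open("/proc/diskstats")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) < 3 {
			continue
		}
		device := fields[2] // third column is the device name
		// Skip loop devices and anything the user excluded.
		if strings.Contains(device, "loop") || exclude[device] {
			continue
		}
		fmt.Println("would collect metrics for", device)
	}
}
```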
@@ -12,9 +12,8 @@ import (
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const IPMISENSORS_PATH = `ipmi-sensors`

@@ -55,35 +54,20 @@ func (m *IpmiCollector) Init(config json.RawMessage) error {
// Check if executables ipmitool or ipmisensors are found
p, err := exec.LookPath(m.config.IpmitoolPath)
if err == nil {
command := exec.Command(p)
err := command.Run()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
m.ipmitool = ""
} else {
m.ipmitool = p
}
m.ipmitool = p
}
p, err = exec.LookPath(m.config.IpmisensorsPath)
if err == nil {
command := exec.Command(p)
err := command.Run()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
m.ipmisensors = ""
} else {
m.ipmisensors = p
}
m.ipmisensors = p
}
if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 {
return errors.New("no usable IPMI reader found")
return errors.New("no IPMI reader found")
}

m.init = true
return nil
}

func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMetric) {

// Setup ipmitool command
command := exec.Command(cmd, "sensor")

@@ -121,7 +105,7 @@ func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
unit = "Watts"
}

y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
if err == nil {
y.AddMeta("unit", unit)
output <- y

@@ -135,13 +119,13 @@ func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
cclog.ComponentError(
m.name,
fmt.Sprintf("readIpmiTool(): Failed to wait for the end of command \"%s\": %v\n", command.String(), err),
fmt.Sprintf("readIpmiTool(): command stderr: \"%s\"\n", string(errMsg)),
)
cclog.ComponentError(m.name, fmt.Sprintf("readIpmiTool(): command stderr: \"%s\"\n", strings.TrimSpace(string(errMsg))))
return
}
}

func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMetric) {

command := exec.Command(cmd, "--comma-separated-output", "--sdr-cache-recreate")
command.Wait()

@@ -159,7 +143,7 @@ func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
v, err := strconv.ParseFloat(lv[3], 64)
if err == nil {
name := strings.ToLower(strings.Replace(lv[1], " ", "_", -1))
y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
if err == nil {
if len(lv) > 4 {
y.AddMeta("unit", lv[4])

@@ -171,7 +155,7 @@ func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
}
}

func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {

// Check if already initialized
if !m.init {
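Editor's note: the Init hunk above shows one side probing each IPMI binary by actually executing it, not just resolving it in `PATH`. A hedged sketch of that probe pattern, assuming (as the removed lines do) that a non-zero exit means the reader is unusable:

```go
package main

import (
	"fmt"
	"os/exec"
)

// probe resolves an executable and runs it once; it returns the resolved
// path only if the binary both exists and exits successfully. This mirrors
// the more defensive variant shown in the hunk.
func probe(name string) string {
	p, err := exec.LookPath(name)
	if err != nil {
		return "" // not in PATH
	}
	if err := exec.Command(p).Run(); err != nil {
		fmt.Printf("Failed to execute %s: %v\n", p, err)
		return ""
	}
	return p
}

func main() {
	ipmitool := probe("ipmitool")
	ipmisensors := probe("ipmi-sensors")
	if ipmitool == "" && ipmisensors == "" {
		fmt.Println("no usable IPMI reader found")
	}
}
```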
@@ -24,9 +24,9 @@ import (
"time"
"unsafe"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
"github.com/NVIDIA/go-nvml/pkg/dl"
"github.com/fsnotify/fsnotify"

@@ -43,7 +43,7 @@ const (
type LikwidCollectorMetricConfig struct {
Name string `json:"name"` // Name of the metric
Calc string `json:"calc"` // Calculation for the metric using
Type string `json:"type"` // Metric type (aka node, socket, hwthread, ...)
Type string `json:"type"` // Metric type (aka node, socket, cpu, ...)
Publish bool `json:"publish"`
SendCoreTotalVal bool `json:"send_core_total_values,omitempty"`
SendSocketTotalVal bool `json:"send_socket_total_values,omitempty"`

@@ -91,8 +91,6 @@ type LikwidCollector struct {
running bool
initialized bool
needs_reinit bool
myuid int
lock_err_once bool
likwidGroups map[C.int]LikwidEventsetConfig
lock sync.Mutex
measureThread thread.Thread

@@ -206,7 +204,6 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
m.initialized = false
m.needs_reinit = true
m.running = false
m.myuid = os.Getuid()
m.config.AccessMode = LIKWID_DEF_ACCESSMODE
m.config.LibraryPath = LIKWID_LIB_NAME
m.config.LockfilePath = LIKWID_DEF_LOCKFILE

@@ -377,41 +374,27 @@ func (m *LikwidCollector) takeMeasurement(evidx int, evset LikwidEventsetConfig,
}
defer watcher.Close()
if len(m.config.LockfilePath) > 0 {
// Check if the lock file exists
info, err := os.Stat(m.config.LockfilePath)
if os.IsNotExist(err) {
// Create the lock file if it does not exist
file, createErr := os.Create(m.config.LockfilePath)
if createErr != nil {
return true, fmt.Errorf("failed to create lock file: %v", createErr)
if _, err := os.Stat(m.config.LockfilePath); os.IsNotExist(err) {
file, err := os.Create(m.config.LockfilePath)
if err != nil {
cclog.ComponentError(m.name, "Cannot create lockfile", m.config.LockfilePath, ":", err.Error())
return true, err
}
file.Close()
info, err = os.Stat(m.config.LockfilePath) // Recheck the file after creation
}
info, err := os.Stat(m.config.LockfilePath)
if err != nil {
return true, err
}
// Check file ownership
uid := info.Sys().(*syscall.Stat_t).Uid
if uid != uint32(m.myuid) {
if uid != uint32(os.Getuid()) {
usr, err := user.LookupId(fmt.Sprint(uid))
if err == nil {
err = fmt.Errorf("access to performance counters locked by %s", usr.Username)
return true, fmt.Errorf("access to performance counters locked by %s", usr.Username)
} else {
err = fmt.Errorf("access to performance counters locked by %d", uid)
return true, fmt.Errorf("access to performance counters locked by %d", uid)
}
// delete error if we already returned the error once.
if !m.lock_err_once {
m.lock_err_once = true
} else {
err = nil
}
return true, err
}
// reset lock_err_once
m.lock_err_once = false
// Add the lock file to the watcher
err = watcher.Add(m.config.LockfilePath)
if err != nil {
cclog.ComponentError(m.name, err.Error())

@@ -449,7 +432,9 @@ func (m *LikwidCollector) takeMeasurement(evidx int, evset LikwidEventsetConfig,
gid = C.perfmon_addEventSet(evset.estr)
}
if gid < 0 {
return true, fmt.Errorf("failed to add events %s, id %d, error %d", evset.go_estr, evidx, gid)
return true, fmt.Errorf("failed to add events %s, error %d", evset.go_estr, gid)
} else {
evset.gid = gid
}

// Setup all performance monitoring counters of an eventSet

@@ -560,12 +545,11 @@ func (m *LikwidCollector) takeMeasurement(evidx int, evset LikwidEventsetConfig,
}

// Get all measurement results for an event set, derive the metric values out of the measurement results and send it
func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMessage) error {
func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMetric) error {
invClock := float64(1.0 / m.basefreq)
for _, tid := range m.cpu2tid {
evset.results[tid]["inverseClock"] = invClock
evset.results[tid]["gotime"] = interval.Seconds()
}

// Go over the event set metrics, derive the value out of the event:counter values and send it

@@ -594,7 +578,7 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
if !math.IsNaN(value) && metric.Publish {
fields := map[string]interface{}{"value": value}
y, err :=
lp.NewMessage(
lp.New(
metric.Name,
map[string]string{
"type": metric.Type,

@@ -631,7 +615,7 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
for coreID, value := range totalCoreValues {
y, err :=
lp.NewMessage(
lp.New(
metric.Name,
map[string]string{
"type": "core",

@@ -668,7 +652,7 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
for socketID, value := range totalSocketValues {
y, err :=
lp.NewMessage(
lp.New(
metric.Name,
map[string]string{
"type": "socket",

@@ -703,7 +687,7 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
}
y, err :=
lp.NewMessage(
lp.New(
metric.Name,
map[string]string{
"type": "node",

@@ -728,7 +712,7 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
}

// Go over the global metrics, derive the value out of the event sets' metric values and send it
func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, interval time.Duration, output chan lp.CCMessage) error {
func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, interval time.Duration, output chan lp.CCMetric) error {
// Send all metrics with same time stamp
// This function does only computiation, counter measurement is done before
now := time.Now()

@@ -749,7 +733,6 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
params[mname] = mres
}
}
params["gotime"] = interval.Seconds()
// Evaluate the metric
value, err := agg.EvalFloat64Condition(metric.Calc, params)
if err != nil {

@@ -763,7 +746,7 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
if !math.IsNaN(value) {
if metric.Publish {
y, err :=
lp.NewMessage(
lp.New(
metric.Name,
map[string]string{
"type": metric.Type,

@@ -791,7 +774,7 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
return nil
}

func (m *LikwidCollector) ReadThread(interval time.Duration, output chan lp.CCMessage) {
func (m *LikwidCollector) ReadThread(interval time.Duration, output chan lp.CCMetric) {
var err error = nil
groups := make([]LikwidEventsetConfig, 0)

@@ -811,17 +794,15 @@ func (m *LikwidCollector) ReadThread(interval time.Duration, output chan lp.CCMe
if !skip {
// read measurements and derive event set metrics
m.calcEventsetMetrics(e, interval, output)
groups = append(groups, e)
}
groups = append(groups, e)
}
if len(groups) > 0 {
// calculate global metrics
m.calcGlobalMetrics(groups, interval, output)
}
// calculate global metrics
m.calcGlobalMetrics(groups, interval, output)
}

// main read function taking multiple measurement rounds, each 'interval' seconds long
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -15,7 +15,7 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
{
"events" : {
"COUNTER0": "EVENT0",
"COUNTER1": "EVENT1"
"COUNTER1": "EVENT1",
},
"metrics" : [
{

@@ -27,7 +27,7 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
}
]
}
],
]
"globalmetrics" : [
{
"name": "global_sum",

@@ -132,9 +132,6 @@ In some cases LIKWID returns `0.0` for some events that are further used in proc

One might think this does not happen often, but commonly used metrics in the world of performance engineering like Instructions-per-Cycle (IPC) or, more frequently, the actual CPU clock are derived from events like `CPU_CLK_UNHALTED_CORE` (Intel) which do not increment in the halted state (as the name implies). A chip contains different power management systems that can cause a hardware thread to go into such a state. Moreover, if no cycles are executed by the core, many other events are not incremented either (like `INSTR_RETIRED_ANY` for retired instructions, which is part of IPC).

### `lockfile_path` option
LIKWID can be configured with a lock file with which the access to the performance monitoring registers can be disabled (only the owner of the lock file is allowed to access the registers). When the `lockfile_path` option is set, the collector subscribes to changes to this file to stop monitoring if the owner of the lock file changes. This feature is useful when users should be able to perform their own hardware performance counter measurements through LIKWID or any other tool.

### `send_*_total_values` option

- `send_core_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per CPU core.

@@ -270,3 +267,45 @@ IPC PMC0/PMC1 -> {
```

The script `scripts/likwid_perfgroup_to_cc_config.py` might help you.

### Internal structure

This section describes the internal structure of the `likwid` collector.

#### At initialization

After setting the defaults, the configuration is read.

Based on the configuration, the library is searched using `dlopen` to see whether it makes sense to proceed.

Next, the user-given metrics are tested to ensure they can be evaluated. For this, the collector creates a list of all user-given events/counters with the value `1.0` which is provided to the metric evaluator. The same is done for the global metrics by using the metric names with value `1.0`. If the evaluator does not fail, the metric can be evaluated and the collector initialization can proceed.

A separate thread is started to do the measurement. This is not done with a common goroutine but with a real application thread under full control. This is required because LIKWID's access system tracks the processes of the calling application, and the PID must not change between measurements; that would require teardown and reopening of the access system.

With the separate thread, the access system is initialized by setting the user-given access mode and adding all hardware threads.

In general, LIKWID measures per hardware thread, but only some hardware threads read the counters that exist only once per topological entity, e.g. per CPU socket (often memory traffic). For this, the collector gets the system topology through LIKWID and creates different mappings like 'hwthread to list offset'. With these, the hardware threads responsible for a topological entity can be determined, because those read the counters of the per-socket units. The mappings are later used in the measurement phase.

In the end, we read the base CPU frequency of the system. It may be used in the metric evaluation.

#### Measurements

The reading of events is done by the separate application thread.

It traverses all configured event sets, creates valid LIKWID event strings out of them, and passes them on to take a measurement. This could be done only once, but when the LIKWID lock changes, LIKWID has to be completely reopened to provide access again, and with this reopening the already added event sets are gone.

LIKWID has its own locking mechanism based on a lock file, where not the content of the file but its owner is of interest. In order to track changes of the file, an `fsnotify` watcher is installed on it. If the file does not exist, it is created and is consequently owned by the same user as `cc-metric-collector`. The LikwidCollector has to watch the file on its own because LIKWID does not provide proper error handling for this.

Each call to the LIKWID library for loading the event set, setting up the counting facilities, as well as starting and stopping the counters, is wrapped in lockfile checks to ensure no state change happens. If the file owner changed, the LikwidCollector cannot access the counters anymore, so no further operation can be done and measurement stops.

Although start/stop would be sufficient, the LikwidCollector performs start, read, wait, read, `getLastResult`, stop. The reason might be "historic" but is not 100% clear anymore; the author failed to document it ;)

#### Metric evaluation

After each measurement, the metrics of the event set are directly evaluated. The collector updates the counter->result mapping with the new measurements, calls the evaluator, and generates the `CCMetric` with the user-given settings if it should be published. Each metric-name-to-result mapping is stored for the global metric evaluation, which is done as a final step.

#### Shutdown

Since each measurement involves a complete initialize-to-finalize cycle of the LIKWID library, only the topology module needs to be closed.

Moreover, the separate application thread is stopped.
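Editor's note: the lockfile mechanism described above (and implemented in the `takeMeasurement` hunk) combines an ownership check via `syscall.Stat_t` with an `fsnotify` watch. A minimal, self-contained Go sketch of that combination; the lockfile path is illustrative, and the sketch only waits for a single event where the collector re-runs the owner check in a loop:

```go
package main

import (
	"fmt"
	"os"
	"os/user"
	"syscall"

	"github.com/fsnotify/fsnotify"
)

func main() {
	lockfile := "/tmp/likwid.lock" // stand-in for the lockfile_path option

	// Create the lock file if it does not exist, so it is owned by us.
	if _, err := os.Stat(lockfile); os.IsNotExist(err) {
		f, err := os.Create(lockfile)
		if err != nil {
			fmt.Fprintln(os.Stderr, "cannot create lockfile:", err)
			return
		}
		f.Close()
	}

	// Refuse to measure if someone else owns the lock file.
	info, err := os.Stat(lockfile)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if uid := info.Sys().(*syscall.Stat_t).Uid; uid != uint32(os.Getuid()) {
		if usr, err := user.LookupId(fmt.Sprint(uid)); err == nil {
			fmt.Println("access to performance counters locked by", usr.Username)
		}
		return
	}

	// Watch the file so an ownership change can stop the measurement loop.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer watcher.Close()
	if err := watcher.Add(lockfile); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// A chown shows up as an attribute-change event; the owner check above
	// would be repeated here. This blocks until the first event arrives.
	ev := <-watcher.Events
	fmt.Println("lockfile event:", ev)
}
```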
@@ -8,16 +8,18 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

//
// LoadavgCollector collects:
// * load average of last 1, 5 & 15 minutes
// * number of processes currently runnable
// * total number of processes in system
//
// See: https://www.kernel.org/doc/html/latest/filesystems/proc.html
//
const LOADAVGFILE = "/proc/loadavg"

type LoadavgCollector struct {

@@ -66,15 +68,17 @@ func (m *LoadavgCollector) Init(config json.RawMessage) error {
return nil
}

func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
buffer, err := os.ReadFile(LOADAVGFILE)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
}
return
}
now := time.Now()

@@ -92,7 +96,7 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
if m.load_skips[i] {
continue
}
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil {
output <- y
}

@@ -111,7 +115,7 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
if m.proc_skips[i] {
continue
}
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil {
output <- y
}
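Editor's note: the loadavg hunks read `/proc/loadavg` in one `os.ReadFile` call and then split it into load averages and process counts. A minimal parsing sketch under the documented file format; the metric names printed here are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	buffer, err := os.ReadFile("/proc/loadavg")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Format: "0.52 0.61 0.70 2/1234 56789"
	fields := strings.Fields(string(buffer))
	if len(fields) < 5 {
		return
	}
	names := []string{"load_one", "load_five", "load_fifteen"}
	for i, name := range names {
		x, err := strconv.ParseFloat(fields[i], 64)
		if err != nil {
			continue
		}
		fmt.Printf("%s = %.2f\n", name, x)
	}
	// Field 4 is "runnable/total" process counts.
	procs := strings.Split(fields[3], "/")
	fmt.Println("proc_run =", procs[0], "proc_total =", procs[1])
}
```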
@@ -11,7 +11,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const LUSTRE_SYSFS = `/sys/fs/lustre`

@@ -377,7 +377,7 @@ func (m *LustreCollector) Init(config json.RawMessage) error {
return nil
}

func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}

@@ -388,7 +388,7 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
for _, def := range m.definitions {
var use_x int64
var err error
var y lp.CCMessage
var y lp.CCMetric
x, err := getMetricData(data, def.lineprefix, def.lineoffset)
if err == nil {
use_x = x

@@ -399,19 +399,19 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
switch def.calc {
case "none":
value = use_x
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "difference":
value = use_x - devData[def.name]
if value.(int64) < 0 {
value = 0
}
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "derivative":
value = float64(use_x-devData[def.name]) / tdiff.Seconds()
if value.(float64) < 0 {
value = 0
}
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
}
if err == nil {
y.AddTag("device", device)
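Editor's note: the Lustre `switch def.calc` above supports three calculation modes. A compact sketch of the same logic in isolation, with the clamping of negative deltas that the hunk applies when counters reset:

```go
package main

import "fmt"

// apply mirrors the three calc modes in the hunk: pass the raw value
// through, take a clamped difference, or derive a per-second rate.
func apply(calc string, x, prev int64, seconds float64) interface{} {
	switch calc {
	case "none":
		return x
	case "difference":
		d := x - prev
		if d < 0 {
			d = 0 // counters may reset; never report negative deltas
		}
		return d
	case "derivative":
		r := float64(x-prev) / seconds
		if r < 0 {
			r = 0
		}
		return r
	}
	return nil
}

func main() {
	fmt.Println(apply("none", 500, 200, 10))       // 500
	fmt.Println(apply("difference", 500, 200, 10)) // 300
	fmt.Println(apply("derivative", 500, 200, 10)) // 30
}
```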
@@ -12,8 +12,8 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const MEMSTATFILE = "/proc/meminfo"

@@ -159,7 +159,7 @@ func (m *MemstatCollector) Init(config json.RawMessage) error {
return err
}

func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}

@@ -175,7 +175,7 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
}
}

y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil {
if len(unit) > 0 {
y.AddMeta("unit", unit)

@@ -208,7 +208,7 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
}
}
}
y, err := lp.NewMessage("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
y, err := lp.New("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
if err == nil {
if len(unit) > 0 {
y.AddMeta("unit", unit)
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

type MetricCollector interface {

@@ -13,7 +13,7 @@ type MetricCollector interface {
Init(config json.RawMessage) error // Initialize metric collector
Initialized() bool // Is metric collector initialized?
Parallel() bool
Read(duration time.Duration, output chan lp.CCMessage) // Read metrics from metric collector
Read(duration time.Duration, output chan lp.CCMetric) // Read metrics from metric collector
Close() // Close / finish metric collector
}
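Editor's note: the interface above is what every collector in this compare implements; only the channel element type differs between the two branches. A minimal sketch of a collector satisfying it, with a stand-in `Message` type where the real code uses `lp.CCMessage`/`lp.CCMetric` (all names below are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Message stands in for lp.CCMessage / lp.CCMetric.
type Message struct {
	Name  string
	Value interface{}
}

// MetricCollector restates the interface from the hunk, with Message as the
// channel's element type.
type MetricCollector interface {
	Init(config json.RawMessage) error
	Initialized() bool
	Parallel() bool
	Read(duration time.Duration, output chan Message)
	Close()
}

// DummyCollector is the smallest possible implementation.
type DummyCollector struct{ init bool }

func (c *DummyCollector) Init(config json.RawMessage) error { c.init = true; return nil }
func (c *DummyCollector) Initialized() bool                 { return c.init }
func (c *DummyCollector) Parallel() bool                    { return true }
func (c *DummyCollector) Close()                            { c.init = false }

func (c *DummyCollector) Read(duration time.Duration, output chan Message) {
	if !c.init { // every collector in this compare starts with this guard
		return
	}
	output <- Message{Name: "dummy_metric", Value: 42}
}

func main() {
	var mc MetricCollector = &DummyCollector{}
	mc.Init(nil)
	out := make(chan Message, 1)
	mc.Read(time.Second, out)
	fmt.Println(<-out)
}
```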
@@ -9,17 +9,16 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const NETSTATFILE = "/proc/net/dev"

type NetstatCollectorConfig struct {
IncludeDevices []string `json:"include_devices"`
SendAbsoluteValues bool `json:"send_abs_values"`
SendDerivedValues bool `json:"send_derived_values"`
InterfaceAliases map[string][]string `json:"interface_aliases,omitempty"`
IncludeDevices []string `json:"include_devices"`
SendAbsoluteValues bool `json:"send_abs_values"`
SendDerivedValues bool `json:"send_derived_values"`
}

type NetstatCollectorMetric struct {

@@ -33,26 +32,9 @@ type NetstatCollectorMetric struct {

type NetstatCollector struct {
metricCollector
config NetstatCollectorConfig
aliasToCanonical map[string]string
matches map[string][]NetstatCollectorMetric
lastTimestamp time.Time
}

func (m *NetstatCollector) buildAliasMapping() {
m.aliasToCanonical = make(map[string]string)
for canon, aliases := range m.config.InterfaceAliases {
for _, alias := range aliases {
m.aliasToCanonical[alias] = canon
}
}
}

func getCanonicalName(raw string, aliasToCanonical map[string]string) string {
if canon, ok := aliasToCanonical[raw]; ok {
return canon
}
return raw
config NetstatCollectorConfig
matches map[string][]NetstatCollectorMetric
lastTimestamp time.Time
}

func (m *NetstatCollector) Init(config json.RawMessage) error {

@@ -95,8 +77,6 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
}
}

m.buildAliasMapping()

// Check access to net statistic file
file, err := os.Open(NETSTATFILE)
if err != nil {

@@ -117,20 +97,18 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
// Split line into fields
f := strings.Fields(l)

// Get raw and canonical names
raw := strings.Trim(f[0], ": ")
canonical := getCanonicalName(raw, m.aliasToCanonical)
// Get net device entry
dev := strings.Trim(f[0], ": ")

// Check if device is a included device
if _, ok := stringArrayContains(m.config.IncludeDevices, canonical); ok {
// Tag will contain original device name (raw).
tags := map[string]string{"stype": "network", "stype-id": raw, "type": "node"}
if _, ok := stringArrayContains(m.config.IncludeDevices, dev); ok {
tags := map[string]string{"stype": "network", "stype-id": dev, "type": "node"}
meta_unit_byte := map[string]string{"source": m.name, "group": "Network", "unit": "bytes"}
meta_unit_byte_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "bytes/sec"}
meta_unit_pkts := map[string]string{"source": m.name, "group": "Network", "unit": "packets"}
meta_unit_pkts_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "packets/sec"}

m.matches[canonical] = []NetstatCollectorMetric{
m.matches[dev] = []NetstatCollectorMetric{
{
name: "net_bytes_in",
index: fieldReceiveBytes,

@@ -165,6 +143,7 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
},
}
}
}

if len(m.matches) == 0 {

@@ -174,7 +153,7 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
return nil
}

func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}

@@ -185,7 +164,7 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
// Save current timestamp
m.lastTimestamp = now

file, err := os.Open(NETSTATFILE)
file, err := os.Open(string(NETSTATFILE))
if err != nil {
cclog.ComponentError(m.name, err.Error())
return

@@ -204,12 +183,11 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
// Split line into fields
f := strings.Fields(l)

// Get raw and canonical names
raw := strings.Trim(f[0], ":")
canonical := getCanonicalName(raw, m.aliasToCanonical)
// Get net device entry
dev := strings.Trim(f[0], ":")

// Check if device is a included device
if devmetrics, ok := m.matches[canonical]; ok {
if devmetrics, ok := m.matches[dev]; ok {
for i := range devmetrics {
metric := &devmetrics[i]

@@ -219,14 +197,14 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
continue
}
if m.config.SendAbsoluteValues {
if y, err := lp.NewMessage(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
if y, err := lp.New(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
output <- y
}
}
if m.config.SendDerivedValues {
if metric.lastValue >= 0 {
rate := float64(v-metric.lastValue) / timeDiff
if y, err := lp.NewMessage(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
if y, err := lp.New(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
output <- y
}
}
@@ -4,19 +4,14 @@

```json
"netstat": {
"include_devices": [
"eth0",
"eno1"
"eth0"
],
"send_abs_values": true,
"send_derived_values": true,
"interface_aliases": {
"eno1": ["eno1np0", "eno1_alt"],
"eth0": ["eth0_alias"]
}
"send_abs_values" : true,
"send_derived_values" : true
}
```

The `netstat` collector reads data from `/proc/net/dev` and outputs a handful **node** metrics. With the `include_devices` list you can specify which network devices should be measured. **Note**: Most other collectors use an _exclude_ list instead of an include list. Optionally, you can define an interface_aliases mapping. For each canonical device (as listed in include_devices), you may provide an array of aliases that may be reported by the system. When an alias is detected, it is preferred for matching, while the output tag stype-id always shows the actual system-reported name.
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful **node** metrics. With the `include_devices` list you can specify which network devices should be measured. **Note**: Most other collectors use an _exclude_ list instead of an include list.

Metrics:
* `net_bytes_in` (`unit=bytes`)

@@ -28,4 +23,5 @@ Metrics:
* `net_pkts_in_bw` (`unit=packets/sec` if `send_derived_values == true`)
* `net_pkts_out_bw` (`unit=packets/sec` if `send_derived_values == true`)

The device name is added as tag `stype=network,stype-id=<device>`.
The device name is added as tag `stype=network,stype-id=<device>`.
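Editor's note: the interface-alias feature documented above is implemented by the two helpers removed in the netstat hunks. This sketch restates them in isolation: the canonical-to-aliases config map is inverted once, and lookups fall back to the raw name when no alias is configured.

```go
package main

import "fmt"

// buildAliasMapping inverts the interface_aliases config (canonical -> aliases)
// into an alias -> canonical lookup table, as the removed helper does.
func buildAliasMapping(interfaceAliases map[string][]string) map[string]string {
	aliasToCanonical := make(map[string]string)
	for canon, aliases := range interfaceAliases {
		for _, alias := range aliases {
			aliasToCanonical[alias] = canon
		}
	}
	return aliasToCanonical
}

func getCanonicalName(raw string, aliasToCanonical map[string]string) string {
	if canon, ok := aliasToCanonical[raw]; ok {
		return canon
	}
	return raw
}

func main() {
	m := buildAliasMapping(map[string][]string{
		"eno1": {"eno1np0", "eno1_alt"},
	})
	// Matching uses the canonical name; the stype-id tag keeps the raw name.
	fmt.Println(getCanonicalName("eno1np0", m)) // eno1
	fmt.Println(getCanonicalName("eth0", m))    // eth0 (no alias -> unchanged)
}
```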
@@ -11,7 +11,7 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

// First part contains the code for the general NfsCollector.

@@ -118,7 +118,7 @@ func (m *nfsCollector) MainInit(config json.RawMessage) error {
return nil
}

func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}

@@ -140,7 +140,7 @@ func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
continue
}
value := data.current - data.last
y, err := lp.NewMessage(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddMeta("version", m.version)
output <- y
@@ -9,8 +9,8 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

// These are the fields we read from the JSON configuration

@@ -18,20 +18,17 @@ type NfsIOStatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
UseServerAddressAsSType bool `json:"use_server_as_stype,omitempty"`
SendAbsoluteValues bool `json:"send_abs_values"`
SendDerivedValues bool `json:"send_derived_values"`
}

// This contains all variables we need during execution and the variables
// defined by metricCollector (name, init, ...)
type NfsIOStatCollector struct {
metricCollector
config NfsIOStatCollectorConfig // the configuration structure
meta map[string]string // default meta information
tags map[string]string // default tags
data map[string]map[string]int64 // data storage for difference calculation
key string // which device info should be used as subtype ID? 'server' or 'mntpoint'
lastTimestamp time.Time
config NfsIOStatCollectorConfig // the configuration structure
meta map[string]string // default meta information
tags map[string]string // default tags
data map[string]map[string]int64 // data storage for difference calculation
key string // which device info should be used as subtype ID? 'server' or 'mntpoint', see NfsIOStatCollectorConfig.UseServerAddressAsSType
}

var deviceRegex = regexp.MustCompile(`device (?P<server>[^ ]+) mounted on (?P<mntpoint>[^ ]+) with fstype nfs(?P<version>\d*) statvers=[\d\.]+`)

@@ -84,6 +81,7 @@ func (m *NfsIOStatCollector) readNfsiostats() map[string]map[string]int64 {
data[current[m.key]][name] = val
}
}
}
current = nil
}

@@ -100,9 +98,6 @@ func (m *NfsIOStatCollector) Init(config json.RawMessage) error {
m.meta = map[string]string{"source": m.name, "group": "NFS", "unit": "bytes"}
m.tags = map[string]string{"type": "node"}
m.config.UseServerAddressAsSType = false
// Set default configuration
m.config.SendAbsoluteValues = true
m.config.SendDerivedValues = false
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {

@@ -115,15 +110,12 @@ func (m *NfsIOStatCollector) Init(config json.RawMessage) error {
m.key = "server"
}
m.data = m.readNfsiostats()
m.lastTimestamp = time.Now()
m.init = true
return err
}

func (m *NfsIOStatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
now := time.Now()
timeDiff := now.Sub(m.lastTimestamp).Seconds()
m.lastTimestamp = now
func (m *NfsIOStatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
timestamp := time.Now()

// Get the current values for all mountpoints
newdata := m.readNfsiostats()

@@ -131,30 +123,21 @@ func (m *NfsIOStatCollector) Read(interval time.Duration, output chan lp.CCMessa
for mntpoint, values := range newdata {
// Was the mount point already present in the last iteration
if old, ok := m.data[mntpoint]; ok {
for name, newVal := range values {
if m.config.SendAbsoluteValues {
msg, err := lp.NewMessage(fmt.Sprintf("nfsio_%s", name), m.tags, m.meta, map[string]interface{}{"value": newVal}, now)
if err == nil {
msg.AddTag("stype", "filesystem")
msg.AddTag("stype-id", mntpoint)
output <- msg
// Calculate the difference of old and new values
for i := range values {
x := values[i] - old[i]
y, err := lp.New(fmt.Sprintf("nfsio_%s", i), m.tags, m.meta, map[string]interface{}{"value": x}, timestamp)
if err == nil {
if strings.HasPrefix(i, "page") {
y.AddMeta("unit", "4K_Pages")
}
y.AddTag("stype", "filesystem")
y.AddTag("stype-id", mntpoint)
// Send it to output channel
output <- y
}
if m.config.SendDerivedValues {
rate := float64(newVal-old[name]) / timeDiff
msg, err := lp.NewMessage(fmt.Sprintf("nfsio_%s_bw", name), m.tags, m.meta, map[string]interface{}{"value": rate}, now)
if err == nil {
if strings.HasPrefix(name, "page") {
msg.AddMeta("unit", "4K_pages/s")
} else {
msg.AddMeta("unit", "bytes/sec")
}
msg.AddTag("stype", "filesystem")
msg.AddTag("stype-id", mntpoint)
output <- msg
}
}
old[name] = newVal
// Update old to the new value for the next iteration
old[i] = values[i]
}
} else {
// First time we see this mount point, store all values

@@ -174,6 +157,7 @@ func (m *NfsIOStatCollector) Read(interval time.Duration, output chan lp.CCMessa
m.data[mntpoint] = nil
}
}
}

func (m *NfsIOStatCollector) Close() {
@@ -3,18 +3,16 @@

```json
"nfsiostat": {
"exclude_metrics": [
"oread", "pageread"
"nfsio_oread"
],
"exclude_filesystems": [
"/mnt"
"exclude_filesystems" : [
"/mnt",
],
"use_server_as_stype": false,
"send_abs_values": false,
"send_derived_values": true
"use_server_as_stype": false
}
```

The `nfsiostat` collector reads data from `/proc/self/mountstats` and outputs a handful **node** metrics for each NFS filesystem. If a metric or filesystem is not required, it can be excluded from forwarding it to the sink. **Note:** When excluding metrics, you must provide the base metric name (e.g. pageread) without the nfsio_ prefix. This exclusion applies to both absolute and derived values.
The `nfsiostat` collector reads data from `/proc/self/mountstats` and outputs a handful **node** metrics for each NFS filesystem. If a metric or filesystem is not required, it can be excluded from forwarding it to the sink.

Metrics:
* `nfsio_nread`: Bytes transferred by normal `read()` calls

@@ -26,9 +24,4 @@ Metrics:
* `nfsio_nfsread`: Bytes transferred for reading from the server
* `nfsio_nfswrite`: Pages transferred by writing to the server

For each of these, if derived values are enabled, an additional metric is sent with the `_bw` suffix, which represents the rate:

* For normal byte metrics: `unit=bytes/sec`
* For page metrics: `unit=4K_pages/s`

The `nfsiostat` collector adds the mountpoint to the tags as `stype=filesystem,stype-id=<mountpoint>`. If the server address should be used instead of the mountpoint, use the `use_server_as_stype` config setting.
The `nfsiostat` collector adds the mountpoint to the tags as `stype=filesystem,stype-id=<mountpoint>`. If the server address should be used instead of the mountpoint, use the `use_server_as_stype` config setting.
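Editor's note: the `_bw` rate and unit selection described above follow directly from the newer collector code in the preceding hunks. A small sketch of that derivation; the sample counter values and the 10-second interval are made up for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// rateUnit picks the unit for a derived "_bw" metric the way the newer
// collector code does: page counters get 4K_pages/s, everything else bytes/sec.
func rateUnit(name string) string {
	if strings.HasPrefix(name, "page") {
		return "4K_pages/s"
	}
	return "bytes/sec"
}

func main() {
	old := map[string]int64{"nread": 1000, "pageread": 10}
	cur := map[string]int64{"nread": 5000, "pageread": 30}
	timeDiff := 10.0 // seconds between the two readings

	for name, newVal := range cur {
		rate := float64(newVal-old[name]) / timeDiff
		fmt.Printf("nfsio_%s_bw = %.1f %s\n", name, rate, rateUnit(name))
	}
}
```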
@@ -10,15 +10,10 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

type NUMAStatsCollectorConfig struct {
SendAbsoluteValues bool `json:"send_abs_values"`
SendDerivedValues bool `json:"send_derived_values"`
}

// Non-Uniform Memory Access (NUMA) policy hit/miss statistics
//
// numa_hit:

@@ -52,16 +47,13 @@ type NUMAStatsCollectorConfig struct {
//
// See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
type NUMAStatsCollectorTopolgy struct {
file string
tagSet map[string]string
previousValues map[string]int64
file string
tagSet map[string]string
}

type NUMAStatsCollector struct {
metricCollector
topology []NUMAStatsCollectorTopolgy
config NUMAStatsCollectorConfig
lastTimestamp time.Time
topology []NUMAStatsCollectorTopolgy
}

func (m *NUMAStatsCollector) Init(config json.RawMessage) error {

@@ -94,9 +86,8 @@ func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
file := filepath.Join(dir, "numastat")
m.topology = append(m.topology,
NUMAStatsCollectorTopolgy{
file: file,
tagSet: map[string]string{"memoryDomain": node},
previousValues: make(map[string]int64),
file: file,
tagSet: map[string]string{"memoryDomain": node},
})
}

@@ -106,32 +97,28 @@ func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
return nil
}

func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}

now := time.Now()
timeDiff := now.Sub(m.lastTimestamp).Seconds()
m.lastTimestamp = now

for i := range m.topology {
// Loop for all NUMA domains
t := &m.topology[i]

now := time.Now()
file, err := os.Open(t.file)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to open file '%s': %v", t.file, err))
continue
return
}
scanner := bufio.NewScanner(file)

// Read line by line
for scanner.Scan() {
line := scanner.Text()
split := strings.Fields(line)
split := strings.Fields(scanner.Text())
if len(split) != 2 {
continue
}

@@ -143,38 +130,18 @@ func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessa
fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err))
continue
}

if m.config.SendAbsoluteValues {
msg, err := lp.NewMessage(
"numastats_"+key,
t.tagSet,
m.meta,
map[string]interface{}{"value": value},
now,
)
if err == nil {
output <- msg
}
}

if m.config.SendDerivedValues {
prev, ok := t.previousValues[key]
if ok {
rate := float64(value-prev) / timeDiff
msg, err := lp.NewMessage(
"numastats_"+key+"_rate",
t.tagSet,
m.meta,
map[string]interface{}{"value": rate},
now,
)
if err == nil {
output <- msg
}
}
t.previousValues[key] = value
y, err := lp.New(
"numastats_"+key,
t.tagSet,
m.meta,
map[string]interface{}{"value": value},
now,
)
if err == nil {
output <- y
}
}

file.Close()
}
}
@@ -2,10 +2,7 @@
## `numastat` collector

```json
"numastats": {
"send_abs_values" : true,
"send_derived_values" : true
}
"numastats": {}
```

The `numastat` collector reads data from `/sys/devices/system/node/node*/numastat` and outputs a handful **memoryDomain** metrics. See: <https://www.kernel.org/doc/html/latest/admin-guide/numastat.html>

@@ -18,9 +15,3 @@ Metrics:
* `numastats_local_node`: A process ran on this node's CPU, and got memory from this node.
* `numastats_other_node`: A process ran on a different node's CPU, and got memory from this node.
* `numastats_interleave_hit`: Interleaving wanted to allocate from this node and succeeded.
* `numastats_numa_hit_rate` (if `send_derived_values == true`): Derived rate value per second.
* `numastats_numa_miss_rate` (if `send_derived_values == true`): Derived rate value per second.
* `numastats_numa_foreign_rate` (if `send_derived_values == true`): Derived rate value per second.
* `numastats_local_node_rate` (if `send_derived_values == true`): Derived rate value per second.
* `numastats_other_node_rate` (if `send_derived_values == true`): Derived rate value per second.
* `numastats_interleave_hit_rate` (if `send_derived_values == true`): Derived rate value per second.
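Editor's note: the `_rate` metrics listed above are computed from the previous reading kept per NUMA domain, as the `previousValues` map in the numastats hunks does. A minimal sketch of that bookkeeping; the two sample readings and the 10-second spacing are fabricated for illustration:

```go
package main

import "fmt"

func main() {
	previousValues := map[string]int64{} // per NUMA domain, as in the collector
	readings := []map[string]int64{      // stand-ins for two numastat reads
		{"numa_hit": 1000, "numa_miss": 10},
		{"numa_hit": 1800, "numa_miss": 12},
	}
	timeDiff := 10.0 // seconds between Read() calls

	for _, values := range readings {
		for key, value := range values {
			fmt.Printf("numastats_%s = %d\n", key, value) // absolute value
			// A rate is only derived once a previous value exists.
			if prev, ok := previousValues[key]; ok {
				rate := float64(value-prev) / timeDiff
				fmt.Printf("numastats_%s_rate = %.1f\n", key, rate)
			}
			previousValues[key] = value
		}
	}
}
```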
@@ -8,8 +8,8 @@ import (
"strings"
"time"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/nvml"
)

@@ -206,7 +206,7 @@ func (m *NvidiaCollector) Init(config json.RawMessage) error {
return nil
}

func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_fb_mem_total"] || !device.excludeMetrics["nv_fb_mem_used"] || !device.excludeMetrics["nv_fb_mem_reserved"] {
var total uint64
var used uint64
@@ -222,7 +222,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro

if !device.excludeMetrics["nv_fb_mem_total"] {
t := float64(total) / (1024 * 1024)
y, err := lp.NewMessage("nv_fb_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_fb_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -231,7 +231,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro

if !device.excludeMetrics["nv_fb_mem_used"] {
f := float64(used) / (1024 * 1024)
y, err := lp.NewMessage("nv_fb_mem_used", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
y, err := lp.New("nv_fb_mem_used", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -240,7 +240,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro

if v2 && !device.excludeMetrics["nv_fb_mem_reserved"] {
r := float64(reserved) / (1024 * 1024)
y, err := lp.NewMessage("nv_fb_mem_reserved", device.tags, device.meta, map[string]interface{}{"value": r}, time.Now())
y, err := lp.New("nv_fb_mem_reserved", device.tags, device.meta, map[string]interface{}{"value": r}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -250,7 +250,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
return nil
}

func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_bar1_mem_total"] || !device.excludeMetrics["nv_bar1_mem_used"] {
meminfo, ret := nvml.DeviceGetBAR1MemoryInfo(device.device)
if ret != nvml.SUCCESS {
@@ -259,7 +259,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
}
if !device.excludeMetrics["nv_bar1_mem_total"] {
t := float64(meminfo.Bar1Total) / (1024 * 1024)
y, err := lp.NewMessage("nv_bar1_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_bar1_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -267,7 +267,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
}
if !device.excludeMetrics["nv_bar1_mem_used"] {
t := float64(meminfo.Bar1Used) / (1024 * 1024)
y, err := lp.NewMessage("nv_bar1_mem_used", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_bar1_mem_used", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -277,7 +277,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
return nil
}

func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
if ret != nvml.SUCCESS {
err := errors.New(nvml.ErrorString(ret))
@@ -301,14 +301,14 @@ func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) err
util, ret := nvml.DeviceGetUtilizationRates(device.device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_util"] {
y, err := lp.NewMessage("nv_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
y, err := lp.New("nv_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
}
if !device.excludeMetrics["nv_mem_util"] {
y, err := lp.NewMessage("nv_mem_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
y, err := lp.New("nv_mem_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -319,7 +319,7 @@ func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) err
return nil
}

func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readTemp(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_temp"] {
// Retrieves the current temperature readings for the device, in degrees C.
//
@@ -328,7 +328,7 @@ func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// * NVML_TEMPERATURE_COUNT
temp, ret := nvml.DeviceGetTemperature(device.device, nvml.TEMPERATURE_GPU)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_temp", device.tags, device.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
y, err := lp.New("nv_temp", device.tags, device.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
if err == nil {
y.AddMeta("unit", "degC")
output <- y
@@ -338,7 +338,7 @@ func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}

func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readFan(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_fan"] {
// Retrieves the intended operating speed of the device's fan.
//
@@ -351,7 +351,7 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// This value may exceed 100% in certain cases.
fan, ret := nvml.DeviceGetFanSpeed(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
y, err := lp.New("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -361,14 +361,14 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}

// func readFans(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// func readFans(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// 	if !device.excludeMetrics["nv_fan"] {
// 		numFans, ret := nvml.DeviceGetNumFans(device.device)
// 		if ret == nvml.SUCCESS {
// 			for i := 0; i < numFans; i++ {
// 				fan, ret := nvml.DeviceGetFanSpeed_v2(device.device, i)
// 				if ret == nvml.SUCCESS {
// 					y, err := lp.NewMessage("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
// 					y, err := lp.New("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
// 					if err == nil {
// 						y.AddMeta("unit", "%")
// 						y.AddTag("stype", "fan")
@@ -382,7 +382,7 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// 	return nil
// }

func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_ecc_mode"] {
// Retrieves the current and pending ECC modes for the device.
//
@@ -393,21 +393,21 @@ func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// The "pending" ECC mode refers to the target mode following the next reboot.
_, ecc_pend, ret := nvml.DeviceGetEccMode(device.device)
if ret == nvml.SUCCESS {
var y lp.CCMessage
var y lp.CCMetric
var err error
switch ecc_pend {
case nvml.FEATURE_DISABLED:
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "OFF"}, time.Now())
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "OFF"}, time.Now())
case nvml.FEATURE_ENABLED:
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "ON"}, time.Now())
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "ON"}, time.Now())
default:
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
}
if err == nil {
output <- y
}
} else if ret == nvml.ERROR_NOT_SUPPORTED {
y, err := lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "N/A"}, time.Now())
y, err := lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "N/A"}, time.Now())
if err == nil {
output <- y
}
@@ -416,7 +416,7 @@ func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}

func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_perf_state"] {
// Retrieves the current performance state for the device.
//
@@ -427,7 +427,7 @@ func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error
// 32: Unknown performance state.
pState, ret := nvml.DeviceGetPerformanceState(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_perf_state", device.tags, device.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
y, err := lp.New("nv_perf_state", device.tags, device.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
if err == nil {
output <- y
}
@@ -436,7 +436,7 @@ func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error
return nil
}

func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_power_usage"] {
// Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
//
@@ -450,7 +450,7 @@ func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
if mode == nvml.FEATURE_ENABLED {
power, ret := nvml.DeviceGetPowerUsage(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_power_usage", device.tags, device.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
y, err := lp.New("nv_power_usage", device.tags, device.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
output <- y
@@ -461,7 +461,7 @@ func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
return nil
}

func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readClocks(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// Retrieves the current clock speeds for the device.
//
// Available clock information:
@@ -471,7 +471,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_graphics_clock"] {
graphicsClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
y, err := lp.New("nv_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -482,7 +482,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_sm_clock"] {
smCock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(smCock)}, time.Now())
y, err := lp.New("nv_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(smCock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -493,7 +493,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_mem_clock"] {
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
y, err := lp.New("nv_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -503,7 +503,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_video_clock"] {
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_VIDEO)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
y, err := lp.New("nv_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -513,7 +513,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}

func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// Retrieves the maximum clock speeds for the device.
//
// Available clock information:
@@ -528,7 +528,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_graphics_clock"] {
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
y, err := lp.New("nv_max_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -539,7 +539,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_sm_clock"] {
maxSmClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
y, err := lp.New("nv_max_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -550,7 +550,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_mem_clock"] {
maxMemClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
y, err := lp.New("nv_max_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -561,7 +561,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_video_clock"] {
maxMemClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_VIDEO)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
y, err := lp.New("nv_max_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -571,7 +571,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
return nil
}

func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_ecc_uncorrected_error"] {
// Retrieves the total ECC error counts for the device.
//
@@ -584,7 +584,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
// i.e. the total set of errors across the entire device.
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_UNCORRECTED, nvml.AGGREGATE_ECC)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_ecc_uncorrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
y, err := lp.New("nv_ecc_uncorrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
if err == nil {
output <- y
}
@@ -593,7 +593,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_ecc_corrected_error"] {
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_CORRECTED, nvml.AGGREGATE_ECC)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_ecc_corrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
y, err := lp.New("nv_ecc_corrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
if err == nil {
output <- y
}
@@ -602,7 +602,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
return nil
}

func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_power_max_limit"] {
// Retrieves the power management limit associated with this device.
//
@@ -612,7 +612,7 @@ func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
// If the card's total power draw reaches this limit the power management algorithm kicks in.
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_power_max_limit", device.tags, device.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
y, err := lp.New("nv_power_max_limit", device.tags, device.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
output <- y
@@ -622,7 +622,7 @@ func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
return nil
}

func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
if ret != nvml.SUCCESS {
err := errors.New(nvml.ErrorString(ret))
@@ -639,7 +639,7 @@ func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_encoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
y, err := lp.New("nv_encoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -649,7 +649,7 @@ func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
return nil
}

func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
if ret != nvml.SUCCESS {
err := errors.New(nvml.ErrorString(ret))
@@ -666,7 +666,7 @@ func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_decoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
y, err := lp.New("nv_decoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -676,7 +676,7 @@ func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
return nil
}

func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_remapped_rows_corrected"] ||
!device.excludeMetrics["nv_remapped_rows_uncorrected"] ||
!device.excludeMetrics["nv_remapped_rows_pending"] ||
@@ -693,13 +693,13 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
corrected, uncorrected, pending, failure, ret := nvml.DeviceGetRemappedRows(device.device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_remapped_rows_corrected"] {
y, err := lp.NewMessage("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(corrected)}, time.Now())
y, err := lp.New("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(corrected)}, time.Now())
if err == nil {
output <- y
}
}
if !device.excludeMetrics["nv_remapped_rows_uncorrected"] {
y, err := lp.NewMessage("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(uncorrected)}, time.Now())
y, err := lp.New("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(uncorrected)}, time.Now())
if err == nil {
output <- y
}
@@ -709,7 +709,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
if pending {
p = 1
}
y, err := lp.NewMessage("nv_remapped_rows_pending", device.tags, device.meta, map[string]interface{}{"value": p}, time.Now())
y, err := lp.New("nv_remapped_rows_pending", device.tags, device.meta, map[string]interface{}{"value": p}, time.Now())
if err == nil {
output <- y
}
@@ -719,7 +719,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
if failure {
f = 1
}
y, err := lp.NewMessage("nv_remapped_rows_failure", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
y, err := lp.New("nv_remapped_rows_failure", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil {
output <- y
}
@@ -729,7 +729,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
return nil
}

func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_compute_processes"] {
// Get information about processes with a compute context on a device
//
@@ -753,7 +753,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
procList, ret := nvml.DeviceGetComputeRunningProcesses(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
y, err := lp.New("nv_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
if err == nil {
output <- y
}
@@ -782,7 +782,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
procList, ret := nvml.DeviceGetGraphicsRunningProcesses(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_graphics_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
y, err := lp.New("nv_graphics_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
if err == nil {
output <- y
}
@@ -812,7 +812,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
// 		// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
// 		procList, ret := nvml.DeviceGetMPSComputeRunningProcesses(device.device)
// 		if ret == nvml.SUCCESS {
// 			y, err := lp.NewMessage("nv_mps_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
// 			y, err := lp.New("nv_mps_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
// 			if err == nil {
// 				output <- y
// 			}
@@ -821,7 +821,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
return nil
}

func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
var violTime nvml.ViolationTime
var ret nvml.Return

@@ -840,7 +840,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_POWER)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_power", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_power", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -852,7 +852,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_THERMAL)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_thermal", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_thermal", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -864,7 +864,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_SYNC_BOOST)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_sync_boost", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_sync_boost", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -876,7 +876,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_BOARD_LIMIT)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_board_limit", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_board_limit", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -888,7 +888,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_LOW_UTILIZATION)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_low_util", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_low_util", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -900,7 +900,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_RELIABILITY)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_reliability", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_reliability", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -912,7 +912,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_APP_CLOCKS)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_below_app_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_below_app_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -924,7 +924,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_BASE_CLOCKS)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_below_base_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_below_base_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -935,18 +935,12 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
return nil
}

func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// Retrieves the specified error counter value
// Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available
//
// For Pascal &tm; or newer fully supported devices.

var aggregate_crc_errors uint64 = 0
var aggregate_ecc_errors uint64 = 0
var aggregate_replay_errors uint64 = 0
var aggregate_recovery_errors uint64 = 0
var aggregate_crc_flit_errors uint64 = 0

for i := 0; i < nvml.NVLINK_MAX_LINKS; i++ {
state, ret := nvml.DeviceGetNvLinkState(device.device, i)
if ret == nvml.SUCCESS {
@@ -954,9 +948,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_crc_errors"] {
// Data link receive data CRC error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_DATA)
aggregate_crc_errors = aggregate_crc_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_crc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_crc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -967,9 +960,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_ecc_errors"] {
// Data link receive data ECC error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_ECC_DATA)
aggregate_ecc_errors = aggregate_ecc_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_ecc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_ecc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -980,9 +972,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_replay_errors"] {
// Data link transmit replay error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_REPLAY)
aggregate_replay_errors = aggregate_replay_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_replay_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_replay_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -993,9 +984,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_recovery_errors"] {
// Data link transmit recovery error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_RECOVERY)
aggregate_recovery_errors = aggregate_recovery_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_recovery_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_recovery_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -1006,9 +996,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_crc_flit_errors"] {
// Data link receive flow control digit CRC error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_FLIT)
aggregate_crc_flit_errors = aggregate_crc_flit_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_crc_flit_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_crc_flit_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -1019,58 +1008,16 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
}
}
}

// Export aggregated values
if !device.excludeMetrics["nv_nvlink_crc_errors"] {
// Data link receive data CRC error counter
y, err := lp.NewMessage("nv_nvlink_crc_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_crc_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_ecc_errors"] {
// Data link receive data ECC error counter
y, err := lp.NewMessage("nv_nvlink_ecc_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_ecc_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_replay_errors"] {
// Data link transmit replay error counter
y, err := lp.NewMessage("nv_nvlink_replay_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_replay_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_recovery_errors"] {
// Data link transmit recovery error counter
y, err := lp.NewMessage("nv_nvlink_recovery_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_recovery_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_crc_flit_errors"] {
// Data link receive flow control digit CRC error counter
y, err := lp.NewMessage("nv_nvlink_crc_flit_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_crc_flit_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
return nil
}

func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var err error
if !m.init {
return
}

readAll := func(device NvidiaCollectorDevice, output chan lp.CCMessage) {
readAll := func(device NvidiaCollectorDevice, output chan lp.CCMetric) {
name, ret := nvml.DeviceGetName(device.device)
if ret != nvml.SUCCESS {
name = "NoName"

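All hunks in this file follow one mechanical substitution: the message API from cc-energy-manager (`lp.CCMessage` / `lp.NewMessage`) is replaced by the collector's own `lp.CCMetric` / `lp.New` from `pkg/ccMetric`, while the call sites stay otherwise unchanged. A minimal sketch of the resulting emission pattern (the metric name `my_metric` and the helper function are illustrative, not part of the diff; `lp.New` and `AddMeta` appear verbatim in the hunks above):

```go
package collectors

import (
	"time"

	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

// emitExample shows the pattern used throughout the file after this change:
// build a CCMetric with lp.New, attach the unit as meta information, and
// send it through the output channel.
func emitExample(tags, meta map[string]string, output chan lp.CCMetric) {
	y, err := lp.New("my_metric", tags, meta, map[string]interface{}{"value": 42.0}, time.Now())
	if err == nil {
		y.AddMeta("unit", "MHz")
		output <- y
	}
}
```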
@@ -9,29 +9,20 @@ import (
"strings"
"time"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

// running average power limit (RAPL) monitoring attributes for a zone
// Only for Intel systems

type RAPLZoneInfo struct {
energy int64 // current reading of the energy counter in micro joules
maxEnergyRange int64 // Range of the above energy counter in micro-joules
energyTimestamp time.Time // timestamp when energy counter was read
energyFilepath string // path to a file containing the zones current energy counter in micro joules
shortTermFilepath string // path to short term power limit
longTermFilepath string // path to long term power limit
enabledFilepath string // path to check whether limits are enabled
name string

// tags describing the RAPL zone:
// * zone_name, subzone_name: e.g. psys, dram, core, uncore, package-0
// * zone_id: e.g. 0:1 (zone 0 sub zone 1)
// type=socket for dram, core, uncore, package-* and type=node for psys
// type-id=socket id
tags map[string]string
tags map[string]string
energyFilepath string // path to a file containing the zones current energy counter in micro joules
energy int64 // current reading of the energy counter in micro joules
energyTimestamp time.Time // timestamp when energy counter was read
maxEnergyRange int64 // Range of the above energy counter in micro-joules
}

type RAPLCollector struct {
@@ -42,40 +33,12 @@ type RAPLCollector struct {
// * 0:1 for zone 0 subzone 1
ExcludeByID []string `json:"exclude_device_by_id,omitempty"`
// Exclude names for RAPL zones, e.g. psys, dram, core, uncore, package-0
ExcludeByName []string `json:"exclude_device_by_name,omitempty"`
SkipEnergyReading bool `json:"skip_energy_reading,omitempty"`
SkipLimitsReading bool `json:"skip_limits_reading,omitempty"`
OnlyEnabledLimits bool `json:"only_enabled_limits,omitempty"`
ExcludeByName []string `json:"exclude_device_by_name,omitempty"`
}
raplZoneInfo []RAPLZoneInfo
RAPLZoneInfo []RAPLZoneInfo
meta map[string]string // default meta information
}

// Get the path to the power limit file for zone selectable by limit name
// Common limit names for Intel systems are
// - long_term
// - short_term
// Does not support AMD as AMD systems do not provide the power limits
// through sysfs
func ZoneLimitFile(folder string, limit_name string) string {
nameGlob := filepath.Join(folder, "constraint_*_name")
candidates, err := filepath.Glob(nameGlob)
if err == nil {
for _, c := range candidates {
if v, err := os.ReadFile(c); err == nil {
if strings.TrimSpace(string(v)) == limit_name {
var i int
n, err := fmt.Sscanf(filepath.Base(c), "constraint_%d_name", &i)
if err == nil && n == 1 {
return filepath.Join(folder, fmt.Sprintf("constraint_%d_power_limit_uw", i))
}
}
}
}
}
return ""
}

// Init initializes the running average power limit (RAPL) collector
func (m *RAPLCollector) Init(config json.RawMessage) error {

@@ -95,9 +58,6 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {
}

// Read in the JSON configuration
m.config.SkipEnergyReading = false
m.config.SkipLimitsReading = false
m.config.OnlyEnabledLimits = true
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
@@ -123,62 +83,50 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {
// readZoneInfo reads RAPL monitoring attributes for a zone given by zonePath
// See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes
readZoneInfo := func(zonePath string) (z struct {
name string // zones name e.g. psys, dram, core, uncore, package-0
energyFilepath string // path to a file containing the zones current energy counter in micro joules
energy int64 // current reading of the energy counter in micro joules
energyTimestamp time.Time // timestamp when energy counter was read
maxEnergyRange int64 // Range of the above energy counter in micro-joules
shortTermFilepath string
longTermFilepath string
enabledFilepath string
name string // zones name e.g. psys, dram, core, uncore, package-0
energyFilepath string // path to a file containing the zones current energy counter in micro joules
energy int64 // current reading of the energy counter in micro joules
energyTimestamp time.Time // timestamp when energy counter was read
maxEnergyRange int64 // Range of the above energy counter in micro-joules
ok bool // Are all information available?
}) {
// zones name e.g. psys, dram, core, uncore, package-0

foundName := false
if v, err :=
os.ReadFile(
filepath.Join(zonePath, "name")); err == nil {
foundName = true
z.name = strings.TrimSpace(string(v))
}

if os.Geteuid() == 0 && (!m.config.SkipEnergyReading) {
// path to a file containing the zones current energy counter in micro joules
z.energyFilepath = filepath.Join(zonePath, "energy_uj")
// current reading of the energy counter in micro joules
if v, err := os.ReadFile(z.energyFilepath); err == nil {
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
z.energy = i
// timestamp when energy counter was read
z.energyTimestamp = time.Now()
}
} else {
cclog.ComponentError(m.name, "Cannot read energy file for ", z.name, ":", err.Error())
// path to a file containing the zones current energy counter in micro joules
z.energyFilepath = filepath.Join(zonePath, "energy_uj")

// current reading of the energy counter in micro joules
foundEnergy := false
if v, err := os.ReadFile(z.energyFilepath); err == nil {
// timestamp when energy counter was read
z.energyTimestamp = time.Now()
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
foundEnergy = true
z.energy = i
}
// Range of the above energy counter in micro-joules
if v, err :=
os.ReadFile(
filepath.Join(zonePath, "max_energy_range_uj")); err == nil {
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
z.maxEnergyRange = i
}
}
} else {
cclog.ComponentInfo(m.name, "Energy readings for", zonePath, "disabled")
}

if !m.config.SkipLimitsReading {
z.shortTermFilepath = ZoneLimitFile(zonePath, "short_term")
if _, err := os.Stat(z.shortTermFilepath); err != nil {
z.shortTermFilepath = ""
// Range of the above energy counter in micro-joules
foundMaxEnergyRange := false
if v, err :=
os.ReadFile(
filepath.Join(zonePath, "max_energy_range_uj")); err == nil {
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
foundMaxEnergyRange = true
z.maxEnergyRange = i
}
z.longTermFilepath = ZoneLimitFile(zonePath, "long_term")
if _, err := os.Stat(z.longTermFilepath); err != nil {
z.longTermFilepath = ""
}
z.enabledFilepath = filepath.Join(zonePath, "enabled")
} else {
cclog.ComponentInfo(m.name, "Power limit readings for", zonePath, "disabled")
}

// Are all information available?
z.ok = foundName && foundEnergy && foundMaxEnergyRange

return
}

@@ -195,42 +143,25 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {

for _, zonePath := range zonesPath {
zoneID := strings.TrimPrefix(zonePath, zonePrefix)
zonetags := make(map[string]string)

z := readZoneInfo(zonePath)
if !isIDExcluded[zoneID] &&
if z.ok &&
!isIDExcluded[zoneID] &&
!isNameExcluded[z.name] {

si := RAPLZoneInfo{
tags: make(map[string]string),
energyFilepath: z.energyFilepath,
energy: z.energy,
energyTimestamp: z.energyTimestamp,
maxEnergyRange: z.maxEnergyRange,
shortTermFilepath: z.shortTermFilepath,
longTermFilepath: z.longTermFilepath,
enabledFilepath: z.enabledFilepath,
name: z.name,
}
si.tags["type"] = "node"
si.tags["type-id"] = "0"
var pid int = 0
if strings.HasPrefix(z.name, "package-") {
n, err := fmt.Sscanf(z.name, "package-%d", &pid)
if err == nil && n == 1 {
si.tags["type-id"] = fmt.Sprintf("%d", pid)
si.tags["type"] = "socket"
}
si.name = "pkg"
}
// Add RAPL monitoring attributes for a zone
if _, ok1 := si.tags["type"]; ok1 {
if _, ok2 := si.tags["type-id"]; ok2 {
m.raplZoneInfo = append(m.raplZoneInfo, si)
zonetags["type"] = si.tags["type"]
zonetags["type-id"] = si.tags["type-id"]
}
}
m.RAPLZoneInfo =
append(
m.RAPLZoneInfo,
RAPLZoneInfo{
tags: map[string]string{
"id": zoneID,
"zone_name": z.name,
},
energyFilepath: z.energyFilepath,
energy: z.energy,
energyTimestamp: z.energyTimestamp,
maxEnergyRange: z.maxEnergyRange,
})
}

// find all sub zones for the given zone
@@ -243,32 +174,29 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {
for _, subZonePath := range subZonesPath {
subZoneID := strings.TrimPrefix(subZonePath, subZonePrefix)
sz := readZoneInfo(subZonePath)

if len(zoneID) > 0 && len(z.name) > 0 &&
sz.ok &&
!isIDExcluded[zoneID+":"+subZoneID] &&
!isNameExcluded[sz.name] {

si := RAPLZoneInfo{
tags: zonetags,
energyFilepath: sz.energyFilepath,
energy: sz.energy,
energyTimestamp: sz.energyTimestamp,
maxEnergyRange: sz.maxEnergyRange,
shortTermFilepath: sz.shortTermFilepath,
longTermFilepath: sz.longTermFilepath,
enabledFilepath: sz.enabledFilepath,
name: sz.name,
}
if _, ok1 := si.tags["type"]; ok1 {
if _, ok2 := si.tags["type-id"]; ok2 {
m.raplZoneInfo = append(m.raplZoneInfo, si)
}
}
m.RAPLZoneInfo =
append(
m.RAPLZoneInfo,
RAPLZoneInfo{
tags: map[string]string{
"id": zoneID + ":" + subZoneID,
"zone_name": z.name,
"sub_zone_name": sz.name,
},
energyFilepath: sz.energyFilepath,
energy: sz.energy,
energyTimestamp: sz.energyTimestamp,
maxEnergyRange: sz.maxEnergyRange,
})
}
}
}

if m.raplZoneInfo == nil {
if m.RAPLZoneInfo == nil {
return fmt.Errorf("no running average power limit (RAPL) device found in %s", controlTypePath)

}
@@ -277,7 +205,7 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {
cclog.ComponentDebug(
m.name,
"initialized",
len(m.raplZoneInfo),
len(m.RAPLZoneInfo),
"zones with running average power limit (RAPL) monitoring attributes")
m.init = true

@@ -286,91 +214,42 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {

// Read reads running average power limit (RAPL) monitoring attributes for all initialized zones
// See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes
func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMetric) {

for i := range m.raplZoneInfo {
p := &m.raplZoneInfo[i]
for i := range m.RAPLZoneInfo {
p := &m.RAPLZoneInfo[i]

if os.Geteuid() == 0 && (!m.config.SkipEnergyReading) {
// Read current value of the energy counter in micro joules
if v, err := os.ReadFile(p.energyFilepath); err == nil {
energyTimestamp := time.Now()
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
energy := i
// Read current value of the energy counter in micro joules
if v, err := os.ReadFile(p.energyFilepath); err == nil {
energyTimestamp := time.Now()
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
energy := i

// Compute average power (Δ energy / Δ time)
energyDiff := energy - p.energy
if energyDiff < 0 {
// Handle overflow:
// ( p.maxEnergyRange - p.energy ) + energy
// = p.maxEnergyRange + ( energy - p.energy )
// = p.maxEnergyRange + diffEnergy
energyDiff += p.maxEnergyRange
}
timeDiff := energyTimestamp.Sub(p.energyTimestamp)
averagePower := float64(energyDiff) / float64(timeDiff.Microseconds())

y, err := lp.NewMetric(
fmt.Sprintf("rapl_%s_average_power", p.name),
p.tags,
m.meta,
averagePower,
energyTimestamp)
if err == nil {
output <- y
}

e, err := lp.NewMetric(
fmt.Sprintf("rapl_%s_energy", p.name),
p.tags,
m.meta,
float64(energyDiff)*1e-3,
energyTimestamp)
if err == nil {
e.AddMeta("unit", "Joules")
output <- e
}

// Save current energy counter state
p.energy = energy
p.energyTimestamp = energyTimestamp
// Compute average power (Δ energy / Δ time)
energyDiff := energy - p.energy
if energyDiff < 0 {
// Handle overflow:
// ( p.maxEnergyRange - p.energy ) + energy
// = p.maxEnergyRange + ( energy - p.energy )
// = p.maxEnergyRange + diffEnergy
energyDiff += p.maxEnergyRange
}
}
}
// https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#constraints
if !m.config.SkipLimitsReading {
skip := false
if m.config.OnlyEnabledLimits {
if v, err := os.ReadFile(p.enabledFilepath); err == nil {
if strings.TrimSpace(string(v)) == "0" {
skip = true
}
}
}
if !skip {
if len(p.shortTermFilepath) > 0 {
if v, err := os.ReadFile(p.shortTermFilepath); err == nil {
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
name := fmt.Sprintf("rapl_%s_limit_short_term", p.name)
y, err := lp.NewMetric(name, p.tags, m.meta, i/1e6, time.Now())
if err == nil {
output <- y
}
}
}
timeDiff := energyTimestamp.Sub(p.energyTimestamp)
averagePower := float64(energyDiff) / float64(timeDiff.Microseconds())

y, err := lp.New(
"rapl_average_power",
p.tags,
m.meta,
map[string]interface{}{"value": averagePower},
energyTimestamp)
if err == nil {
output <- y
}

if len(p.longTermFilepath) > 0 {
if v, err := os.ReadFile(p.longTermFilepath); err == nil {
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
name := fmt.Sprintf("rapl_%s_limit_long_term", p.name)
y, err := lp.NewMetric(name, p.tags, m.meta, i/1e6, time.Now())
if err == nil {
output <- y
}
}
}
}
// Save current energy counter state
p.energy = energy
p.energyTimestamp = energyTimestamp
}
}
}

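One detail in the `Read` hunks above deserves a note: `energy_uj` is a wrapping counter, so a negative delta means the counter rolled over at `max_energy_range_uj` and the range must be added back before dividing by the elapsed time. A compact sketch of that correction (the function name is illustrative; the arithmetic is taken directly from the hunk):

```go
// overflowCorrectedDelta mirrors the wrap-around handling in Read: when the
// micro-joule counter wraps at maxEnergyRange, the raw delta turns negative
// and adding the range restores the true consumption:
// (maxEnergyRange - prev) + curr == maxEnergyRange + (curr - prev)
func overflowCorrectedDelta(prev, curr, maxEnergyRange int64) int64 {
	d := curr - prev
	if d < 0 {
		d += maxEnergyRange
	}
	return d
}
```

The average power then falls out directly: the corrected delta is in micro-joules and the elapsed time is taken in microseconds, so the quotient is already in Watts.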
@@ -1,23 +1,18 @@
## `rapl` collector
# Running average power limit (RAPL) metric collector

This collector reads running average power limit (RAPL) monitoring attributes to compute average power consumption metrics. See <https://www.kernel.org/doc/html/latest/power/powercap/powercap.html>.
This collector reads running average power limit (RAPL) monitoring attributes to compute average power consumption metrics. See <https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes>.

The Likwid metric collector provides similar functionality.

## Configuration

```json
"rapl": {
    "exclude_device_by_id": ["0:1", "0:2"],
    "exclude_device_by_name": ["psys"],
    "skip_energy_reading": false,
    "skip_limits_reading": false,
    "only_enabled_limits": true
    "exclude_device_by_name": ["psys"]
}
```

Metrics:
* `rapl_<domain>_average_power`: average power consumption in Watt. The average is computed over the time span between the last and the current measurement
* `rapl_<domain>_energy`: Energy difference since the last measurement
* `rapl_<domain>_limit_short_term`: Short term powercap setting for the domain
* `rapl_<domain>_limit_long_term`: Long term powercap setting for the domain
## Metrics

Only the `rapl_<domain>_average_power` and `rapl_<domain>_energy` metrics require root permissions. The limits can be read as a regular user. Some domains have limits available but they are not enabled. By default, only enabled domain limits are collected.

Energy and power measurements can also be done with the Likwid metric collector.
* `rapl_average_power`: average power consumption in Watt. The average is computed over the time span between the last and the current measurement
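For a concrete feel of the `average_power` metric described above: RAPL energy counters grow in microjoules, so the quotient of an energy delta (µJ) and a time delta (µs) is already in Watts. A small sketch with made-up numbers:

```go
package main

import (
	"fmt"
	"time"
)

// averagePower computes the mean power in Watts between two RAPL energy
// readings. energy_uj counters are reported in microjoules, so microjoules
// divided by microseconds yields Watts directly.
func averagePower(energyBeforeUJ, energyAfterUJ int64, elapsed time.Duration) float64 {
	return float64(energyAfterUJ-energyBeforeUJ) / float64(elapsed.Microseconds())
}

func main() {
	// Example: 30 J consumed over 10 s -> 3 W
	fmt.Println(averagePower(0, 30_000_000, 10*time.Second))
}
```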
@@ -7,7 +7,7 @@ import (
	"time"

	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
	"github.com/ClusterCockpit/go-rocm-smi/pkg/rocm_smi"
)

@@ -162,7 +162,7 @@ func (m *RocmSmiCollector) Init(config json.RawMessage) error {

// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	// Create a sample metric
	timestamp := time.Now()

@@ -175,119 +175,119 @@ func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage

	if !dev.excludeMetrics["rocm_gfx_util"] {
		value := metrics.Average_gfx_activity
		y, err := lp.NewMessage("rocm_gfx_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_gfx_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_umc_util"] {
		value := metrics.Average_umc_activity
		y, err := lp.NewMessage("rocm_umc_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_umc_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_mm_util"] {
		value := metrics.Average_mm_activity
		y, err := lp.NewMessage("rocm_mm_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_mm_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_avg_power"] {
		value := metrics.Average_socket_power
		y, err := lp.NewMessage("rocm_avg_power", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_avg_power", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_temp_mem"] {
		value := metrics.Temperature_mem
		y, err := lp.NewMessage("rocm_temp_mem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_temp_mem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_temp_hotspot"] {
		value := metrics.Temperature_hotspot
		y, err := lp.NewMessage("rocm_temp_hotspot", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_temp_hotspot", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_temp_edge"] {
		value := metrics.Temperature_edge
		y, err := lp.NewMessage("rocm_temp_edge", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_temp_edge", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_temp_vrgfx"] {
		value := metrics.Temperature_vrgfx
		y, err := lp.NewMessage("rocm_temp_vrgfx", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_temp_vrgfx", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_temp_vrsoc"] {
		value := metrics.Temperature_vrsoc
		y, err := lp.NewMessage("rocm_temp_vrsoc", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_temp_vrsoc", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_temp_vrmem"] {
		value := metrics.Temperature_vrmem
		y, err := lp.NewMessage("rocm_temp_vrmem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_temp_vrmem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_gfx_clock"] {
		value := metrics.Average_gfxclk_frequency
		y, err := lp.NewMessage("rocm_gfx_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_gfx_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_soc_clock"] {
		value := metrics.Average_socclk_frequency
		y, err := lp.NewMessage("rocm_soc_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_soc_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_u_clock"] {
		value := metrics.Average_uclk_frequency
		y, err := lp.NewMessage("rocm_u_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_u_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_v0_clock"] {
		value := metrics.Average_vclk0_frequency
		y, err := lp.NewMessage("rocm_v0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_v0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_v1_clock"] {
		value := metrics.Average_vclk1_frequency
		y, err := lp.NewMessage("rocm_v1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_v1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_d0_clock"] {
		value := metrics.Average_dclk0_frequency
		y, err := lp.NewMessage("rocm_d0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_d0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if !dev.excludeMetrics["rocm_d1_clock"] {
		value := metrics.Average_dclk1_frequency
		y, err := lp.NewMessage("rocm_d1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		y, err := lp.New("rocm_d1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
		if err == nil {
			output <- y
		}
@@ -295,7 +295,7 @@ func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage
	if !dev.excludeMetrics["rocm_temp_hbm"] {
		for i := 0; i < rocm_smi.NUM_HBM_INSTANCES; i++ {
			value := metrics.Temperature_hbm[i]
			y, err := lp.NewMessage("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
			y, err := lp.New("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
			if err == nil {
				y.AddTag("stype", "device")
				y.AddTag("stype-id", fmt.Sprintf("%d", i))
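Most hunks in this compare swap `lp.NewMessage` (cc-message) for `lp.New` (ccMetric); both are called with the same name, tags, meta, fields, and timestamp arguments at every call site above. A minimal, illustrative sketch of building one metric with the ccMetric constructor (tag and meta values are made up):

```go
package main

import (
	"fmt"
	"time"

	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

func main() {
	tags := map[string]string{"type": "accelerator", "type-id": "0"}
	meta := map[string]string{"source": "RocmSmiCollector"}
	// Same argument order as the call sites in the diff:
	// name, tags, meta, fields, timestamp.
	y, err := lp.New("rocm_gfx_util", tags, meta, map[string]interface{}{"value": 42.0}, time.Now())
	if err == nil {
		fmt.Println(y)
	}
}
```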
@@ -4,8 +4,8 @@ import (
	"encoding/json"
	"time"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

// These are the fields we read from the JSON configuration
@@ -32,7 +32,7 @@ type SampleCollector struct {
func (m *SampleCollector) Init(config json.RawMessage) error {
	var err error = nil
	// Always set the name early in Init() to use it in cclog.Component* functions
	m.name = "SampleCollector"
	m.name = "InternalCollector"
	// This is for later use, also call it early
	m.setup()
	// Tell whether the collector should be run in parallel with others (reading files, ...)
@@ -74,7 +74,7 @@ func (m *SampleCollector) Init(config json.RawMessage) error {

// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	// Create a sample metric
	timestamp := time.Now()

@@ -85,7 +85,7 @@ func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage)
	// stop := readState()
	// value = (stop - start) / interval.Seconds()

	y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
	y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
	if err == nil {
		// Send it to output channel
		output <- y
@@ -5,8 +5,8 @@ import (
	"sync"
	"time"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

// These are the fields we read from the JSON configuration
@@ -25,7 +25,7 @@ type SampleTimerCollector struct {
	config   SampleTimerCollectorConfig // the configuration structure
	interval time.Duration              // the interval parsed from configuration
	ticker   *time.Ticker               // own timer
	output   chan lp.CCMessage          // own internal output channel
	output   chan lp.CCMetric           // own internal output channel
}

func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
@@ -100,14 +100,14 @@ func (m *SampleTimerCollector) ReadMetrics(timestamp time.Time) {
	// stop := readState()
	// value = (stop - start) / interval.Seconds()

	y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
	y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
	if err == nil && m.output != nil {
		// Send it to output channel if we have a valid channel
		m.output <- y
	}
}

func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	// Capture output channel
	m.output = output
}
@@ -11,7 +11,7 @@ import (
	"time"

	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const SCHEDSTATFILE = `/proc/schedstat`
@@ -96,7 +96,7 @@ func (m *SchedstatCollector) Init(config json.RawMessage) error {
	return err
}

func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) {
func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMetric, now time.Time, tsdelta time.Duration) {
	running, _ := strconv.ParseInt(linefields[7], 10, 64)
	waiting, _ := strconv.ParseInt(linefields[8], 10, 64)
	diff_running := running - m.olddata[linefields[0]]["running"]
@@ -109,7 +109,7 @@ func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]
	m.olddata[linefields[0]]["waiting"] = waiting
	value := l_running + l_waiting

	y, err := lp.NewMessage("cpu_load_core", tags, m.meta, map[string]interface{}{"value": value}, now)
	y, err := lp.New("cpu_load_core", tags, m.meta, map[string]interface{}{"value": value}, now)
	if err == nil {
		// Send it to output channel
		output <- y
@@ -118,7 +118,7 @@ func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]

// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	if !m.init {
		return
	}
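For reference, the `cpu_load_core` value above is a plain delta: the collector parses fields 7 and 8 (after splitting) of each `cpuN` line in `/proc/schedstat`, treats them as cumulative running and waiting time, and differences two samples. A rough standalone sketch of the same sampling loop (field semantics as assumed by the collector; Linux only, error handling trimmed):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"
)

// readSchedstat returns the cumulative "running" and "waiting" counters
// (fields 7 and 8 after splitting) for every cpuN line in /proc/schedstat.
func readSchedstat() map[string][2]int64 {
	out := map[string][2]int64{}
	f, err := os.Open("/proc/schedstat")
	if err != nil {
		return out
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) >= 9 && strings.HasPrefix(fields[0], "cpu") {
			running, _ := strconv.ParseInt(fields[7], 10, 64)
			waiting, _ := strconv.ParseInt(fields[8], 10, 64)
			out[fields[0]] = [2]int64{running, waiting}
		}
	}
	return out
}

func main() {
	before := readSchedstat()
	time.Sleep(time.Second)
	after := readSchedstat()
	for cpu, b := range before {
		a := after[cpu]
		// Delta of (running + waiting) over the sampling interval.
		fmt.Println(cpu, (a[0]-b[0])+(a[1]-b[1]))
	}
}
```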
@@ -7,7 +7,7 @@ import (
	"time"

	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

type SelfCollectorConfig struct {
@@ -42,56 +42,56 @@ func (m *SelfCollector) Init(config json.RawMessage) error {
	return err
}

func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	timestamp := time.Now()

	if m.config.MemStats {
		var memstats runtime.MemStats
		runtime.ReadMemStats(&memstats)

		y, err := lp.NewMessage("total_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.TotalAlloc}, timestamp)
		y, err := lp.New("total_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.TotalAlloc}, timestamp)
		if err == nil {
			y.AddMeta("unit", "Bytes")
			output <- y
		}
		y, err = lp.NewMessage("heap_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapAlloc}, timestamp)
		y, err = lp.New("heap_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapAlloc}, timestamp)
		if err == nil {
			y.AddMeta("unit", "Bytes")
			output <- y
		}
		y, err = lp.NewMessage("heap_sys", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapSys}, timestamp)
		y, err = lp.New("heap_sys", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapSys}, timestamp)
		if err == nil {
			y.AddMeta("unit", "Bytes")
			output <- y
		}
		y, err = lp.NewMessage("heap_idle", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapIdle}, timestamp)
		y, err = lp.New("heap_idle", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapIdle}, timestamp)
		if err == nil {
			y.AddMeta("unit", "Bytes")
			output <- y
		}
		y, err = lp.NewMessage("heap_inuse", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapInuse}, timestamp)
		y, err = lp.New("heap_inuse", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapInuse}, timestamp)
		if err == nil {
			y.AddMeta("unit", "Bytes")
			output <- y
		}
		y, err = lp.NewMessage("heap_released", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapReleased}, timestamp)
		y, err = lp.New("heap_released", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapReleased}, timestamp)
		if err == nil {
			y.AddMeta("unit", "Bytes")
			output <- y
		}
		y, err = lp.NewMessage("heap_objects", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapObjects}, timestamp)
		y, err = lp.New("heap_objects", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapObjects}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if m.config.GoRoutines {
		y, err := lp.NewMessage("num_goroutines", m.tags, m.meta, map[string]interface{}{"value": runtime.NumGoroutine()}, timestamp)
		y, err := lp.New("num_goroutines", m.tags, m.meta, map[string]interface{}{"value": runtime.NumGoroutine()}, timestamp)
		if err == nil {
			output <- y
		}
	}
	if m.config.CgoCalls {
		y, err := lp.NewMessage("num_cgo_calls", m.tags, m.meta, map[string]interface{}{"value": runtime.NumCgoCall()}, timestamp)
		y, err := lp.New("num_cgo_calls", m.tags, m.meta, map[string]interface{}{"value": runtime.NumCgoCall()}, timestamp)
		if err == nil {
			output <- y
		}
@@ -102,35 +102,35 @@ func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMessage) {
	if err == nil {
		sec, nsec := rusage.Utime.Unix()
		t := float64(sec) + (float64(nsec) * 1e-9)
		y, err := lp.NewMessage("rusage_user_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
		y, err := lp.New("rusage_user_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
		if err == nil {
			y.AddMeta("unit", "seconds")
			output <- y
		}
		sec, nsec = rusage.Stime.Unix()
		t = float64(sec) + (float64(nsec) * 1e-9)
		y, err = lp.NewMessage("rusage_system_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
		y, err = lp.New("rusage_system_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
		if err == nil {
			y.AddMeta("unit", "seconds")
			output <- y
		}
		y, err = lp.NewMessage("rusage_vol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nvcsw}, timestamp)
		y, err = lp.New("rusage_vol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nvcsw}, timestamp)
		if err == nil {
			output <- y
		}
		y, err = lp.NewMessage("rusage_invol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nivcsw}, timestamp)
		y, err = lp.New("rusage_invol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nivcsw}, timestamp)
		if err == nil {
			output <- y
		}
		y, err = lp.NewMessage("rusage_signals", m.tags, m.meta, map[string]interface{}{"value": rusage.Nsignals}, timestamp)
		y, err = lp.New("rusage_signals", m.tags, m.meta, map[string]interface{}{"value": rusage.Nsignals}, timestamp)
		if err == nil {
			output <- y
		}
		y, err = lp.NewMessage("rusage_major_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Majflt}, timestamp)
		y, err = lp.New("rusage_major_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Majflt}, timestamp)
		if err == nil {
			output <- y
		}
		y, err = lp.NewMessage("rusage_minor_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Minflt}, timestamp)
		y, err = lp.New("rusage_minor_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Minflt}, timestamp)
		if err == nil {
			output <- y
		}
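The rusage block above converts `syscall.Timeval` values to floating-point seconds via `sec + nsec * 1e-9`. The same pattern in isolation (Linux only):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	var rusage syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &rusage); err != nil {
		fmt.Println("getrusage failed:", err)
		return
	}
	// Timeval.Unix() splits the value into whole seconds and nanoseconds.
	sec, nsec := rusage.Utime.Unix()
	user := float64(sec) + float64(nsec)*1e-9
	sec, nsec = rusage.Stime.Unix()
	system := float64(sec) + float64(nsec)*1e-9
	fmt.Printf("user: %.6f s, system: %.6f s\n", user, system)
}
```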
@@ -10,7 +10,7 @@ import (
	"time"

	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

// See: https://www.kernel.org/doc/html/latest/hwmon/sysfs-interface.html
@@ -171,7 +171,7 @@ func (m *TempCollector) Init(config json.RawMessage) error {
	return nil
}

func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMetric) {

	for _, sensor := range m.sensors {
		// Read sensor file
@@ -190,7 +190,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMetric) {
			continue
		}
		x /= 1000
		y, err := lp.NewMessage(
		y, err := lp.New(
			sensor.metricName,
			sensor.tags,
			m.meta,
@@ -203,7 +203,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {

		// max temperature
		if m.config.ReportMaxTemp && sensor.maxTemp != 0 {
			y, err := lp.NewMessage(
			y, err := lp.New(
				sensor.maxTempName,
				sensor.tags,
				m.meta,
@@ -217,7 +217,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {

		// critical temperature
		if m.config.ReportCriticalTemp && sensor.critTemp != 0 {
			y, err := lp.NewMessage(
			y, err := lp.New(
				sensor.critTempName,
				sensor.tags,
				m.meta,
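The `x /= 1000` above follows the hwmon convention (see the sysfs-interface link in the import hunk) that `temp*_input` files report millidegrees Celsius. A tiny standalone sketch with a hardcoded example path (the real collector enumerates `/sys/class/hwmon/`):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Example hwmon sensor file; the device index varies per system.
	raw, err := os.ReadFile("/sys/class/hwmon/hwmon0/temp1_input")
	if err != nil {
		fmt.Println("sensor not available:", err)
		return
	}
	x, err := strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 64)
	if err != nil {
		fmt.Println("unexpected content:", err)
		return
	}
	fmt.Printf("%d degC\n", x/1000) // hwmon reports millidegree Celsius
}
```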
@@ -9,7 +9,7 @@ import (
	"strings"
	"time"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const MAX_NUM_PROCS = 10
@@ -53,7 +53,7 @@ func (m *TopProcsCollector) Init(config json.RawMessage) error {
	return nil
}

func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	if !m.init {
		return
	}
@@ -68,7 +68,7 @@ func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessag
	lines := strings.Split(string(stdout), "\n")
	for i := 1; i < m.config.Num_procs+1; i++ {
		name := fmt.Sprintf("topproc%d", i)
		y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
		y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
		if err == nil {
			output <- y
		}
@@ -12,8 +12,8 @@ The global file contains the paths to the other four files and some global optio
  "collectors" : "collectors.json",
  "receivers" : "receivers.json",
  "router" : "router.json",
  "interval": "10s",
  "duration": "1s"
  "interval": 10,
  "duration": 1
}
```
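One side of this hunk writes `interval` and `duration` as Go-style duration strings, the other as plain second counts. For reference, the string form parses with the standard library:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "10s" style values are handled by time.ParseDuration.
	interval, err := time.ParseDuration("10s")
	if err != nil {
		panic(err)
	}
	fmt.Println(interval.Seconds()) // 10
}
```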
38 go.mod

@@ -1,49 +1,41 @@
module github.com/ClusterCockpit/cc-metric-collector

go 1.23.0

toolchain go1.23.2
go 1.21

require (
	github.com/ClusterCockpit/cc-energy-manager v0.0.0-20240709142550-dd446f7ab900
	github.com/ClusterCockpit/cc-lib v0.1.0-beta.1
	github.com/ClusterCockpit/cc-units v0.4.0
	github.com/ClusterCockpit/go-rocm-smi v0.3.0
	github.com/NVIDIA/go-nvml v0.12.0-2
	github.com/PaesslerAG/gval v1.2.2
	github.com/expr-lang/expr v1.16.9
	github.com/fsnotify/fsnotify v1.7.0
	github.com/gorilla/mux v1.8.1
	github.com/influxdata/influxdb-client-go/v2 v2.14.0
	github.com/influxdata/influxdb-client-go/v2 v2.13.0
	github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf
	github.com/influxdata/line-protocol/v2 v2.2.1
	github.com/nats-io/nats.go v1.39.0
	github.com/prometheus/client_golang v1.20.5
	github.com/nats-io/nats.go v1.33.1
	github.com/prometheus/client_golang v1.19.0
	github.com/stmcginnis/gofish v0.15.0
	github.com/tklauser/go-sysconf v0.3.13
	golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1
	golang.org/x/exp v0.0.0-20250215185904-eff6e970281f
	golang.org/x/sys v0.28.0
	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
	golang.org/x/sys v0.18.0
)

require (
	github.com/ClusterCockpit/cc-backend v1.4.2 // indirect
	github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/klauspost/compress v1.17.9 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/nats-io/nkeys v0.4.9 // indirect
	github.com/klauspost/compress v1.17.7 // indirect
	github.com/nats-io/nkeys v0.4.7 // indirect
	github.com/nats-io/nuid v1.0.1 // indirect
	github.com/oapi-codegen/runtime v1.1.1 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.55.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
	github.com/prometheus/client_model v0.6.0 // indirect
	github.com/prometheus/common v0.49.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/shopspring/decimal v1.3.1 // indirect
	github.com/tklauser/numcpus v0.7.0 // indirect
	golang.org/x/crypto v0.31.0 // indirect
	golang.org/x/net v0.31.0 // indirect
	google.golang.org/protobuf v1.35.2 // indirect
	golang.org/x/crypto v0.21.0 // indirect
	golang.org/x/net v0.22.0 // indirect
	google.golang.org/protobuf v1.33.0 // indirect
)
85 go.sum

@@ -1,9 +1,3 @@
github.com/ClusterCockpit/cc-backend v1.4.2 h1:kTOzqkh9N0564N9nqQThnSs7TAfg8RLgvSm00e5HtIc=
github.com/ClusterCockpit/cc-backend v1.4.2/go.mod h1:g8TNHXe4AXej26snu2//jO3mUF980elT93iV/k11O/c=
github.com/ClusterCockpit/cc-energy-manager v0.0.0-20240709142550-dd446f7ab900 h1:6+WNav16uWTEDC09hkZKEHfBhtc91p/ZcjgCtyntuIg=
github.com/ClusterCockpit/cc-energy-manager v0.0.0-20240709142550-dd446f7ab900/go.mod h1:EbYeC5t+Y0kW1Q1pP2n9zMqbeYEJITG8YGvAUihXVn4=
github.com/ClusterCockpit/cc-lib v0.1.0-beta.1 h1:dz9j0g2cod8+SMDjuoIY6ISpiHHeekhX6yQaeiwiwJw=
github.com/ClusterCockpit/cc-lib v0.1.0-beta.1/go.mod h1:kXMskla1i5ZSfXW0vVRIHgGeXMU5zu2PzYOYnUaOr80=
github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
github.com/ClusterCockpit/go-rocm-smi v0.3.0 h1:1qZnSpG7/NyLtc7AjqnUL9Jb8xtqG1nMVgp69rJfaR8=
@@ -13,7 +7,6 @@ github.com/NVIDIA/go-nvml v0.12.0-2 h1:Sg239yy7jmopu/cuvYauoMj9fOpcGMngxVxxS1EBX
github.com/NVIDIA/go-nvml v0.12.0-2/go.mod h1:7ruy85eOM73muOc/I37euONSwEyFqZsv5ED9AogD4G0=
github.com/PaesslerAG/gval v1.2.2 h1:Y7iBzhgE09IGTt5QgGQ2IdaYYYOU134YGHBThD+wm9E=
github.com/PaesslerAG/gval v1.2.2/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac=
github.com/PaesslerAG/jsonpath v0.1.0 h1:gADYeifvlqK3R3i2cR5B4DGgxLXIPb3TRTH1mGi0jPI=
github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
@@ -21,76 +14,57 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM=
github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98=
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig=
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo=
github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY=
github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY=
github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE=
github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM=
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nats-io/nats.go v1.39.0 h1:2/yg2JQjiYYKLwDuBzV0FbB2sIV+eFNkEevlRi4n9lI=
github.com/nats-io/nats.go v1.39.0/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nats.go v1.33.1 h1:8TxLZZ/seeEfR97qV0/Bl939tpDnt2Z2fK3HkPypj70=
github.com/nats-io/nats.go v1.33.1/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.49.0 h1:ToNTdK4zSnPVJmh698mGFkDor9wBI/iGaJy5dbH1EgI=
github.com/prometheus/common v0.49.0/go.mod h1:Kxm+EULxRbUkjGU6WFsQqo3ORzB4tyKvlWFOE9mB2sE=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
@@ -103,29 +77,26 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1 h1:P7S/GeHBAFEZIYp0ePPs2kHXoazz8q2KsyxHyQVGCJg=
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1/go.mod h1:9CWpnTUmlQkfdpdutA1nNf4iE5lAVt3QZOu0Z6hahBE=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20250215185904-eff6e970281f h1:oFMYAjX0867ZD2jcNiLBrI9BdpmEkvPyi5YrBGXbamg=
golang.org/x/exp v0.0.0-20250215185904-eff6e970281f/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sys v0.0.0-20210122093101-04d7465088b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -11,7 +11,7 @@ import (

	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
	topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"

	"github.com/PaesslerAG/gval"
@@ -31,14 +31,14 @@ type metricAggregator struct {
	functions []*MetricAggregatorIntervalConfig
	constants map[string]interface{}
	language  gval.Language
	output    chan lp.CCMessage
	output    chan lp.CCMetric
}

type MetricAggregator interface {
	AddAggregation(name, function, condition string, tags, meta map[string]string) error
	DeleteAggregation(name string) error
	Init(output chan lp.CCMessage) error
	Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage)
	Init(output chan lp.CCMetric) error
	Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric)
}

var metricCacheLanguage = gval.NewLanguage(
@@ -74,7 +74,7 @@ var evaluables = struct {
	mapping: make(map[string]gval.Evaluable),
}

func (c *metricAggregator) Init(output chan lp.CCMessage) error {
func (c *metricAggregator) Init(output chan lp.CCMetric) error {
	c.output = output
	c.functions = make([]*MetricAggregatorIntervalConfig, 0)
	c.constants = make(map[string]interface{})
@@ -112,7 +112,7 @@ func (c *metricAggregator) Init(output chan lp.CCMessage) error {
	return nil
}

func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage) {
func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric) {
	vars := make(map[string]interface{})
	for k, v := range c.constants {
		vars[k] = v
@@ -127,7 +127,7 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
	var valuesInt32 []int32
	var valuesInt64 []int64
	var valuesBool []bool
	matches := make([]lp.CCMessage, 0)
	matches := make([]lp.CCMetric, 0)
	for _, m := range metrics {
		vars["metric"] = m
		//value, err := gval.Evaluate(f.Condition, vars, c.language)
@@ -216,7 +216,7 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
		break
	}

	copy_tags := func(tags map[string]string, metrics []lp.CCMessage) map[string]string {
	copy_tags := func(tags map[string]string, metrics []lp.CCMetric) map[string]string {
		out := make(map[string]string)
		for key, value := range tags {
			switch value {
@@ -233,7 +233,7 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
		}
		return out
	}
	copy_meta := func(meta map[string]string, metrics []lp.CCMessage) map[string]string {
	copy_meta := func(meta map[string]string, metrics []lp.CCMetric) map[string]string {
		out := make(map[string]string)
		for key, value := range meta {
			switch value {
@@ -253,18 +253,18 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
	tags := copy_tags(f.Tags, matches)
	meta := copy_meta(f.Meta, matches)

	var m lp.CCMessage
	var m lp.CCMetric
	switch t := value.(type) {
	case float64:
		m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
		m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
	case float32:
		m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
		m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
	case int:
		m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
		m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
	case int64:
		m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
		m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
	case string:
		m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
		m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
	default:
		cclog.ComponentError("MetricCache", "Gval returned invalid type", t, "skipping metric", f.Name)
	}
@@ -389,7 +389,7 @@ func EvalFloat64Condition(condition string, params map[string]float64) (float64,
	return value, err
}

func NewAggregator(output chan lp.CCMessage) (MetricAggregator, error) {
func NewAggregator(output chan lp.CCMetric) (MetricAggregator, error) {
	a := new(metricAggregator)
	err := a.Init(output)
	if err != nil {
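The aggregator's `function` and `condition` strings are evaluated with the gval library imported above. A minimal sketch of the underlying call, with a made-up expression and parameter map:

```go
package main

import (
	"fmt"

	"github.com/PaesslerAG/gval"
)

func main() {
	params := map[string]interface{}{"values": 4.0, "count": 2.0}
	// gval.Evaluate compiles and runs the expression against the parameters.
	value, err := gval.Evaluate("values / count", params, gval.Full())
	if err != nil {
		panic(err)
	}
	fmt.Println(value) // 2
}
```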
@@ -1,21 +1,15 @@
# CC Metric Router

The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMessages](https://pkg.go.dev/github.com/ClusterCockpit/cc-energy-manager@v0.0.0-20240919152819-92a17f2da4f7/pkg/cc-message).

The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMetrics](../ccMetric/README.md).

# Configuration

**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) with option `process_messages`.

```json
{
    "num_cache_intervals" : 1,
    "interval_timestamp" : true,
    "hostname_tag" : "hostname",
    "max_forward" : 50,
    "process_messages": {
        "see": "pkg/messageProcessor/README.md"
    },
    "add_tags" : [
        {
            "key" : "cluster",
@@ -69,8 +63,6 @@ The CCMetric router sits in between the collectors and the sinks and can be used

There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.

**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) (option `process_messages`) instead of `add_tags`, `delete_tags`, `drop_metrics`, `drop_metrics_if`, `rename_metrics`, `normalize_units` and `change_unit_prefix`. These options are deprecated and will be removed in future versions. Until then, they are added to the message processor.

# Processing order in the router

- Add the `hostname_tag` tag (if sent by collectors or cache)
@@ -104,8 +96,6 @@ Every time the router receives a metric through any of the channels, it tries to

# The `rename_metrics` option

__deprecated__

In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, executables and libraries, they might change from system to system (or installation to installation, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`.

```json
@@ -117,8 +107,6 @@ In the ClusterCockpit world we specified a set of standard metrics. Since some c

# Conditional manipulation of tags (`add_tags` and `del_tags`)

__deprecated__

Common config format:
```json
{
@@ -130,8 +118,6 @@ Common config format:

## The `del_tags` option

__deprecated__

The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.

If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case.
@@ -143,8 +129,6 @@ Never delete tags:

## The `add_tags` option

__deprecated__

In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.

If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.
@@ -186,8 +170,6 @@ In some cases, you want to drop a metric and don't get it forwarded to the sinks

## The `drop_metrics` section

__deprecated__

The argument is a list of metric names. No further checks are performed; only the metric name is compared.

```json
@@ -203,8 +185,6 @@ The example drops all metrics with the name `drop_metric_1` and `drop_metric_2`.

## The `drop_metrics_if` section

__deprecated__

This option takes a list of evaluable conditions and performs them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).

```json
@@ -220,22 +200,15 @@ The first line is comparable with the example in `drop_metrics`, it drops all me
# Manipulating the metric units

## The `normalize_units` option

__deprecated__

The cc-metric-collector tries to read the data from the system as it is reported. If available, it tries to read the metric unit from the system as well (e.g. from `/proc/meminfo`). The problem is that, depending on the source, the metric units are named differently. Just think about `byte`, `Byte`, `B`, `bytes`, ...
The [cc-units](https://github.com/ClusterCockpit/cc-units) package provides a normalization option to use the same metric unit name for all metrics. If this option is set to true, all `unit` meta tags are normalized.

## The `change_unit_prefix` section

__deprecated__

It is often the case that metrics are reported by the system using a rather outdated unit prefix (like `/proc/meminfo` still uses kByte even though current memory sizes are in the GByte range). If you want to change the prefix of a unit, you can do that with the help of [cc-units](https://github.com/ClusterCockpit/cc-units). The setting works on the metric name and requires the new prefix for the metric. The cc-units package determines the scaling factor.

# Aggregate metric values of the current interval with the `interval_aggregates` option

**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0 and is **experimental**
**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0

In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent in the next interval but have the timestamp of the beginning of the previous interval.
@@ -7,7 +7,7 @@ import (
|
||||
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||
|
||||
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
|
||||
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
|
||||
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
||||
)
|
||||
|
||||
@@ -16,7 +16,7 @@ type metricCachePeriod struct {
|
||||
stopstamp time.Time
|
||||
numMetrics int
|
||||
sizeMetrics int
|
||||
metrics []lp.CCMessage
|
||||
metrics []lp.CCMetric
|
||||
}
|
||||
|
||||
// Metric cache data structure
|
||||
@@ -29,21 +29,21 @@ type metricCache struct {
|
||||
ticker mct.MultiChanTicker
|
||||
tickchan chan time.Time
|
||||
done chan bool
|
||||
output chan lp.CCMessage
|
||||
output chan lp.CCMetric
|
||||
aggEngine agg.MetricAggregator
|
||||
}
|
||||
|
||||
type MetricCache interface {
|
||||
Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
|
||||
Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
|
||||
Start()
|
||||
Add(metric lp.CCMessage)
|
||||
GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage)
|
||||
Add(metric lp.CCMetric)
|
||||
GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric)
|
||||
AddAggregation(name, function, condition string, tags, meta map[string]string) error
|
||||
DeleteAggregation(name string) error
|
||||
Close()
|
||||
}
|
||||
|
||||
func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
|
||||
func (c *metricCache) Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
|
||||
var err error = nil
|
||||
c.done = make(chan bool)
|
||||
c.wg = wg
|
||||
@@ -55,7 +55,7 @@ func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker,
|
||||
p := new(metricCachePeriod)
|
||||
p.numMetrics = 0
|
||||
p.sizeMetrics = 0
|
||||
p.metrics = make([]lp.CCMessage, 0)
|
||||
p.metrics = make([]lp.CCMetric, 0)
|
||||
c.intervals = append(c.intervals, p)
|
||||
}
|
||||
|
||||
@@ -124,7 +124,7 @@ func (c *metricCache) Start() {
|
||||
// Add a metric to the cache. The interval is defined by the global timer (rotate() in Start())
|
||||
// The intervals list is used as round-robin buffer and the metric list grows dynamically and
|
||||
// to avoid reallocations
|
||||
func (c *metricCache) Add(metric lp.CCMessage) {
|
||||
func (c *metricCache) Add(metric lp.CCMetric) {
|
||||
if c.curPeriod >= 0 && c.curPeriod < c.numPeriods {
|
||||
c.lock.Lock()
|
||||
p := c.intervals[c.curPeriod]
|
||||
@@ -153,10 +153,10 @@ func (c *metricCache) DeleteAggregation(name string) error {
|
||||
// Get all metrics of a interval. The index is the difference to the current interval, so index=0
|
||||
// is the current one, index=1 the last interval and so on. Returns and empty array if a wrong index
|
||||
// is given (negative index, index larger than configured number of total intervals, ...)
|
||||
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage) {
|
||||
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric) {
|
||||
var start time.Time = time.Now()
|
||||
var stop time.Time = time.Now()
|
||||
var metrics []lp.CCMessage
|
||||
var metrics []lp.CCMetric
|
||||
if index >= 0 && index < c.numPeriods {
|
||||
pindex := c.curPeriod - index
|
||||
if pindex < 0 {
|
||||
@@ -168,10 +168,10 @@ func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage
|
||||
metrics = c.intervals[pindex].metrics
|
||||
//return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
|
||||
} else {
|
||||
metrics = make([]lp.CCMessage, 0)
|
||||
metrics = make([]lp.CCMetric, 0)
|
||||
}
|
||||
} else {
|
||||
metrics = make([]lp.CCMessage, 0)
|
||||
metrics = make([]lp.CCMetric, 0)
|
||||
}
|
||||
return start, stop, metrics
|
||||
}
|
||||
@@ -182,7 +182,7 @@ func (c *metricCache) Close() {
|
||||
c.done <- true
|
||||
}
|
||||
|
||||
func NewCache(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
|
||||
func NewCache(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
|
||||
c := new(metricCache)
|
||||
err := c.Init(output, ticker, wg, numPeriods)
|
||||
if err != nil {
|
||||
|
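For orientation, a minimal usage sketch of the `MetricCache` interface shown in this diff (hypothetical wiring; the `ticker` and `metric` variables are assumed to exist as in cc-metric-collector's main setup, and the types follow the right-hand side of the diff):

```golang
// Sketch: cache metrics over 4 intervals and read back the previous one.
output := make(chan lp.CCMetric)
var wg sync.WaitGroup
cache, err := NewCache(output, ticker, &wg, 4) // ticker: an mct.MultiChanTicker (assumed)
if err != nil {
	panic(err)
}
cache.Start()
cache.Add(metric) // store a metric in the current interval
// index 0 is the current interval, index 1 the previous one, ...
start, stop, metrics := cache.GetPeriod(1)
_, _, _ = start, stop, metrics
cache.Close()
```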
@@ -2,7 +2,6 @@ package metricRouter

import (
"encoding/json"
"fmt"
"os"
"strings"
"sync"
@@ -10,10 +9,10 @@ import (

cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
units "github.com/ClusterCockpit/cc-units"
)

const ROUTER_MAX_FORWARD = 50
@@ -39,17 +38,16 @@ type metricRouterConfig struct {
MaxForward int `json:"max_forward"` // Number of maximal forwarded metrics at one select
NormalizeUnits bool `json:"normalize_units"` // Check unit meta flag and normalize it using cc-units
ChangeUnitPrefix map[string]string `json:"change_unit_prefix"` // Add prefix that should be applied to the metrics
// dropMetrics map[string]bool // Internal map for O(1) lookup
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
dropMetrics map[string]bool // Internal map for O(1) lookup
}

// Metric router data structure
type metricRouter struct {
hostname string // Hostname used in tags
coll_input chan lp.CCMessage // Input channel from CollectorManager
recv_input chan lp.CCMessage // Input channel from ReceiveManager
cache_input chan lp.CCMessage // Input channel from MetricCache
outputs []chan lp.CCMessage // List of all output channels
coll_input chan lp.CCMetric // Input channel from CollectorManager
recv_input chan lp.CCMetric // Input channel from ReceiveManager
cache_input chan lp.CCMetric // Input channel from MetricCache
outputs []chan lp.CCMetric // List of all output channels
done chan bool // channel to finish / stop metric router
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
timestamp time.Time // timestamp periodically updated by ticker each interval
@@ -58,15 +56,14 @@ type metricRouter struct {
cache MetricCache // pointer to MetricCache
cachewg sync.WaitGroup // wait group for MetricCache
maxForward int // number of metrics to forward maximally in one iteration
mp mp.MessageProcessor
}

// MetricRouter access functions
type MetricRouter interface {
Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error
AddCollectorInput(input chan lp.CCMessage)
AddReceiverInput(input chan lp.CCMessage)
AddOutput(output chan lp.CCMessage)
AddCollectorInput(input chan lp.CCMetric)
AddReceiverInput(input chan lp.CCMetric)
AddOutput(output chan lp.CCMetric)
Start()
Close()
}
@@ -78,9 +75,9 @@ type MetricRouter interface {
// * ticker (from variable ticker)
// * configuration (read from config file in variable routerConfigFile)
func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error {
r.outputs = make([]chan lp.CCMessage, 0)
r.outputs = make([]chan lp.CCMetric, 0)
r.done = make(chan bool)
r.cache_input = make(chan lp.CCMessage)
r.cache_input = make(chan lp.CCMetric)
r.wg = wg
r.ticker = ticker
r.config.MaxForward = ROUTER_MAX_FORWARD
@@ -122,56 +119,14 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
}
}
p, err := mp.NewMessageProcessor()
if err != nil {
return fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p

if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
r.config.dropMetrics = make(map[string]bool)
for _, mname := range r.config.DropMetrics {
r.mp.AddDropMessagesByName(mname)
r.config.dropMetrics[mname] = true
}
for _, cond := range r.config.DropMetricsIf {
r.mp.AddDropMessagesByCondition(cond)
}
for _, data := range r.config.AddTags {
cond := data.Condition
if cond == "*" {
cond = "true"
}
r.mp.AddAddTagsByCondition(cond, data.Key, data.Value)
}
for _, data := range r.config.DelTags {
cond := data.Condition
if cond == "*" {
cond = "true"
}
r.mp.AddDeleteTagsByCondition(cond, data.Key, data.Value)
}
for oldname, newname := range r.config.RenameMetrics {
r.mp.AddRenameMetricByName(oldname, newname)
}
for metricName, prefix := range r.config.ChangeUnitPrefix {
r.mp.AddChangeUnitPrefix(fmt.Sprintf("name == '%s'", metricName), prefix)
}
r.mp.SetNormalizeUnits(r.config.NormalizeUnits)

r.mp.AddAddTagsByCondition("true", r.config.HostnameTagName, r.hostname)

// r.config.dropMetrics = make(map[string]bool)
// for _, mname := range r.config.DropMetrics {
// r.config.dropMetrics[mname] = true
// }
return nil
}

func getParamMap(point lp.CCMessage) map[string]interface{} {
func getParamMap(point lp.CCMetric) map[string]interface{} {
params := make(map[string]interface{})
params["metric"] = point
params["name"] = point.Name()
@@ -189,7 +144,7 @@ func getParamMap(point lp.CCMessage) map[string]interface{} {
}

// DoAddTags adds a tag when the condition is fulfilled
func (r *metricRouter) DoAddTags(point lp.CCMessage) {
func (r *metricRouter) DoAddTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.AddTags {
if m.Condition == "*" {
@@ -211,81 +166,81 @@ func (r *metricRouter) DoAddTags(point lp.CCMessage) {
}

// DoDelTags removes a tag when the condition is fulfilled
// func (r *metricRouter) DoDelTags(point lp.CCMessage) {
// var conditionMatches bool
// for _, m := range r.config.DelTags {
// if m.Condition == "*" {
// // Condition is always matched
// conditionMatches = true
// } else {
// // Evaluate condition
// var err error
// conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
// if err != nil {
// cclog.ComponentError("MetricRouter", err.Error())
// conditionMatches = false
// }
// }
// if conditionMatches {
// point.RemoveTag(m.Key)
// }
// }
// }
func (r *metricRouter) DoDelTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.DelTags {
if m.Condition == "*" {
// Condition is always matched
conditionMatches = true
} else {
// Evaluate condition
var err error
conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
}
}
if conditionMatches {
point.RemoveTag(m.Key)
}
}
}

// Conditional test whether a metric should be dropped
// func (r *metricRouter) dropMetric(point lp.CCMessage) bool {
// // Simple drop check
// if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
// return conditionMatches
// }
func (r *metricRouter) dropMetric(point lp.CCMetric) bool {
// Simple drop check
if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
return conditionMatches
}

// // Checking the dropping conditions
// for _, m := range r.config.DropMetricsIf {
// conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
// if err != nil {
// cclog.ComponentError("MetricRouter", err.Error())
// conditionMatches = false
// }
// if conditionMatches {
// return conditionMatches
// }
// }
// Checking the dropping conditions
for _, m := range r.config.DropMetricsIf {
conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
}
if conditionMatches {
return conditionMatches
}
}

// // No dropping condition met
// return false
// }
// No dropping condition met
return false
}

// func (r *metricRouter) prepareUnit(point lp.CCMessage) bool {
// if r.config.NormalizeUnits {
// if in_unit, ok := point.GetMeta("unit"); ok {
// u := units.NewUnit(in_unit)
// if u.Valid() {
// point.AddMeta("unit", u.Short())
// }
// }
// }
// if newP, ok := r.config.ChangeUnitPrefix[point.Name()]; ok {
func (r *metricRouter) prepareUnit(point lp.CCMetric) bool {
if r.config.NormalizeUnits {
if in_unit, ok := point.GetMeta("unit"); ok {
u := units.NewUnit(in_unit)
if u.Valid() {
point.AddMeta("unit", u.Short())
}
}
}
if newP, ok := r.config.ChangeUnitPrefix[point.Name()]; ok {

// newPrefix := units.NewPrefix(newP)
newPrefix := units.NewPrefix(newP)

// if in_unit, ok := point.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
// u := units.NewUnit(in_unit)
// if u.Valid() {
// cclog.ComponentDebug("MetricRouter", "Change prefix to", newP, "for metric", point.Name())
// conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
// if conv != nil && out_unit.Valid() {
// if val, ok := point.GetField("value"); ok {
// point.AddField("value", conv(val))
// point.AddMeta("unit", out_unit.Short())
// }
// }
// }
if in_unit, ok := point.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
u := units.NewUnit(in_unit)
if u.Valid() {
cclog.ComponentDebug("MetricRouter", "Change prefix to", newP, "for metric", point.Name())
conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
if conv != nil && out_unit.Valid() {
if val, ok := point.GetField("value"); ok {
point.AddField("value", conv(val))
point.AddMeta("unit", out_unit.Short())
}
}
}

// }
// }
}
}

// return true
// }
return true
}

// Start starts the metric router
func (r *metricRouter) Start() {
@@ -304,75 +259,59 @@ func (r *metricRouter) Start() {

// Forward takes a received metric, adds or deletes tags
// and forwards it to the output channels
// forward := func(point lp.CCMessage) {
// cclog.ComponentDebug("MetricRouter", "FORWARD", point)
// r.DoAddTags(point)
// r.DoDelTags(point)
// name := point.Name()
// if new, ok := r.config.RenameMetrics[name]; ok {
// point.SetName(new)
// point.AddMeta("oldname", name)
// r.DoAddTags(point)
// r.DoDelTags(point)
// }
forward := func(point lp.CCMetric) {
cclog.ComponentDebug("MetricRouter", "FORWARD", point)
r.DoAddTags(point)
r.DoDelTags(point)
name := point.Name()
if new, ok := r.config.RenameMetrics[name]; ok {
point.SetName(new)
point.AddMeta("oldname", name)
r.DoAddTags(point)
r.DoDelTags(point)
}

// r.prepareUnit(point)
r.prepareUnit(point)

// for _, o := range r.outputs {
// o <- point
// }
// }
for _, o := range r.outputs {
o <- point
}
}

// Forward message received from collector channel
coll_forward := func(p lp.CCMessage) {
coll_forward := func(p lp.CCMetric) {
// receive from metric collector
//p.AddTag(r.config.HostnameTagName, r.hostname)
p.AddTag(r.config.HostnameTagName, r.hostname)
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
m, err := r.mp.ProcessMessage(p)
if err == nil && m != nil {
for _, o := range r.outputs {
o <- m
}
if !r.dropMetric(p) {
forward(p)
}
// if !r.dropMetric(p) {
// for _, o := range r.outputs {
// o <- point
// }
// }
// even if the metric is dropped, it is stored in the cache for
// aggregations
if r.config.NumCacheIntervals > 0 {
r.cache.Add(m)
r.cache.Add(p)
}
}

// Forward message received from receivers channel
recv_forward := func(p lp.CCMessage) {
recv_forward := func(p lp.CCMetric) {
// receive from receive manager
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
m, err := r.mp.ProcessMessage(p)
if err == nil && m != nil {
for _, o := range r.outputs {
o <- m
}
if !r.dropMetric(p) {
forward(p)
}
// if !r.dropMetric(p) {
// forward(p)
// }
}

// Forward message received from cache channel
cache_forward := func(p lp.CCMessage) {
cache_forward := func(p lp.CCMetric) {
// receive from metric collector
m, err := r.mp.ProcessMessage(p)
if err == nil && m != nil {
for _, o := range r.outputs {
o <- m
}
if !r.dropMetric(p) {
p.AddTag(r.config.HostnameTagName, r.hostname)
forward(p)
}
}

@@ -419,17 +358,17 @@ func (r *metricRouter) Start() {
}

// AddCollectorInput adds a channel between metric collector and metric router
func (r *metricRouter) AddCollectorInput(input chan lp.CCMessage) {
func (r *metricRouter) AddCollectorInput(input chan lp.CCMetric) {
r.coll_input = input
}

// AddReceiverInput adds a channel between metric receiver and metric router
func (r *metricRouter) AddReceiverInput(input chan lp.CCMessage) {
func (r *metricRouter) AddReceiverInput(input chan lp.CCMetric) {
r.recv_input = input
}

// AddOutput adds an output channel to the metric router
func (r *metricRouter) AddOutput(output chan lp.CCMessage) {
func (r *metricRouter) AddOutput(output chan lp.CCMetric) {
r.outputs = append(r.outputs, output)
}
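The access functions above are all a consumer needs; a minimal, hypothetical wiring of the `MetricRouter` interface (the ticker, wait group, and config file name are assumptions):

```golang
// Sketch: connect one collector channel and one sink channel to the router.
var r MetricRouter // obtained from this package's constructor (assumption)
var wg sync.WaitGroup
if err := r.Init(ticker, &wg, "router.json"); err != nil {
	panic(err)
}
collectorCh := make(chan lp.CCMetric)
sinkCh := make(chan lp.CCMetric)
r.AddCollectorInput(collectorCh)
r.AddOutput(sinkCh)
r.Start()
// ... collectors write to collectorCh, sinks read from sinkCh ...
r.Close()
wg.Wait()
```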
@@ -296,25 +296,6 @@ func GetTypeList(topology_type string) []int {
return []int{}
}

func GetTypeId(hwt HwthreadEntry, topology_type string) (int, error) {
var err error = nil
switch topology_type {
case "node":
return 0, err
case "socket":
return hwt.Socket, err
case "die":
return hwt.Die, err
case "memoryDomain":
return hwt.NumaDomain, err
case "core":
return hwt.Core, err
case "hwthread":
return hwt.CpuID, err
}
return -1, fmt.Errorf("unknown topology type '%s'", topology_type)
}

// CpuData returns CPU data for each hardware thread
func CpuData() []HwthreadEntry {
// return a deep copy to protect cache data
@@ -442,22 +423,3 @@ func GetCoreHwthreads(core int) []int {
}
return cpuList
}

// GetTypeHwthreads gets the list of hardware threads of the specified type using the naming format inside ClusterCockpit
func GetTypeHwthreads(topology_type string, id int) []int {
switch topology_type {
case "node":
return HwthreadList()
case "socket":
return GetSocketHwthreads(id)
case "die":
return GetDieHwthreads(id)
case "memoryDomain":
return GetNumaDomainHwthreads(id)
case "core":
return GetCoreHwthreads(id)
case "hwthread":
return []int{id}
}
return []int{}
}
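For context, the helpers removed here map ClusterCockpit topology types to hardware-thread lists; an illustrative call (sketch, the returned values depend on the local node topology):

```golang
// Sketch: list the hardware threads belonging to socket 0.
for _, cpu := range GetTypeHwthreads("socket", 0) {
	fmt.Println("hwthread", cpu, "is part of socket 0")
}
// Unknown topology types yield an empty list:
empty := GetTypeHwthreads("gpu", 0) // len(empty) == 0
```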
@@ -1,266 +0,0 @@
# Message Processor Component

Multiple parts of the ClusterCockpit ecosystem require the processing of CCMessages.
The main CC application using it is `cc-metric-collector`. The processing part there was originally in the metric router, the central
hub connecting collectors (reading local data), receivers (receiving remote data) and sinks (sending data). Already in early stages, the
lack of flexibility caused some trouble:

> The sysadmins wanted to keep operating their Ganglia-based monitoring infrastructure while we developed the CC stack. Ganglia wants the core metrics with
> a specific name and resolution (right unit prefix), but there was no conversion of the data in the CC stack, so CC frontend developers wanted a different
> resolution for some metrics. The issue was basically the `mem_used` metric showing the currently used memory of the node. Ganglia wants it in `kByte` as provided
> by the Linux operating system, but CC wanted it in `GByte`.

With the message processor, the Ganglia sinks can apply the unit prefix changes individually and name the metrics as required by Ganglia.
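For the `mem_used` case above, a sink-local configuration could look like this (illustrative sketch; `mem_used_ganglia` is a hypothetical target name, the option keys are documented below):

```json
{
  "change_unit_prefix": {
    "name == 'mem_used'": "K"
  },
  "rename_messages": {
    "mem_used": "mem_used_ganglia"
  }
}
```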

## For developers

Whenever you receive or are about to send a message out, you should provide some processing.

### Configuration of component

New operations can be added to the message processor at runtime. Of course, they can also be removed again. For the initial setup, a configuration file, or some fields in a larger configuration file, can define the processing.

The message processor uses the following configuration:

```json
{
  "drop_messages": [
    "name_of_message_to_drop"
  ],
  "drop_messages_if": [
    "condition_when_to_drop_message",
    "name == 'drop_this'",
    "tag.hostname == 'this_host'",
    "meta.unit != 'MB'"
  ],
  "rename_messages": {
    "old_message_name": "new_message_name"
  },
  "rename_messages_if": {
    "condition_when_to_rename_message": "new_name"
  },
  "add_tags_if": [
    {
      "if": "condition_when_to_add_tag",
      "key": "name_for_new_tag",
      "value": "new_tag_value"
    }
  ],
  "delete_tags_if": [
    {
      "if": "condition_when_to_delete_tag",
      "key": "name_of_tag"
    }
  ],
  "add_meta_if": [
    {
      "if": "condition_when_to_add_meta_info",
      "key": "name_for_new_meta_info",
      "value": "new_meta_info_value"
    }
  ],
  "delete_meta_if": [
    {
      "if": "condition_when_to_delete_meta_info",
      "key": "name_of_meta_info"
    }
  ],
  "add_field_if": [
    {
      "if": "condition_when_to_add_field",
      "key": "name_for_new_field",
      "value": "new_field_value_but_only_string_at_the_moment"
    }
  ],
  "delete_field_if": [
    {
      "if": "condition_when_to_delete_field",
      "key": "name_of_field"
    }
  ],
  "move_tag_to_meta_if": [
    {
      "if": "condition_when_to_move_tag_to_meta_info_including_its_value",
      "key": "name_of_tag",
      "value": "name_of_meta_info"
    }
  ],
  "move_tag_to_field_if": [
    {
      "if": "condition_when_to_move_tag_to_fields_including_its_value",
      "key": "name_of_tag",
      "value": "name_of_field"
    }
  ],
  "move_meta_to_tag_if": [
    {
      "if": "condition_when_to_move_meta_info_to_tags_including_its_value",
      "key": "name_of_meta_info",
      "value": "name_of_tag"
    }
  ],
  "move_meta_to_field_if": [
    {
      "if": "condition_when_to_move_meta_info_to_fields_including_its_value",
      "key": "name_of_meta_info",
      "value": "name_of_field"
    }
  ],
  "move_field_to_tag_if": [
    {
      "if": "condition_when_to_move_field_to_tags_including_its_stringified_value",
      "key": "name_of_field",
      "value": "name_of_tag"
    }
  ],
  "move_field_to_meta_if": [
    {
      "if": "condition_when_to_move_field_to_meta_info_including_its_stringified_value",
      "key": "name_of_field",
      "value": "name_of_meta_info"
    }
  ],
  "drop_by_message_type": [
    "metric",
    "event",
    "log",
    "control"
  ],
  "change_unit_prefix": {
    "name == 'metric_with_wrong_unit_prefix'": "G",
    "only_if_messagetype == 'metric'": "T"
  },
  "normalize_units": true,
  "add_base_env": {
    "MY_CONSTANT_FOR_CUSTOM_CONDITIONS": 1.0,
    "output_value_for_test_metrics": 42.0
  },
  "stage_order": [
    "rename_messages_if",
    "drop_messages"
  ]
}
```

The options `change_unit_prefix` and `normalize_units` are only applied to CCMetrics. It is not possible to delete the field related to each message type as defined in the [cc-specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/interfaces/lineprotocol). In short:
- CCMetrics always have to have a field named `value`
- CCEvents always have to have a field named `event`
- CCLogs always have to have a field named `log`
- CCControl messages always have to have a field named `control`

With `add_base_env`, one can specify mykey=myvalue pairs that can be used in conditions like `tag.type == mykey`.

The order in which each message is processed can be specified with the `stage_order` option. The stage names are the keys in the JSON configuration, thus `change_unit_prefix`, `move_field_to_meta_if`, etc. Stages can be listed multiple times.

### Using the component
In order to load the configuration from a `json.RawMessage`:
```golang
mp, err := NewMessageProcessor()
if err != nil {
    log.Error("failed to create new message processor")
}
mp.FromConfigJSON(configJson)
```

After initialization and adding the different operations, the `ProcessMessage()` function applies all operations and returns the processed message, or `nil` if the message was dropped.

```golang
m := lp.EmptyMessage()

x, err := mp.ProcessMessage(m)
if err != nil {
    // handle error
}
if x != nil {
    // process x further
} else {
    // this message got dropped
}
```

Single operations can be added and removed at runtime:
```golang
type MessageProcessor interface {
    // Functions to set the execution order of the processing stages
    SetStages([]string) error
    DefaultStages() []string
    // Function to add variables to the base evaluation environment
    AddBaseEnv(env map[string]interface{}) error
    // Functions to add and remove rules
    AddDropMessagesByName(name string) error
    RemoveDropMessagesByName(name string)
    AddDropMessagesByCondition(condition string) error
    RemoveDropMessagesByCondition(condition string)
    AddRenameMetricByCondition(condition string, name string) error
    RemoveRenameMetricByCondition(condition string)
    AddRenameMetricByName(from, to string) error
    RemoveRenameMetricByName(from string)
    SetNormalizeUnits(settings bool)
    AddChangeUnitPrefix(condition string, prefix string) error
    RemoveChangeUnitPrefix(condition string)
    AddAddTagsByCondition(condition, key, value string) error
    RemoveAddTagsByCondition(condition string)
    AddDeleteTagsByCondition(condition, key, value string) error
    RemoveDeleteTagsByCondition(condition string)
    AddAddMetaByCondition(condition, key, value string) error
    RemoveAddMetaByCondition(condition string)
    AddDeleteMetaByCondition(condition, key, value string) error
    RemoveDeleteMetaByCondition(condition string)
    AddMoveTagToMeta(condition, key, value string) error
    RemoveMoveTagToMeta(condition string)
    AddMoveTagToFields(condition, key, value string) error
    RemoveMoveTagToFields(condition string)
    AddMoveMetaToTags(condition, key, value string) error
    RemoveMoveMetaToTags(condition string)
    AddMoveMetaToFields(condition, key, value string) error
    RemoveMoveMetaToFields(condition string)
    AddMoveFieldToTags(condition, key, value string) error
    RemoveMoveFieldToTags(condition string)
    AddMoveFieldToMeta(condition, key, value string) error
    RemoveMoveFieldToMeta(condition string)
    // Read in a JSON configuration
    FromConfigJSON(config json.RawMessage) error
    // Processing functions for legacy CCMetric and current CCMessage
    ProcessMessage(m lp2.CCMessage) (lp2.CCMessage, error)
    ProcessMetric(m lp.CCMetric) (lp2.CCMessage, error)
}
```

### Syntax for evaluatable terms

The message processor uses `expr` for evaluating the terms. It provides a basic set of operators like string comparison and arithmetic operations.

Accessible for operations are
- `name` of the message
- `timestamp` or `time` of the message
- `type`, `type-id` of the message (also `tag_type`, `tag_type-id` and `tag_typeid`)
- `stype`, `stype-id` of the message (if the message has these tags, also `tag_stype`, `tag_stype-id` and `tag_stypeid`)
- `value` for a CCMetric message (also `field_value`)
- `event` for a CCEvent message (also `field_event`)
- `control` for a CCControl message (also `field_control`)
- `log` for a CCLog message (also `field_log`)
- `messagetype` or `msgtype`. Possible values `event`, `metric`, `log` and `control`.

Generally, all tags are accessible with `tag_<tagkey>`, `tags_<tagkey>` or `tags.<tagkey>`. Similarly for all fields with `field[s]?[_.]<fieldkey>`. For meta information `meta[_.]<metakey>` (there is no `metas[_.]<metakey>`).

The [syntax of `expr`](https://expr-lang.org/docs/language-definition) is accepted with some additions:
- Comparing strings: `==`, `!=`, `str matches regex` (use `%` instead of `\`!)
- Combining conditions: `&&`, `||`
- Comparing numbers: `==`, `!=`, `<`, `>`, `<=`, `>=`
- Test lists: `<value> in <list>`
- Topological tests: `tag_type-id in getCpuListOfType("socket", "1")` (test whether the metric belongs to socket 1 in the local node topology)

Often the operations are written in JSON files for loading them at startup. In JSON, some characters are not allowed. Therefore, the term syntax reflects that:
- use `''` instead of `""` for strings
- for the regexes, use `%` instead of `\`

For operations that should be applied on all messages, use the condition `true`.
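A small sketch of adding and removing such rules at runtime, using conditions in the syntax described above (the tag value `node042` and the concrete conditions are illustrative):

```golang
mp, err := NewMessageProcessor()
if err != nil {
    // handle error
}
// Drop all messages coming from hwthreads of socket 1 (topological test).
cond := "tag_type == 'hwthread' && tag_typeid in getCpuListOfType('socket', '1')"
mp.AddDropMessagesByCondition(cond)
// Tag everything passing through with the local hostname.
mp.AddAddTagsByCondition("true", "hostname", "node042")
// Later, the same condition string removes the rule again.
mp.RemoveDropMessagesByCondition(cond)
```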

### Overhead

The operations taking conditions are pre-processed, which is commonly the time-consuming part, but of course with each added operation the time to process a message
increases. Moreover, the processing creates a copy of the message.

@@ -1,988 +0,0 @@
package messageprocessor

import (
"encoding/json"
"fmt"
"strings"
"sync"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lplegacy "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"

"github.com/expr-lang/expr"
"github.com/expr-lang/expr/vm"
)

// Message processor add/delete tag/meta configuration
type messageProcessorTagConfig struct {
Key string `json:"key"` // Tag name
Value string `json:"value,omitempty"` // Tag value
Condition string `json:"if"` // Condition for adding or removing corresponding tag
}

type messageProcessorConfig struct {
StageOrder []string `json:"stage_order,omitempty"` // List of stages to execute them in the specified order and to skip unrequired ones
DropMessages []string `json:"drop_messages,omitempty"` // List of metric names to drop. For fine-grained dropping use drop_messages_if
DropMessagesIf []string `json:"drop_messages_if,omitempty"` // List of evaluatable terms to drop messages
RenameMessages map[string]string `json:"rename_messages,omitempty"` // Map of metric names to rename
RenameMessagesIf map[string]string `json:"rename_messages_if,omitempty"` // Map to rename metric name based on a condition
NormalizeUnits bool `json:"normalize_units,omitempty"` // Check unit meta flag and normalize it using cc-units
ChangeUnitPrefix map[string]string `json:"change_unit_prefix,omitempty"` // Add prefix that should be applied to the messages
AddTagsIf []messageProcessorTagConfig `json:"add_tags_if,omitempty"` // List of tags that are added when the condition is met
DelTagsIf []messageProcessorTagConfig `json:"delete_tags_if,omitempty"` // List of tags that are removed when the condition is met
AddMetaIf []messageProcessorTagConfig `json:"add_meta_if,omitempty"` // List of meta infos that are added when the condition is met
DelMetaIf []messageProcessorTagConfig `json:"delete_meta_if,omitempty"` // List of meta infos that are removed when the condition is met
AddFieldIf []messageProcessorTagConfig `json:"add_field_if,omitempty"` // List of fields that are added when the condition is met
DelFieldIf []messageProcessorTagConfig `json:"delete_field_if,omitempty"` // List of fields that are removed when the condition is met
DropByType []string `json:"drop_by_message_type,omitempty"` // List of message types that should be dropped
MoveTagToMeta []messageProcessorTagConfig `json:"move_tag_to_meta_if,omitempty"`
MoveTagToField []messageProcessorTagConfig `json:"move_tag_to_field_if,omitempty"`
MoveMetaToTag []messageProcessorTagConfig `json:"move_meta_to_tag_if,omitempty"`
MoveMetaToField []messageProcessorTagConfig `json:"move_meta_to_field_if,omitempty"`
MoveFieldToTag []messageProcessorTagConfig `json:"move_field_to_tag_if,omitempty"`
MoveFieldToMeta []messageProcessorTagConfig `json:"move_field_to_meta_if,omitempty"`
AddBaseEnv map[string]interface{} `json:"add_base_env,omitempty"`
}

type messageProcessor struct {

// For thread-safety
mutex sync.RWMutex

// mapping maps the original condition strings to their compiled programs,
// because it is not possible to get the original string out of
// a compiled *vm.Program
mapping map[string]*vm.Program

stages []string // order of stage execution
dropMessages map[string]struct{} // internal lookup map
dropTypes map[string]struct{} // internal lookup map
dropMessagesIf map[*vm.Program]struct{} // pre-processed dropMessagesIf
renameMessages map[string]string // internal lookup map
renameMessagesIf map[*vm.Program]string // pre-processed RenameMessagesIf
changeUnitPrefix map[*vm.Program]string // pre-processed ChangeUnitPrefix
normalizeUnits bool
addTagsIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddTagsIf
deleteTagsIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelTagsIf
addMetaIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddMetaIf
deleteMetaIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelMetaIf
addFieldIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddFieldIf
deleteFieldIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelFieldIf
moveTagToMeta map[*vm.Program]messageProcessorTagConfig // pre-processed MoveTagToMeta
moveTagToField map[*vm.Program]messageProcessorTagConfig // pre-processed MoveTagToField
moveMetaToTag map[*vm.Program]messageProcessorTagConfig // pre-processed MoveMetaToTag
moveMetaToField map[*vm.Program]messageProcessorTagConfig // pre-processed MoveMetaToField
moveFieldToTag map[*vm.Program]messageProcessorTagConfig // pre-processed MoveFieldToTag
moveFieldToMeta map[*vm.Program]messageProcessorTagConfig // pre-processed MoveFieldToMeta
}

type MessageProcessor interface {
// Functions to set the execution order of the processing stages
SetStages([]string) error
DefaultStages() []string
// Function to add variables to the base evaluation environment
AddBaseEnv(env map[string]interface{}) error
// Functions to add and remove rules
AddDropMessagesByName(name string) error
RemoveDropMessagesByName(name string)
AddDropMessagesByCondition(condition string) error
RemoveDropMessagesByCondition(condition string)
AddRenameMetricByCondition(condition string, name string) error
RemoveRenameMetricByCondition(condition string)
AddRenameMetricByName(from, to string) error
RemoveRenameMetricByName(from string)
SetNormalizeUnits(settings bool)
AddChangeUnitPrefix(condition string, prefix string) error
RemoveChangeUnitPrefix(condition string)
AddAddTagsByCondition(condition, key, value string) error
RemoveAddTagsByCondition(condition string)
AddDeleteTagsByCondition(condition, key, value string) error
RemoveDeleteTagsByCondition(condition string)
AddAddMetaByCondition(condition, key, value string) error
RemoveAddMetaByCondition(condition string)
AddDeleteMetaByCondition(condition, key, value string) error
RemoveDeleteMetaByCondition(condition string)
AddMoveTagToMeta(condition, key, value string) error
RemoveMoveTagToMeta(condition string)
AddMoveTagToFields(condition, key, value string) error
RemoveMoveTagToFields(condition string)
AddMoveMetaToTags(condition, key, value string) error
RemoveMoveMetaToTags(condition string)
AddMoveMetaToFields(condition, key, value string) error
RemoveMoveMetaToFields(condition string)
AddMoveFieldToTags(condition, key, value string) error
RemoveMoveFieldToTags(condition string)
AddMoveFieldToMeta(condition, key, value string) error
RemoveMoveFieldToMeta(condition string)
// Read in a JSON configuration
FromConfigJSON(config json.RawMessage) error
// Processing functions for legacy CCMetric and current CCMessage
ProcessMetric(m lplegacy.CCMetric) (lp.CCMessage, error)
ProcessMessage(m lp.CCMessage) (lp.CCMessage, error)
//EvalToBool(condition string, parameters map[string]interface{}) (bool, error)
//EvalToFloat64(condition string, parameters map[string]interface{}) (float64, error)
//EvalToString(condition string, parameters map[string]interface{}) (string, error)
}

const (
STAGENAME_DROP_BY_NAME string = "drop_by_name"
STAGENAME_DROP_BY_TYPE string = "drop_by_type"
STAGENAME_DROP_IF string = "drop_if"
STAGENAME_ADD_TAG string = "add_tag"
STAGENAME_DELETE_TAG string = "delete_tag"
STAGENAME_MOVE_TAG_META string = "move_tag_to_meta"
STAGENAME_MOVE_TAG_FIELD string = "move_tag_to_fields"
STAGENAME_ADD_META string = "add_meta"
STAGENAME_DELETE_META string = "delete_meta"
STAGENAME_MOVE_META_TAG string = "move_meta_to_tags"
STAGENAME_MOVE_META_FIELD string = "move_meta_to_fields"
STAGENAME_ADD_FIELD string = "add_field"
STAGENAME_DELETE_FIELD string = "delete_field"
STAGENAME_MOVE_FIELD_TAG string = "move_field_to_tags"
STAGENAME_MOVE_FIELD_META string = "move_field_to_meta"
STAGENAME_RENAME_BY_NAME string = "rename"
STAGENAME_RENAME_IF string = "rename_if"
STAGENAME_CHANGE_UNIT_PREFIX string = "change_unit_prefix"
STAGENAME_NORMALIZE_UNIT string = "normalize_unit"
)

var StageNames = []string{
STAGENAME_DROP_BY_NAME,
STAGENAME_DROP_BY_TYPE,
STAGENAME_DROP_IF,
STAGENAME_ADD_TAG,
STAGENAME_DELETE_TAG,
STAGENAME_MOVE_TAG_META,
STAGENAME_MOVE_TAG_FIELD,
STAGENAME_ADD_META,
STAGENAME_DELETE_META,
STAGENAME_MOVE_META_TAG,
STAGENAME_MOVE_META_FIELD,
STAGENAME_ADD_FIELD,
STAGENAME_DELETE_FIELD,
STAGENAME_MOVE_FIELD_TAG,
STAGENAME_MOVE_FIELD_META,
STAGENAME_RENAME_BY_NAME,
STAGENAME_RENAME_IF,
STAGENAME_CHANGE_UNIT_PREFIX,
STAGENAME_NORMALIZE_UNIT,
}

var paramMapPool = sync.Pool{
New: func() any {
return make(map[string]interface{})
},
}

func sanitizeExprString(key string) string {
return strings.ReplaceAll(key, "type-id", "typeid")
}

func getParamMap(point lp.CCMetric) map[string]interface{} {
params := paramMapPool.Get().(map[string]interface{})
params["message"] = point
params["msg"] = point
params["name"] = point.Name()
params["timestamp"] = point.Time().Unix()
params["time"] = params["timestamp"]

fields := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Fields() {
fields[key] = value
switch key {
case "value":
params["messagetype"] = "metric"
params["value"] = value
params["metric"] = value
case "event":
params["messagetype"] = "event"
params["event"] = value
case "control":
params["messagetype"] = "control"
params["control"] = value
case "log":
params["messagetype"] = "log"
params["log"] = value
default:
params["messagetype"] = "unknown"
}
}
params["msgtype"] = params["messagetype"]
params["fields"] = fields
params["field"] = fields
tags := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Tags() {
tags[sanitizeExprString(key)] = value
}
params["tags"] = tags
params["tag"] = tags
meta := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Meta() {
meta[sanitizeExprString(key)] = value
}
params["meta"] = meta
return params
}

var baseenv = map[string]interface{}{
"name": "",
"messagetype": "unknown",
"msgtype": "unknown",
"tag": map[string]interface{}{
"type": "unknown",
"typeid": "0",
"stype": "unknown",
"stypeid": "0",
"hostname": "localhost",
"cluster": "nocluster",
},
"tags": map[string]interface{}{
"type": "unknown",
"typeid": "0",
"stype": "unknown",
"stypeid": "0",
"hostname": "localhost",
"cluster": "nocluster",
},
"meta": map[string]interface{}{
"unit": "invalid",
"source": "unknown",
},
"fields": map[string]interface{}{
"value": 0,
"event": "",
"control": "",
"log": "",
},
"field": map[string]interface{}{
"value": 0,
"event": "",
"control": "",
"log": "",
},
"timestamp": 1234567890,
"msg": lp.EmptyMessage(),
"message": lp.EmptyMessage(),
}

func addBaseEnvWalker(values map[string]interface{}) map[string]interface{} {
out := make(map[string]interface{})
for k, v := range values {
switch value := v.(type) {
case int, int32, int64, uint, uint32, uint64, string, float32, float64:
out[k] = value
case map[string]interface{}:
if _, ok := baseenv[k]; !ok {
out[k] = addBaseEnvWalker(value)
}
}
}
return out
}

func (mp *messageProcessor) AddBaseEnv(env map[string]interface{}) error {
for k, v := range env {
switch value := v.(type) {
case int, int32, int64, uint, uint32, uint64, string, float32, float64:
baseenv[k] = value
case map[string]interface{}:
if _, ok := baseenv[k]; !ok {
baseenv[k] = addBaseEnvWalker(value)
}
}
}
return nil
}

func (mp *messageProcessor) init() error {
mp.stages = make([]string, 0)
mp.mapping = make(map[string]*vm.Program)
mp.dropMessages = make(map[string]struct{})
mp.dropTypes = make(map[string]struct{})
mp.dropMessagesIf = make(map[*vm.Program]struct{})
mp.renameMessages = make(map[string]string)
mp.renameMessagesIf = make(map[*vm.Program]string)
mp.changeUnitPrefix = make(map[*vm.Program]string)
mp.addTagsIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.addMetaIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.addFieldIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteTagsIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteMetaIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteFieldIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveFieldToMeta = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveFieldToTag = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveMetaToField = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveMetaToTag = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveTagToField = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveTagToMeta = make(map[*vm.Program]messageProcessorTagConfig)
mp.normalizeUnits = false
return nil
}

func (mp *messageProcessor) AddDropMessagesByName(name string) error {
mp.mutex.Lock()
if _, ok := mp.dropMessages[name]; !ok {
mp.dropMessages[name] = struct{}{}
}
mp.mutex.Unlock()
return nil
}

func (mp *messageProcessor) RemoveDropMessagesByName(name string) {
mp.mutex.Lock()
delete(mp.dropMessages, name)
mp.mutex.Unlock()
}

func (mp *messageProcessor) AddDropMessagesByType(typestring string) error {
valid := []string{"metric", "event", "control", "log"}
isValid := false
for _, t := range valid {
if t == typestring {
isValid = true
break
}
}
if isValid {
mp.mutex.Lock()
if _, ok := mp.dropTypes[typestring]; !ok {
cclog.ComponentDebug("MessageProcessor", "Adding type", typestring, "for dropping")
mp.dropTypes[typestring] = struct{}{}
}
mp.mutex.Unlock()
} else {
return fmt.Errorf("invalid message type %s", typestring)
}
return nil
}

func (mp *messageProcessor) RemoveDropMessagesByType(typestring string) {
mp.mutex.Lock()
delete(mp.dropTypes, typestring)
mp.mutex.Unlock()
}

func (mp *messageProcessor) addTagConfig(condition, key, value string, config *map[*vm.Program]messageProcessorTagConfig) error {
var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := (*config)[evaluable]; !ok {
mp.mapping[condition] = evaluable
(*config)[evaluable] = messageProcessorTagConfig{
Condition: condition,
Key: key,
Value: value,
}
}
mp.mutex.Unlock()
return nil
}

func (mp *messageProcessor) removeTagConfig(condition string, config *map[*vm.Program]messageProcessorTagConfig) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(*config, e)
}
mp.mutex.Unlock()
}

func (mp *messageProcessor) AddAddTagsByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addTagsIf)
}

func (mp *messageProcessor) RemoveAddTagsByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addTagsIf)
}

func (mp *messageProcessor) AddDeleteTagsByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteTagsIf)
}

func (mp *messageProcessor) RemoveDeleteTagsByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteTagsIf)
}

func (mp *messageProcessor) AddAddMetaByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addMetaIf)
}

func (mp *messageProcessor) RemoveAddMetaByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addMetaIf)
}

func (mp *messageProcessor) AddDeleteMetaByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteMetaIf)
}

func (mp *messageProcessor) RemoveDeleteMetaByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteMetaIf)
}

func (mp *messageProcessor) AddAddFieldByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addFieldIf)
}

func (mp *messageProcessor) RemoveAddFieldByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addFieldIf)
}

func (mp *messageProcessor) AddDeleteFieldByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteFieldIf)
}

func (mp *messageProcessor) RemoveDeleteFieldByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteFieldIf)
}

func (mp *messageProcessor) AddDropMessagesByCondition(condition string) error {

var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := mp.dropMessagesIf[evaluable]; !ok {
mp.mapping[condition] = evaluable
mp.dropMessagesIf[evaluable] = struct{}{}
}
mp.mutex.Unlock()
return nil
}

func (mp *messageProcessor) RemoveDropMessagesByCondition(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.dropMessagesIf, e)
}
mp.mutex.Unlock()
}

func (mp *messageProcessor) AddRenameMetricByCondition(condition string, name string) error {

var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := mp.renameMessagesIf[evaluable]; !ok {
mp.mapping[condition] = evaluable
mp.renameMessagesIf[evaluable] = name
} else {
mp.renameMessagesIf[evaluable] = name
}
mp.mutex.Unlock()
return nil
}

func (mp *messageProcessor) RemoveRenameMetricByCondition(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.renameMessagesIf, e)
}
mp.mutex.Unlock()
}

func (mp *messageProcessor) SetNormalizeUnits(setting bool) {
mp.normalizeUnits = setting
}

func (mp *messageProcessor) AddChangeUnitPrefix(condition string, prefix string) error {

var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := mp.changeUnitPrefix[evaluable]; !ok {
mp.mapping[condition] = evaluable
mp.changeUnitPrefix[evaluable] = prefix
} else {
mp.changeUnitPrefix[evaluable] = prefix
}
mp.mutex.Unlock()
return nil
}

func (mp *messageProcessor) RemoveChangeUnitPrefix(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.changeUnitPrefix, e)
}
mp.mutex.Unlock()
}

func (mp *messageProcessor) AddRenameMetricByName(from, to string) error {
mp.mutex.Lock()
if _, ok := mp.renameMessages[from]; !ok {
mp.renameMessages[from] = to
}
mp.mutex.Unlock()
return nil
}

func (mp *messageProcessor) RemoveRenameMetricByName(from string) {
mp.mutex.Lock()
delete(mp.renameMessages, from)
mp.mutex.Unlock()
}

func (mp *messageProcessor) AddMoveTagToMeta(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveTagToMeta)
}

func (mp *messageProcessor) RemoveMoveTagToMeta(condition string) {
mp.removeTagConfig(condition, &mp.moveTagToMeta)
}

func (mp *messageProcessor) AddMoveTagToFields(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveTagToField)
}

func (mp *messageProcessor) RemoveMoveTagToFields(condition string) {
mp.removeTagConfig(condition, &mp.moveTagToField)
}

func (mp *messageProcessor) AddMoveMetaToTags(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveMetaToTag)
}

func (mp *messageProcessor) RemoveMoveMetaToTags(condition string) {
mp.removeTagConfig(condition, &mp.moveMetaToTag)
}

func (mp *messageProcessor) AddMoveMetaToFields(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveMetaToField)
}

func (mp *messageProcessor) RemoveMoveMetaToFields(condition string) {
mp.removeTagConfig(condition, &mp.moveMetaToField)
}

func (mp *messageProcessor) AddMoveFieldToTags(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveFieldToTag)
}

func (mp *messageProcessor) RemoveMoveFieldToTags(condition string) {
mp.removeTagConfig(condition, &mp.moveFieldToTag)
}

func (mp *messageProcessor) AddMoveFieldToMeta(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveFieldToMeta)
}

func (mp *messageProcessor) RemoveMoveFieldToMeta(condition string) {
mp.removeTagConfig(condition, &mp.moveFieldToMeta)
}

func (mp *messageProcessor) SetStages(stages []string) error {
newstages := make([]string, 0)
if len(stages) == 0 {
mp.mutex.Lock()
mp.stages = newstages
mp.mutex.Unlock()
return nil
}
for i, s := range stages {
valid := false
for _, v := range StageNames {
if s == v {
valid = true
}
}
if valid {
newstages = append(newstages, s)
} else {
return fmt.Errorf("invalid stage %s at index %d", s, i)
}
}
mp.mutex.Lock()
mp.stages = newstages
mp.mutex.Unlock()
return nil
}

func (mp *messageProcessor) DefaultStages() []string {
return StageNames
}

func (mp *messageProcessor) FromConfigJSON(config json.RawMessage) error {
var c messageProcessorConfig

err := json.Unmarshal(config, &c)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}

if len(c.StageOrder) > 0 {
err = mp.SetStages(c.StageOrder)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
} else {
err = mp.SetStages(mp.DefaultStages())
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}

for _, m := range c.DropMessages {
err = mp.AddDropMessagesByName(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropByType {
err = mp.AddDropMessagesByType(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropMessagesIf {
err = mp.AddDropMessagesByCondition(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.RenameMessagesIf {
err = mp.AddRenameMetricByCondition(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.RenameMessages {
err = mp.AddRenameMetricByName(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.ChangeUnitPrefix {
err = mp.AddChangeUnitPrefix(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddTagsIf {
err = mp.AddAddTagsByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddMetaIf {
err = mp.AddAddMetaByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddFieldIf {
err = mp.AddAddFieldByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelTagsIf {
err = mp.AddDeleteTagsByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelMetaIf {
err = mp.AddDeleteMetaByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelFieldIf {
err = mp.AddDeleteFieldByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveTagToMeta {
err = mp.AddMoveTagToMeta(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveTagToField {
err = mp.AddMoveTagToFields(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveMetaToTag {
err = mp.AddMoveMetaToTags(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveMetaToField {
err = mp.AddMoveMetaToFields(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveFieldToTag {
err = mp.AddMoveFieldToTags(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveFieldToMeta {
err = mp.AddMoveFieldToMeta(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropByType {
err = mp.AddDropMessagesByType(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
if len(c.AddBaseEnv) > 0 {
err = mp.AddBaseEnv(c.AddBaseEnv)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
mp.SetNormalizeUnits(c.NormalizeUnits)
return nil
}

func (mp *messageProcessor) ProcessMetric(metric lplegacy.CCMetric) (lp.CCMessage, error) {
m, err := lp.NewMessage(
metric.Name(),
metric.Tags(),
metric.Meta(),
metric.Fields(),
metric.Time(),
)
if err != nil {
return m, fmt.Errorf("failed to parse metric to message: %v", err.Error())
}
return mp.ProcessMessage(m)

}

func (mp *messageProcessor) ProcessMessage(m lp.CCMessage) (lp.CCMessage, error) {
var err error = nil
var out lp.CCMessage = lp.FromMessage(m)

name := out.Name()

if len(mp.stages) == 0 {
mp.SetStages(mp.DefaultStages())
}

mp.mutex.RLock()
defer mp.mutex.RUnlock()

params := getParamMap(out)

defer func() {
params["field"] = nil
params["tag"] = nil
paramMapPool.Put(params["fields"])
paramMapPool.Put(params["tags"])
paramMapPool.Put(params["meta"])
paramMapPool.Put(params)
}()

for _, s := range mp.stages {
switch s {
case STAGENAME_DROP_BY_NAME:
if len(mp.dropMessages) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by message name ", name)
if _, ok := mp.dropMessages[name]; ok {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
}
}
case STAGENAME_DROP_BY_TYPE:
if len(mp.dropTypes) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by message type")
if _, ok := mp.dropTypes[params["messagetype"].(string)]; ok {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
|
||||
}
|
||||
}
|
||||
case STAGENAME_DROP_IF:
|
||||
if len(mp.dropMessagesIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Dropping by condition")
|
||||
drop, err := dropMessagesIf(¶ms, &mp.dropMessagesIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
if drop {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Drop")
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
case STAGENAME_RENAME_BY_NAME:
|
||||
if len(mp.renameMessages) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Renaming by name match")
|
||||
if newname, ok := mp.renameMessages[name]; ok {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Rename to", newname)
|
||||
out.SetName(newname)
|
||||
//cclog.ComponentDebug("MessageProcessor", "Add old name as 'oldname' to meta", name)
|
||||
out.AddMeta("oldname", name)
|
||||
}
|
||||
}
|
||||
case STAGENAME_RENAME_IF:
|
||||
if len(mp.renameMessagesIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Renaming by condition")
|
||||
_, err := renameMessagesIf(out, ¶ms, &mp.renameMessagesIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_ADD_TAG:
|
||||
if len(mp.addTagsIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Adding tags")
|
||||
_, err = addTagIf(out, ¶ms, &mp.addTagsIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_DELETE_TAG:
|
||||
if len(mp.deleteTagsIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Delete tags")
|
||||
_, err = deleteTagIf(out, ¶ms, &mp.deleteTagsIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_ADD_META:
|
||||
if len(mp.addMetaIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Adding meta information")
|
||||
_, err = addMetaIf(out, ¶ms, &mp.addMetaIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_DELETE_META:
|
||||
if len(mp.deleteMetaIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Delete meta information")
|
||||
_, err = deleteMetaIf(out, ¶ms, &mp.deleteMetaIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_ADD_FIELD:
|
||||
if len(mp.addFieldIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Adding fields")
|
||||
_, err = addFieldIf(out, ¶ms, &mp.addFieldIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_DELETE_FIELD:
|
||||
if len(mp.deleteFieldIf) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Delete fields")
|
||||
_, err = deleteFieldIf(out, ¶ms, &mp.deleteFieldIf)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_MOVE_TAG_META:
|
||||
if len(mp.moveTagToMeta) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Move tag to meta")
|
||||
_, err := moveTagToMeta(out, ¶ms, &mp.moveTagToMeta)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_MOVE_TAG_FIELD:
|
||||
if len(mp.moveTagToField) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Move tag to fields")
|
||||
_, err := moveTagToField(out, ¶ms, &mp.moveTagToField)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_MOVE_META_TAG:
|
||||
if len(mp.moveMetaToTag) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Move meta to tags")
|
||||
_, err := moveMetaToTag(out, ¶ms, &mp.moveMetaToTag)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_MOVE_META_FIELD:
|
||||
if len(mp.moveMetaToField) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Move meta to fields")
|
||||
_, err := moveMetaToField(out, ¶ms, &mp.moveMetaToField)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_MOVE_FIELD_META:
|
||||
if len(mp.moveFieldToMeta) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Move field to meta")
|
||||
_, err := moveFieldToMeta(out, ¶ms, &mp.moveFieldToMeta)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_MOVE_FIELD_TAG:
|
||||
if len(mp.moveFieldToTag) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Move field to tags")
|
||||
_, err := moveFieldToTag(out, ¶ms, &mp.moveFieldToTag)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
}
|
||||
case STAGENAME_NORMALIZE_UNIT:
|
||||
if mp.normalizeUnits {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Normalize units")
|
||||
if lp.IsMetric(out) {
|
||||
_, err := normalizeUnits(out)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
} else {
|
||||
cclog.ComponentDebug("MessageProcessor", "skipped, no metric")
|
||||
}
|
||||
}
|
||||
|
||||
case STAGENAME_CHANGE_UNIT_PREFIX:
|
||||
if len(mp.changeUnitPrefix) > 0 {
|
||||
//cclog.ComponentDebug("MessageProcessor", "Change unit prefix")
|
||||
if lp.IsMetric(out) {
|
||||
_, err := changeUnitPrefix(out, ¶ms, &mp.changeUnitPrefix)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
|
||||
}
|
||||
} else {
|
||||
cclog.ComponentDebug("MessageProcessor", "skipped, no metric")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Get a new instace of a message processor.
|
||||
func NewMessageProcessor() (MessageProcessor, error) {
|
||||
mp := new(messageProcessor)
|
||||
err := mp.init()
|
||||
if err != nil {
|
||||
err := fmt.Errorf("failed to create MessageProcessor: %v", err.Error())
|
||||
cclog.ComponentError("MessageProcessor", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
return mp, nil
|
||||
}
|
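
The functions above are the public entry points of the processor: construct it, feed it a JSON config, then run messages through it. A minimal usage sketch, assuming the `lp` (cc-message) import and the `lp.NewMetric` constructor used in the test file further down; this snippet is illustrative only and is not part of the commit:

```go
// Illustrative sketch, not part of this commit. Assumes the cc-message
// package is imported as lp, as in the files above.
func exampleProcess(sink chan lp.CCMessage) error {
	proc, err := NewMessageProcessor()
	if err != nil {
		return err
	}
	// Mirror the "drop_by_name" test case below: drop all net_bytes_in metrics.
	err = proc.FromConfigJSON(json.RawMessage(`{"drop_messages": ["net_bytes_in"]}`))
	if err != nil {
		return err
	}
	m, err := lp.NewMetric("net_bytes_in",
		map[string]string{"type": "node", "type-id": "0"},
		map[string]string{"unit": "Byte"},
		float64(1024.0), time.Now())
	if err != nil {
		return err
	}
	out, err := proc.ProcessMessage(m)
	if err != nil {
		return err
	}
	if out != nil { // a nil result signals that the message was dropped
		sink <- out
	}
	return nil
}
```
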
@@ -1,262 +0,0 @@
package messageprocessor

import (
	"errors"
	"fmt"

	lp2 "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	units "github.com/ClusterCockpit/cc-units"
	"github.com/expr-lang/expr"
	"github.com/expr-lang/expr/vm"
)

type MessageLocation int

const (
	MESSAGE_LOCATION_TAGS MessageLocation = iota
	MESSAGE_LOCATION_META
	MESSAGE_LOCATION_FIELDS
)

// Abstract function to move entries from one location to another
func moveInMessage(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, from, to MessageLocation) (bool, error) {
	for d, data := range *checks {
		value, err := expr.Run(d, *params)
		if err != nil {
			return false, fmt.Errorf("failed to evaluate: %v", err.Error())
		}
		//cclog.ComponentDebug("MessageProcessor", "Move from", from, "to", to)
		if value.(bool) {
			var v string
			var ok bool = false
			switch from {
			case MESSAGE_LOCATION_TAGS:
				//cclog.ComponentDebug("MessageProcessor", "Getting tag key", data.Key)
				v, ok = message.GetTag(data.Key)
			case MESSAGE_LOCATION_META:
				//cclog.ComponentDebug("MessageProcessor", "Getting meta key", data.Key)
				//cclog.ComponentDebug("MessageProcessor", message.Meta())
				v, ok = message.GetMeta(data.Key)
			case MESSAGE_LOCATION_FIELDS:
				var x interface{}
				//cclog.ComponentDebug("MessageProcessor", "Getting field key", data.Key)
				x, ok = message.GetField(data.Key)
				v = fmt.Sprintf("%v", x)
			}
			if ok {
				switch from {
				case MESSAGE_LOCATION_TAGS:
					//cclog.ComponentDebug("MessageProcessor", "Removing tag key", data.Key)
					message.RemoveTag(data.Key)
				case MESSAGE_LOCATION_META:
					//cclog.ComponentDebug("MessageProcessor", "Removing meta key", data.Key)
					message.RemoveMeta(data.Key)
				case MESSAGE_LOCATION_FIELDS:
					//cclog.ComponentDebug("MessageProcessor", "Removing field key", data.Key)
					message.RemoveField(data.Key)
				}
				switch to {
				case MESSAGE_LOCATION_TAGS:
					//cclog.ComponentDebug("MessageProcessor", "Adding tag", data.Value, "->", v)
					message.AddTag(data.Value, v)
				case MESSAGE_LOCATION_META:
					//cclog.ComponentDebug("MessageProcessor", "Adding meta", data.Value, "->", v)
					message.AddMeta(data.Value, v)
				case MESSAGE_LOCATION_FIELDS:
					//cclog.ComponentDebug("MessageProcessor", "Adding field", data.Value, "->", v)
					message.AddField(data.Value, v)
				}
			}
		}
	}
	return false, nil
}

func deleteIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, location MessageLocation) (bool, error) {
	for d, data := range *checks {
		value, err := expr.Run(d, *params)
		if err != nil {
			return true, fmt.Errorf("failed to evaluate: %v", err.Error())
		}
		if value.(bool) {
			switch location {
			case MESSAGE_LOCATION_FIELDS:
				switch data.Key {
				case "value", "event", "log", "control":
					return false, errors.New("cannot delete protected fields")
				default:
					//cclog.ComponentDebug("MessageProcessor", "Removing field for", data.Key)
					message.RemoveField(data.Key)
				}
			case MESSAGE_LOCATION_TAGS:
				//cclog.ComponentDebug("MessageProcessor", "Removing tag for", data.Key)
				message.RemoveTag(data.Key)
			case MESSAGE_LOCATION_META:
				//cclog.ComponentDebug("MessageProcessor", "Removing meta for", data.Key)
				message.RemoveMeta(data.Key)
			}
		}
	}
	return false, nil
}

func addIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, location MessageLocation) (bool, error) {
	for d, data := range *checks {
		value, err := expr.Run(d, *params)
		if err != nil {
			return true, fmt.Errorf("failed to evaluate: %v", err.Error())
		}
		if value.(bool) {
			switch location {
			case MESSAGE_LOCATION_FIELDS:
				//cclog.ComponentDebug("MessageProcessor", "Adding field", data.Key, "->", data.Value)
				message.AddField(data.Key, data.Value)
			case MESSAGE_LOCATION_TAGS:
				//cclog.ComponentDebug("MessageProcessor", "Adding tag", data.Key, "->", data.Value)
				message.AddTag(data.Key, data.Value)
			case MESSAGE_LOCATION_META:
				//cclog.ComponentDebug("MessageProcessor", "Adding meta", data.Key, "->", data.Value)
				message.AddMeta(data.Key, data.Value)
			}
		}
	}
	return false, nil
}

func deleteTagIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return deleteIf(message, params, checks, MESSAGE_LOCATION_TAGS)
}

func addTagIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return addIf(message, params, checks, MESSAGE_LOCATION_TAGS)
}

func moveTagToMeta(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return moveInMessage(message, params, checks, MESSAGE_LOCATION_TAGS, MESSAGE_LOCATION_META)
}

func moveTagToField(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return moveInMessage(message, params, checks, MESSAGE_LOCATION_TAGS, MESSAGE_LOCATION_FIELDS)
}

func deleteMetaIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return deleteIf(message, params, checks, MESSAGE_LOCATION_META)
}

func addMetaIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return addIf(message, params, checks, MESSAGE_LOCATION_META)
}

func moveMetaToTag(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return moveInMessage(message, params, checks, MESSAGE_LOCATION_META, MESSAGE_LOCATION_TAGS)
}

func moveMetaToField(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return moveInMessage(message, params, checks, MESSAGE_LOCATION_META, MESSAGE_LOCATION_FIELDS)
}

func deleteFieldIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return deleteIf(message, params, checks, MESSAGE_LOCATION_FIELDS)
}

func addFieldIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return addIf(message, params, checks, MESSAGE_LOCATION_FIELDS)
}

func moveFieldToTag(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return moveInMessage(message, params, checks, MESSAGE_LOCATION_FIELDS, MESSAGE_LOCATION_TAGS)
}

func moveFieldToMeta(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
	return moveInMessage(message, params, checks, MESSAGE_LOCATION_FIELDS, MESSAGE_LOCATION_META)
}

func dropMessagesIf(params *map[string]interface{}, checks *map[*vm.Program]struct{}) (bool, error) {
	for d := range *checks {
		value, err := expr.Run(d, *params)
		if err != nil {
			return false, fmt.Errorf("failed to evaluate: %v", err.Error())
		}
		if value.(bool) {
			return true, nil
		}
	}
	return false, nil
}

func normalizeUnits(message lp2.CCMessage) (bool, error) {
	if in_unit, ok := message.GetMeta("unit"); ok {
		u := units.NewUnit(in_unit)
		if u.Valid() {
			//cclog.ComponentDebug("MessageProcessor", "Update unit with", u.Short())
			message.AddMeta("unit", u.Short())
		}
	} else if in_unit, ok := message.GetTag("unit"); ok {
		u := units.NewUnit(in_unit)
		if u.Valid() {
			//cclog.ComponentDebug("MessageProcessor", "Update unit with", u.Short())
			message.AddTag("unit", u.Short())
		}
	}
	return false, nil
}

func changeUnitPrefix(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]string) (bool, error) {
	for r, n := range *checks {
		value, err := expr.Run(r, *params)
		if err != nil {
			return false, fmt.Errorf("failed to evaluate: %v", err.Error())
		}
		if value.(bool) {
			newPrefix := units.NewPrefix(n)
			//cclog.ComponentDebug("MessageProcessor", "Condition matches, change to prefix", newPrefix.String())
			if in_unit, ok := message.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
				u := units.NewUnit(in_unit)
				if u.Valid() {
					//cclog.ComponentDebug("MessageProcessor", "Input unit", u.Short())
					conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
					if conv != nil && out_unit.Valid() {
						if val, ok := message.GetField("value"); ok {
							//cclog.ComponentDebug("MessageProcessor", "Update unit with", out_unit.Short())
							message.AddField("value", conv(val))
							message.AddMeta("unit", out_unit.Short())
						}
					}
				}
			} else if in_unit, ok := message.GetTag("unit"); ok && newPrefix != units.InvalidPrefix {
				u := units.NewUnit(in_unit)
				if u.Valid() {
					//cclog.ComponentDebug("MessageProcessor", "Input unit", u.Short())
					conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
					if conv != nil && out_unit.Valid() {
						if val, ok := message.GetField("value"); ok {
							//cclog.ComponentDebug("MessageProcessor", "Update unit with", out_unit.Short())
							message.AddField("value", conv(val))
							message.AddTag("unit", out_unit.Short())
						}
					}
				}
			}
		}
	}
	return false, nil
}

func renameMessagesIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]string) (bool, error) {
	for d, n := range *checks {
		value, err := expr.Run(d, *params)
		if err != nil {
			return true, fmt.Errorf("failed to evaluate: %v", err.Error())
		}
		if value.(bool) {
			old := message.Name()
			//cclog.ComponentDebug("MessageProcessor", "Rename to", n)
			message.SetName(n)
			//cclog.ComponentDebug("MessageProcessor", "Add old name as 'oldname' to meta", old)
			message.AddMeta("oldname", old)
		}
	}
	return false, nil
}
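
All of these helpers receive pre-compiled `*vm.Program` conditions and a parameter map built from the message. A standalone sketch of that evaluation step, using the expr-lang API imported above; the exact layout of the parameter map is an assumption modeled on the test messages below:

```go
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// Compile a drop condition once; the processor stores such programs
	// in its check maps and runs them once per message.
	prog, err := expr.Compile("name == 'net_bytes_in' && tags.type == 'node'", expr.AsBool())
	if err != nil {
		panic(err)
	}
	// Parameter map roughly as the processor would build it from a message.
	params := map[string]interface{}{
		"name": "net_bytes_in",
		"tags": map[string]interface{}{"type": "node", "type-id": "0"},
	}
	value, err := expr.Run(prog, params)
	if err != nil {
		panic(err)
	}
	fmt.Println(value.(bool)) // true -> this message would be dropped
}
```
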
@@ -1,396 +0,0 @@
package messageprocessor

import (
	"encoding/json"
	"errors"
	"fmt"
	"testing"
	"time"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
)

func generate_message_lists(num_lists, num_entries int) ([][]lp.CCMessage, error) {
	mlist := make([][]lp.CCMessage, 0)
	for j := 0; j < num_lists; j++ {
		out := make([]lp.CCMessage, 0)
		for i := 0; i < num_entries; i++ {
			var x lp.CCMessage
			var err error = nil
			switch {
			case i%4 == 0:
				x, err = lp.NewEvent("myevent", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, "nothing happened", time.Now())
			case i%4 == 1:
				x, err = lp.NewMetric("mymetric", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{"unit": "kByte"}, 12.145, time.Now())
			case i%4 == 2:
				x, err = lp.NewLog("mylog", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, "disk status: OK", time.Now())
			case i%4 == 3:
				x, err = lp.NewGetControl("mycontrol", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, time.Now())
			}
			if err == nil {
				x.AddTag("hostname", "myhost")
				out = append(out, x)
			} else {
				return nil, errors.New("failed to create message")
			}
		}
		mlist = append(mlist, out)
	}
	return mlist, nil
}

func TestNewMessageProcessor(t *testing.T) {
	_, err := NewMessageProcessor()
	if err != nil {
		t.Error(err.Error())
	}
}

type Configs struct {
	name   string
	config json.RawMessage
	drop   bool
	errors bool
	pre    func(msg lp.CCMessage) error
	check  func(msg lp.CCMessage) error
}

var test_configs = []Configs{
	{
		name:   "single_dropif_nomatch",
		config: json.RawMessage(`{"drop_messages_if": [ "name == 'testname' && tags.type == 'socket' && tags.typeid % 2 == 1"]}`),
	},
	{
		name:   "drop_by_name",
		config: json.RawMessage(`{"drop_messages": [ "net_bytes_in"]}`),
		drop:   true,
	},
	{
		name:   "drop_by_type_match",
		config: json.RawMessage(`{"drop_by_message_type": [ "metric"]}`),
		drop:   true,
	},
	{
		name:   "drop_by_type_nomatch",
		config: json.RawMessage(`{"drop_by_message_type": [ "event"]}`),
	},
	{
		name:   "single_dropif_match",
		config: json.RawMessage(`{"drop_messages_if": [ "name == 'net_bytes_in' && tags.type == 'node'"]}`),
		drop:   true,
	},
	{
		name:   "double_dropif_match_nomatch",
		config: json.RawMessage(`{"drop_messages_if": [ "name == 'net_bytes_in' && tags.type == 'node'", "name == 'testname' && tags.type == 'socket' && tags.typeid % 2 == 1"]}`),
		drop:   true,
	},
	{
		name:   "rename_simple",
		config: json.RawMessage(`{"rename_messages": { "net_bytes_in" : "net_bytes_out", "rapl_power": "cpu_power"}}`),
		check: func(msg lp.CCMessage) error {
			if msg.Name() != "net_bytes_out" {
				return errors.New("expected name net_bytes_out but still have net_bytes_in")
			}
			return nil
		},
	},
	{
		name:   "rename_match",
		config: json.RawMessage(`{"rename_messages_if": { "name == 'net_bytes_in'" : "net_bytes_out", "name == 'rapl_power'": "cpu_power"}}`),
		check: func(msg lp.CCMessage) error {
			if msg.Name() != "net_bytes_out" {
				return errors.New("expected name net_bytes_out but still have net_bytes_in")
			}
			return nil
		},
	},
	{
		name:   "rename_nomatch",
		config: json.RawMessage(`{"rename_messages_if": { "name == 'net_bytes_out'" : "net_bytes_in", "name == 'rapl_power'": "cpu_power"}}`),
		check: func(msg lp.CCMessage) error {
			if msg.Name() != "net_bytes_in" {
				return errors.New("expected name net_bytes_in but message was renamed")
			}
			return nil
		},
	},
	{
		name:   "add_tag",
		config: json.RawMessage(`{"add_tags_if": [{"if": "name == 'net_bytes_in'", "key" : "cluster", "value" : "mycluster"}]}`),
		check: func(msg lp.CCMessage) error {
			if !msg.HasTag("cluster") {
				return errors.New("expected new tag 'cluster' but not present")
			}
			return nil
		},
	},
	{
		name:   "del_tag",
		config: json.RawMessage(`{"delete_tags_if": [{"if": "name == 'net_bytes_in'", "key" : "type"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasTag("type") {
				return errors.New("expected to have no 'type' but still present")
			}
			return nil
		},
	},
	{
		name:   "add_meta",
		config: json.RawMessage(`{"add_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "source", "value" : "example"}]}`),
		check: func(msg lp.CCMessage) error {
			if !msg.HasMeta("source") {
				return errors.New("expected new meta entry 'source' but not present")
			}
			return nil
		},
	},
	{
		name:   "del_meta",
		config: json.RawMessage(`{"delete_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "unit"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasMeta("unit") {
				return errors.New("expected to have no 'unit' but still present")
			}
			return nil
		},
	},
	{
		name:   "add_field",
		config: json.RawMessage(`{"add_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value" : "example"}]}`),
		check: func(msg lp.CCMessage) error {
			if !msg.HasField("myfield") {
				return errors.New("expected new field 'myfield' but not present")
			}
			return nil
		},
	},
	{
		name:   "delete_fields_if_protected",
		config: json.RawMessage(`{"delete_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "value"}]}`),
		errors: true,
		check: func(msg lp.CCMessage) error {
			if !msg.HasField("value") {
				return errors.New("expected to still have 'value' field because it is a protected field key")
			}
			return nil
		},
	},
	{
		name:   "delete_fields_if_unprotected",
		config: json.RawMessage(`{"delete_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "testfield"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasField("testfield") {
				return errors.New("expected 'testfield' to be deleted but it is still present")
			}
			return nil
		},
		pre: func(msg lp.CCMessage) error {
			msg.AddField("testfield", 4.123)
			return nil
		},
	},
	{
		name:   "single_change_prefix_match",
		config: json.RawMessage(`{"change_unit_prefix": {"name == 'net_bytes_in' && tags.type == 'node'": "M"}}`),
		check: func(msg lp.CCMessage) error {
			if u, ok := msg.GetMeta("unit"); ok {
				if u != "MB" {
					return fmt.Errorf("expected unit MB but have %s", u)
				}
			} else if u, ok := msg.GetTag("unit"); ok {
				if u != "MB" {
					return fmt.Errorf("expected unit MB but have %s", u)
				}
			}
			return nil
		},
	},
	{
		name:   "normalize_units",
		config: json.RawMessage(`{"normalize_units": true}`),
		check: func(msg lp.CCMessage) error {
			if u, ok := msg.GetMeta("unit"); ok {
				if u != "B" {
					return fmt.Errorf("expected unit B but have %s", u)
				}
			} else if u, ok := msg.GetTag("unit"); ok {
				if u != "B" {
					return fmt.Errorf("expected unit B but have %s", u)
				}
			}
			return nil
		},
	},
	{
		name:   "move_tag_to_meta",
		config: json.RawMessage(`{"move_tag_to_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "type-id", "value": "typeid"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasTag("type-id") || !msg.HasMeta("typeid") {
				return errors.New("moving tag 'type-id' to meta 'typeid' failed")
			}
			return nil
		},
		pre: func(msg lp.CCMessage) error {
			msg.AddTag("type-id", "0")
			return nil
		},
	},
	{
		name:   "move_tag_to_field",
		config: json.RawMessage(`{"move_tag_to_field_if": [{"if": "name == 'net_bytes_in'", "key" : "type-id", "value": "typeid"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasTag("type-id") || !msg.HasField("typeid") {
				return errors.New("moving tag 'type-id' to field 'typeid' failed")
			}
			return nil
		},
		pre: func(msg lp.CCMessage) error {
			msg.AddTag("type-id", "0")
			return nil
		},
	},
	{
		name:   "move_meta_to_tag",
		config: json.RawMessage(`{"move_meta_to_tag_if": [{"if": "name == 'net_bytes_in'", "key" : "unit", "value": "unit"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasMeta("unit") || !msg.HasTag("unit") {
				return errors.New("moving meta 'unit' to tag 'unit' failed")
			}
			return nil
		},
	},
	{
		name:   "move_meta_to_field",
		config: json.RawMessage(`{"move_meta_to_field_if": [{"if": "name == 'net_bytes_in'", "key" : "unit", "value": "unit"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasMeta("unit") || !msg.HasField("unit") {
				return errors.New("moving meta 'unit' to field 'unit' failed")
			}
			return nil
		},
	},
	{
		name:   "move_field_to_tag",
		config: json.RawMessage(`{"move_field_to_tag_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value": "field"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasField("myfield") || !msg.HasTag("field") {
				return errors.New("moving field 'myfield' to tag 'field' failed")
			}
			return nil
		},
		pre: func(msg lp.CCMessage) error {
			msg.AddField("myfield", 12)
			return nil
		},
	},
	{
		name:   "move_field_to_meta",
		config: json.RawMessage(`{"move_field_to_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value": "field"}]}`),
		check: func(msg lp.CCMessage) error {
			if msg.HasField("myfield") || !msg.HasMeta("field") {
				return errors.New("moving field 'myfield' to meta 'field' failed")
			}
			return nil
		},
		pre: func(msg lp.CCMessage) error {
			msg.AddField("myfield", 12)
			return nil
		},
	},
}

func TestConfigList(t *testing.T) {
	for _, c := range test_configs {
		t.Run(c.name, func(t *testing.T) {
			m, err := lp.NewMetric("net_bytes_in", map[string]string{"type": "node", "type-id": "0"}, map[string]string{"unit": "Byte"}, float64(1024.0), time.Now())
			if err != nil {
				t.Error(err.Error())
				return
			}
			if c.pre != nil {
				if err = c.pre(m); err != nil {
					t.Errorf("error running pre-test function: %v", err.Error())
					return
				}
			}

			mp, err := NewMessageProcessor()
			if err != nil {
				t.Error(err.Error())
				return
			}
			err = mp.FromConfigJSON(c.config)
			if err != nil {
				t.Error(err.Error())
				return
			}
			//t.Log(m.ToLineProtocol(nil))
			out, err := mp.ProcessMessage(m)
			if err != nil && !c.errors {
				cclog.SetDebug()
				mp.ProcessMessage(m)
				t.Error(err.Error())
				return
			}
			if out == nil && !c.drop {
				t.Error("fail, message should NOT be dropped but processor signalled dropping")
				return
			} else if out != nil && c.drop {
				t.Error("fail, message should be dropped but processor signalled NO dropping")
				return
			}
			// {
			// 	if c.drop {
			// 		t.Error("fail, message should be dropped but processor signalled NO dropping")
			// 	} else {
			// 		t.Error("fail, message should NOT be dropped but processor signalled dropping")
			// 	}
			// 	cclog.SetDebug()
			// 	mp.ProcessMessage(m)
			// 	return
			// }
			if c.check != nil {
				if err := c.check(out); err != nil {
					t.Errorf("check failed with %v", err.Error())
					t.Log("Rerun with debugging")
					cclog.SetDebug()
					mp.ProcessMessage(m)
					return
				}
			}
		})
	}
}

func BenchmarkProcessing(b *testing.B) {

	mlist, err := generate_message_lists(b.N, 1000)
	if err != nil {
		b.Error(err.Error())
		return
	}

	mp, err := NewMessageProcessor()
	if err != nil {
		b.Error(err.Error())
		return
	}
	err = mp.FromConfigJSON(json.RawMessage(`{"move_meta_to_tag_if": [{"if" : "name == 'mymetric'", "key":"unit", "value":"unit"}]}`))
	if err != nil {
		b.Error(err.Error())
		return
	}

	// Exclude message generation and setup from the measured time.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, m := range mlist[i] {
			if _, err := mp.ProcessMessage(m); err != nil {
				b.Errorf("failed processing message '%s': %v", m.ToLineProtocol(nil), err.Error())
				return
			}
		}
	}
	b.StopTimer()
	// Each of the b.N lists contains 1000 messages.
	b.ReportMetric(float64(b.Elapsed())/float64(b.N*1000), "ns/message")
}
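
The deleted test file above can still be exercised on the parent commit with the standard Go tooling. The package directory `./pkg/messageProcessor/` is an assumption inferred from the import paths in this diff:

```
go test -run TestConfigList ./pkg/messageProcessor/
go test -bench BenchmarkProcessing -run '^$' ./pkg/messageProcessor/
```
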
@@ -10,15 +10,15 @@ import (
 	"sync"
 	"time"
 
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	influx "github.com/influxdata/line-protocol/v2/lineprotocol"
 )
 
 const HTTP_RECEIVER_PORT = "8080"
 
 type HttpReceiverConfig struct {
-	defaultReceiverConfig
+	Type string `json:"type"`
 	Addr string `json:"address"`
 	Port string `json:"port"`
 	Path string `json:"path"`
@@ -39,7 +39,7 @@ type HttpReceiverConfig struct {
 
 type HttpReceiver struct {
 	receiver
-	//meta   map[string]string
+	meta   map[string]string
 	config HttpReceiverConfig
 	server *http.Server
 	wg     sync.WaitGroup
@@ -85,20 +85,8 @@ func (r *HttpReceiver) Init(name string, config json.RawMessage) error {
 	if r.config.useBasicAuth && len(r.config.Password) == 0 {
 		return errors.New("basic authentication requires password")
 	}
-	msgp, err := mp.NewMessageProcessor()
-	if err != nil {
-		return fmt.Errorf("initialization of message processor failed: %v", err.Error())
-	}
-	r.mp = msgp
-	if len(r.config.MessageProcessor) > 0 {
-		err = r.mp.FromConfigJSON(r.config.MessageProcessor)
-		if err != nil {
-			return fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
-		}
-	}
-	r.mp.AddAddMetaByCondition("true", "source", r.name)
-
-	//r.meta = map[string]string{"source": r.name}
+	r.meta = map[string]string{"source": r.name}
 	p := r.config.Path
 	if !strings.HasPrefix(p, "/") {
 		p = "/" + p
@@ -149,24 +137,80 @@ func (r *HttpReceiver) ServerHttp(w http.ResponseWriter, req *http.Request) {
 			return
 		}
 	}
-	if r.sink != nil {
-		buf := make([]byte, 0, req.ContentLength)
-		len, err := req.Body.Read(buf)
-		if err == nil && len > 0 {
-			messages, err := lp.FromBytes(buf)
+
+	d := influx.NewDecoder(req.Body)
+	for d.Next() {
+
+		// Decode measurement name
+		measurement, err := d.Measurement()
+		if err != nil {
+			msg := "ServerHttp: Failed to decode measurement: " + err.Error()
+			cclog.ComponentError(r.name, msg)
+			http.Error(w, msg, http.StatusInternalServerError)
+			return
+		}
+
+		// Decode tags
+		tags := make(map[string]string)
+		for {
+			key, value, err := d.NextTag()
 			if err != nil {
-				msg := "ServerHttp: Failed to decode messages: " + err.Error()
+				msg := "ServerHttp: Failed to decode tag: " + err.Error()
 				cclog.ComponentError(r.name, msg)
 				http.Error(w, msg, http.StatusInternalServerError)
 				return
 			}
-			for _, y := range messages {
-				m, err := r.mp.ProcessMessage(y)
-				if err == nil && m != nil {
-					r.sink <- m
-				}
+			if key == nil {
+				break
 			}
+			tags[string(key)] = string(value)
 		}
+
+		// Decode fields
+		fields := make(map[string]interface{})
+		for {
+			key, value, err := d.NextField()
+			if err != nil {
+				msg := "ServerHttp: Failed to decode field: " + err.Error()
+				cclog.ComponentError(r.name, msg)
+				http.Error(w, msg, http.StatusInternalServerError)
+				return
+			}
+			if key == nil {
+				break
+			}
+			fields[string(key)] = value.Interface()
+		}
+
+		// Decode time stamp
+		t, err := d.Time(influx.Nanosecond, time.Time{})
+		if err != nil {
+			msg := "ServerHttp: Failed to decode time stamp: " + err.Error()
+			cclog.ComponentError(r.name, msg)
+			http.Error(w, msg, http.StatusInternalServerError)
+			return
+		}
+
+		y, _ := lp.New(
+			string(measurement),
+			tags,
+			r.meta,
+			fields,
+			t,
+		)
+
+		if r.sink != nil {
+			r.sink <- y
+		}
+	}
+
+	// Check for IO errors
+	err := d.Err()
+	if err != nil {
+		msg := "ServerHttp: Failed to decode: " + err.Error()
+		cclog.ComponentError(r.name, msg)
+		http.Error(w, msg, http.StatusInternalServerError)
+		return
+	}
+
 	w.WriteHeader(http.StatusOK)
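
After this change the HTTP receiver decodes InfluxDB line protocol directly from the request body. A hedged example of posting one metric with curl; the port, `/write` path, and credentials are assumptions that depend on the receiver configuration above:

```
curl -u user:password -X POST \
  --data-binary 'cpu_load,hostname=myhost,type=node value=1.23 1700000000000000000' \
  http://localhost:8080/write
```
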
@@ -13,10 +13,9 @@ import (
 	"sync"
 	"time"
 
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	"github.com/ClusterCockpit/cc-metric-collector/pkg/hostlist"
-	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
 )
 
 type IPMIReceiverClientConfig struct {
@@ -32,13 +31,11 @@ type IPMIReceiverClientConfig struct {
 	Password   string   // Password to use for authentication
 	CLIOptions []string // Additional command line options for ipmi-sensors
 	isExcluded map[string]bool // is metric excluded
-	mp         mp.MessageProcessor
 }
 
 type IPMIReceiver struct {
 	receiver
 	config struct {
 		defaultReceiverConfig
 		Interval time.Duration
 
 		// Client config for each IPMI host
@@ -46,11 +43,10 @@ type IPMIReceiver struct {
 	}
 
 	// Storage for static information
-	//meta map[string]string
+	meta map[string]string
 
 	done chan bool      // channel to finish / stop IPMI receiver
 	wg   sync.WaitGroup // wait group for IPMI receiver
-	mp   mp.MessageProcessor
 }
 
 // doReadMetrics reads metrics from all configured IPMI hosts.
@@ -217,7 +213,7 @@ func (r *IPMIReceiver) doReadMetric() {
 				continue
 			}
 
-			y, err := lp.NewMessage(
+			y, err := lp.New(
 				metric,
 				map[string]string{
 					"hostname": host,
@@ -234,14 +230,7 @@ func (r *IPMIReceiver) doReadMetric() {
 				},
 				time.Now())
 			if err == nil {
-				mc, err := clientConfig.mp.ProcessMessage(y)
-				if err == nil && mc != nil {
-					m, err := r.mp.ProcessMessage(mc)
-					if err == nil && m != nil {
-						r.sink <- m
-					}
-				}
+				r.sink <- y
 			}
 		}
 
@@ -307,12 +296,11 @@ func (r *IPMIReceiver) Close() {
 // NewIPMIReceiver creates a new instance of the IPMI receiver
 // Initialize the receiver by giving it a name and reading in the config JSON
 func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
-	var err error
 	r := new(IPMIReceiver)
 
 	// Config options from config file
 	configJSON := struct {
-		defaultReceiverConfig
+		Type string `json:"type"`
 
 		// How often the IPMI sensor metrics should be read and sent to the sink (default: 30 s)
 		IntervalString string `json:"interval,omitempty"`
@@ -343,8 +331,7 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
 			ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
 
 			// Additional command line options for ipmi-sensors
-			CLIOptions       []string        `json:"cli_options,omitempty"`
-			MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
+			CLIOptions []string `json:"cli_options,omitempty"`
 		} `json:"client_config"`
 	}{
 		// Set default values
@@ -360,15 +347,8 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
 	// Create done channel
 	r.done = make(chan bool)
 
-	p, err := mp.NewMessageProcessor()
-	if err != nil {
-		return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
-	}
-	r.mp = p
-
 	// Set static information
-	//r.meta = map[string]string{"source": r.name}
-	r.mp.AddAddMetaByCondition("true", "source", r.name)
+	r.meta = map[string]string{"source": r.name}
 
 	// Read the IPMI receiver specific JSON config
 	if len(config) > 0 {
@@ -380,18 +360,12 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
 		}
 	}
 
-	if len(r.config.MessageProcessor) > 0 {
-		err = r.mp.FromConfigJSON(r.config.MessageProcessor)
-		if err != nil {
-			return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
-		}
-	}
 	// Convert interval string representation to duration
+
+	var err error
 	r.config.Interval, err = time.ParseDuration(configJSON.IntervalString)
 	if err != nil {
 		err := fmt.Errorf(
-			"failed to parse duration string interval='%s': %w",
+			"Failed to parse duration string interval='%s': %w",
 			configJSON.IntervalString,
 			err,
 		)
@@ -532,16 +506,6 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
 		for _, key := range configJSON.ExcludeMetrics {
 			isExcluded[key] = true
 		}
-		p, err := mp.NewMessageProcessor()
-		if err != nil {
-			return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
-		}
-		if len(clientConfigJSON.MessageProcessor) > 0 {
-			err = p.FromConfigJSON(clientConfigJSON.MessageProcessor)
-			if err != nil {
-				return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
-			}
-		}
 
 		r.config.ClientConfigs = append(
 			r.config.ClientConfigs,
@@ -556,7 +520,6 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
 				Password:   password,
 				CLIOptions: cliOptions,
 				isExcluded: isExcluded,
-				mp:         p,
 			})
 	}
 
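
A plausible receiver configuration after this change, using only the JSON keys visible in the configJSON struct above; the `-W discretereading` workaround flag for ipmi-sensors and the surrounding layout are assumptions for illustration:

```json
{
  "myipmi": {
    "type": "ipmi",
    "interval": "30s",
    "client_config": [
      {
        "exclude_metrics": ["fan_speed"],
        "cli_options": ["-W", "discretereading"]
      }
    ]
  }
}
```
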
@@ -1,15 +1,11 @@
 package receivers
 
 import (
 	"encoding/json"
 
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
-	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 )
 
 type defaultReceiverConfig struct {
-	Type             string          `json:"type"`
-	MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
+	Type string `json:"type"`
 }
 
 // Receiver configuration: Listen address, port
@@ -23,15 +19,14 @@ type ReceiverConfig struct {
 
 type receiver struct {
 	name string
-	sink chan lp.CCMessage
-	mp   mp.MessageProcessor
+	sink chan lp.CCMetric
 }
 
 type Receiver interface {
 	Start()
-	Close()                         // Close / finish metric receiver
-	Name() string                   // Name of the metric receiver
-	SetSink(sink chan lp.CCMessage) // Set sink channel
+	Close()                        // Close / finish metric receiver
+	Name() string                  // Name of the metric receiver
+	SetSink(sink chan lp.CCMetric) // Set sink channel
}
 
 // Name returns the name of the metric receiver
@@ -40,6 +35,6 @@ func (r *receiver) Name() string {
 }
 
 // SetSink sets the sink channel
-func (r *receiver) SetSink(sink chan lp.CCMessage) {
+func (r *receiver) SetSink(sink chan lp.CCMetric) {
 	r.sink = sink
 }
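
For context, the smallest type that satisfies this interface after the revert is a sketch along the following lines; `Name()` and `SetSink()` come from the embedded `receiver` struct, and the `lp`/`time` imports are assumed as in the file above. This is illustrative, not part of the commit (SampleReceiver in the repository plays the same role):

```go
type dummyReceiver struct {
	receiver // provides name, sink, Name(), SetSink()
}

func (r *dummyReceiver) Start() {
	// Emit a single metric and return; real receivers run until Close().
	y, err := lp.New("dummy",
		map[string]string{"type": "node"},
		map[string]string{"source": r.name},
		map[string]interface{}{"value": 42.0},
		time.Now())
	if err == nil && r.sink != nil {
		r.sink <- y
	}
}

func (r *dummyReceiver) Close() {}
```
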
@@ -4,28 +4,25 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"os"
 	"time"
 
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 	influx "github.com/influxdata/line-protocol/v2/lineprotocol"
 	nats "github.com/nats-io/nats.go"
 )
 
 type NatsReceiverConfig struct {
-	defaultReceiverConfig
-	Addr     string `json:"address"`
-	Port     string `json:"port"`
-	Subject  string `json:"subject"`
-	User     string `json:"user,omitempty"`
-	Password string `json:"password,omitempty"`
-	NkeyFile string `json:"nkey_file,omitempty"`
+	Type    string `json:"type"`
+	Addr    string `json:"address"`
+	Port    string `json:"port"`
+	Subject string `json:"subject"`
 }
 
 type NatsReceiver struct {
 	receiver
-	nc *nats.Conn
-	//meta map[string]string
+	nc     *nats.Conn
+	meta   map[string]string
 	config NatsReceiverConfig
 }
 
@@ -39,17 +36,65 @@ func (r *NatsReceiver) Start() {
 
 // _NatsReceive receives subscribed messages from the NATS server
 func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
 
-	if r.sink != nil {
-		messages, err := lp.FromBytes(m.Data)
+	d := influx.NewDecoderWithBytes(m.Data)
+	for d.Next() {
+
+		// Decode measurement name
+		measurement, err := d.Measurement()
 		if err != nil {
-			msg := "_NatsReceive: Failed to decode messages: " + err.Error()
+			msg := "_NatsReceive: Failed to decode measurement: " + err.Error()
 			cclog.ComponentError(r.name, msg)
 			return
 		}
-		for _, y := range messages {
-			m, err := r.mp.ProcessMessage(y)
-			if err == nil && m != nil && r.sink != nil {
-				r.sink <- m
+
+		// Decode tags
+		tags := make(map[string]string)
+		for {
+			key, value, err := d.NextTag()
+			if err != nil {
+				msg := "_NatsReceive: Failed to decode tag: " + err.Error()
+				cclog.ComponentError(r.name, msg)
+				return
+			}
+			if key == nil {
+				break
 			}
+			tags[string(key)] = string(value)
 		}
+
+		// Decode fields
+		fields := make(map[string]interface{})
+		for {
+			key, value, err := d.NextField()
+			if err != nil {
+				msg := "_NatsReceive: Failed to decode field: " + err.Error()
+				cclog.ComponentError(r.name, msg)
+				return
+			}
+			if key == nil {
+				break
+			}
+			fields[string(key)] = value.Interface()
+		}
+
+		// Decode time stamp
+		t, err := d.Time(influx.Nanosecond, time.Time{})
+		if err != nil {
+			msg := "_NatsReceive: Failed to decode time: " + err.Error()
+			cclog.ComponentError(r.name, msg)
+			return
+		}
+
+		y, _ := lp.New(
+			string(measurement),
+			tags,
+			r.meta,
+			fields,
+			t,
+		)
+
+		if r.sink != nil {
+			r.sink <- y
+		}
 	}
 }
@@ -64,7 +109,6 @@ func (r *NatsReceiver) Close() {
 
 // NewNatsReceiver creates a new Receiver which subscribes to messages from a NATS server
 func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
-	var uinfo nats.Option = nil
 	r := new(NatsReceiver)
 	r.name = fmt.Sprintf("NatsReceiver(%s)", name)
 
@@ -83,40 +127,16 @@ func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
 		len(r.config.Subject) == 0 {
 		return nil, errors.New("not all configuration variables set required by NatsReceiver")
 	}
-	p, err := mp.NewMessageProcessor()
-	if err != nil {
-		return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
-	}
-	r.mp = p
-	if len(r.config.MessageProcessor) > 0 {
-		err = r.mp.FromConfigJSON(r.config.MessageProcessor)
-		if err != nil {
-			return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
-		}
-	}
 
 	// Set metadata
-	// r.meta = map[string]string{
-	// 	"source": r.name,
-	// }
-	r.mp.AddAddMetaByCondition("true", "source", r.name)
-
-	if len(r.config.User) > 0 && len(r.config.Password) > 0 {
-		uinfo = nats.UserInfo(r.config.User, r.config.Password)
-	} else if len(r.config.NkeyFile) > 0 {
-		_, err := os.Stat(r.config.NkeyFile)
-		if err == nil {
-			uinfo = nats.UserCredentials(r.config.NkeyFile)
-		} else {
-			cclog.ComponentError(r.name, "NKEY file", r.config.NkeyFile, "does not exist: %v", err.Error())
-			return nil, err
-		}
+	r.meta = map[string]string{
+		"source": r.name,
 	}
 
 	// Connect to NATS server
 	url := fmt.Sprintf("nats://%s:%s", r.config.Addr, r.config.Port)
 	cclog.ComponentDebug(r.name, "NewNatsReceiver", url, "Subject", r.config.Subject)
-	if nc, err := nats.Connect(url, uinfo); err == nil {
+	if nc, err := nats.Connect(url); err == nil {
 		r.nc = nc
 	} else {
 		r.nc = nil
@@ -10,10 +10,7 @@ The `nats` receiver can be used to receive metrics from the NATS network. The `nats`
     "type": "nats",
     "address" : "nats-server.example.org",
     "port" : "4222",
-    "subject" : "subject",
-    "user": "natsuser",
-    "password": "natssecret",
-    "nkey_file": "/path/to/nkey_file"
+    "subject" : "subject"
   }
 }
 ```
@@ -22,9 +19,6 @@ The `nats` receiver can be used to receive metrics from the NATS network. The `nats`
 - `address`: Address of the NATS control server
 - `port`: Port of the NATS control server
 - `subject`: Subscribes to this subject and receive metrics
-- `user`: Connect to nats using this user
-- `password`: Connect to nats using this password
-- `nkey_file`: Path to credentials file with NKEY
 
 ### Debugging
 
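
With the authentication options removed, messages can be published to the configured subject without credentials. For a quick test, one could publish a line-protocol message with the NATS CLI (assuming it is installed; server and subject taken from the example config above):

```
nats pub --server nats-server.example.org:4222 subject \
  'cpu_load,hostname=myhost,type=node value=1.23 1700000000000000000'
```
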
@@ -13,7 +13,7 @@ import (
 	"time"
 
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 )
 
 type PrometheusReceiverConfig struct {
@@ -74,7 +74,7 @@ func (r *PrometheusReceiver) Start() {
 			}
 			value, err := strconv.ParseFloat(lineSplit[1], 64)
 			if err == nil {
-				y, err := lp.NewMessage(name, tags, r.meta, map[string]interface{}{"value": value}, t)
+				y, err := lp.New(name, tags, r.meta, map[string]interface{}{"value": value}, t)
 				if err == nil {
 					r.sink <- y
 				}
@@ -7,7 +7,7 @@ import (
 	"sync"
 
 	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
-	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
+	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
 )
 
 var AvailableReceivers = map[string]func(name string, config json.RawMessage) (Receiver, error){
@@ -19,14 +19,14 @@ var AvailableReceivers = map[string]func(name string, config json.RawMessage) (R
 
 type receiveManager struct {
 	inputs []Receiver
-	output chan lp.CCMessage
+	output chan lp.CCMetric
 	config []json.RawMessage
 }
 
 type ReceiveManager interface {
 	Init(wg *sync.WaitGroup, receiverConfigFile string) error
 	AddInput(name string, rawConfig json.RawMessage) error
-	AddOutput(output chan lp.CCMessage)
+	AddOutput(output chan lp.CCMetric)
 	Start()
 	Close()
 }
@@ -93,7 +93,7 @@ func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error
 	return nil
 }
 
-func (rm *receiveManager) AddOutput(output chan lp.CCMessage) {
+func (rm *receiveManager) AddOutput(output chan lp.CCMetric) {
 	rm.output = output
 	for _, r := range rm.inputs {
 		r.SetSink(rm.output)
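
A sketch of how the manager is wired up after the type change back to `lp.CCMetric`, using only the interface methods shown above. Instantiating the zero-value `receiveManager` and the `receivers.json` path are assumptions for illustration:

```go
func runReceivers(wg *sync.WaitGroup) (ReceiveManager, chan lp.CCMetric, error) {
	var rm ReceiveManager = &receiveManager{}
	// Init loads the per-receiver configs from the given file.
	if err := rm.Init(wg, "receivers.json"); err != nil {
		return nil, nil, err
	}
	sink := make(chan lp.CCMetric)
	rm.AddOutput(sink) // every receiver sends into this channel
	rm.Start()
	return rm, sink, nil
}
```
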
@@ -13,10 +13,9 @@ import (
"sync"
"time"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/ClusterCockpit/cc-metric-collector/pkg/hostlist"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"

// See: https://pkg.go.dev/github.com/stmcginnis/gofish
"github.com/stmcginnis/gofish"
@@ -43,8 +42,6 @@ type RedfishReceiverClientConfig struct {
readSensorURLs map[string][]string

gofish gofish.ClientConfig

mp mp.MessageProcessor
}

// RedfishReceiver configuration:
@@ -52,7 +49,6 @@ type RedfishReceiver struct {
receiver

config struct {
defaultReceiverConfig
fanout int
Interval time.Duration
HttpTimeout time.Duration
@@ -83,19 +79,13 @@ func setMetricValue(value any) map[string]interface{} {
}

// sendMetric sends the metric through the sink channel
func (r *RedfishReceiver) sendMetric(mp mp.MessageProcessor, name string, tags map[string]string, meta map[string]string, value any, timestamp time.Time) {
func (r *RedfishReceiver) sendMetric(name string, tags map[string]string, meta map[string]string, value any, timestamp time.Time) {

deleteEmptyTags(tags)
deleteEmptyTags(meta)
y, err := lp.NewMessage(name, tags, meta, setMetricValue(value), timestamp)
y, err := lp.New(name, tags, meta, setMetricValue(value), timestamp)
if err == nil {
mc, err := mp.ProcessMessage(y)
if err == nil && mc != nil {
m, err := r.mp.ProcessMessage(mc)
if err == nil && m != nil {
r.sink <- m
}
}
r.sink <- y
}
}

@@ -129,7 +119,7 @@ func (r *RedfishReceiver) readSensors(
"unit": "degC",
}

r.sendMetric(clientConfig.mp, "temperature", tags, meta, sensor.Reading, time.Now())
r.sendMetric("temperature", tags, meta, sensor.Reading, time.Now())
}

writeFanSpeedSensor := func(sensor *redfish.Sensor) {
@@ -155,7 +145,7 @@ func (r *RedfishReceiver) readSensors(
"unit": string(sensor.ReadingUnits),
}

r.sendMetric(clientConfig.mp, "fan_speed", tags, meta, sensor.Reading, time.Now())
r.sendMetric("fan_speed", tags, meta, sensor.Reading, time.Now())
}

writePowerSensor := func(sensor *redfish.Sensor) {
@@ -182,7 +172,7 @@ func (r *RedfishReceiver) readSensors(
"unit": "watts",
}

r.sendMetric(clientConfig.mp, "power", tags, meta, sensor.Reading, time.Now())
r.sendMetric("power", tags, meta, sensor.Reading, time.Now())
}

if _, ok := clientConfig.readSensorURLs[chassis.ID]; !ok {
@@ -350,7 +340,7 @@ func (r *RedfishReceiver) readThermalMetrics(
// ReadingCelsius shall be the current value of the temperature sensor's reading.
value := temperature.ReadingCelsius

r.sendMetric(clientConfig.mp, "temperature", tags, meta, value, timestamp)
r.sendMetric("temperature", tags, meta, value, timestamp)
}

for _, fan := range thermal.Fans {
@@ -391,7 +381,7 @@ func (r *RedfishReceiver) readThermalMetrics(
"unit": string(fan.ReadingUnits),
}

r.sendMetric(clientConfig.mp, "fan_speed", tags, meta, fan.Reading, timestamp)
r.sendMetric("fan_speed", tags, meta, fan.Reading, timestamp)
}

return nil
@@ -489,7 +479,7 @@ func (r *RedfishReceiver) readPowerMetrics(
}

for name, value := range metrics {
r.sendMetric(clientConfig.mp, name, tags, meta, value, timestamp)
r.sendMetric(name, tags, meta, value, timestamp)
}
}

@@ -571,7 +561,7 @@ func (r *RedfishReceiver) readProcessorMetrics(
if !clientConfig.isExcluded[namePower] &&
// Some servers return "ConsumedPowerWatt":65535 instead of "ConsumedPowerWatt":null
processorMetrics.ConsumedPowerWatt != 65535 {
r.sendMetric(clientConfig.mp, namePower, tags, metaPower, processorMetrics.ConsumedPowerWatt, timestamp)
r.sendMetric(namePower, tags, metaPower, processorMetrics.ConsumedPowerWatt, timestamp)
}
// Set meta data tags
metaThermal := map[string]string{
@@ -583,7 +573,7 @@ func (r *RedfishReceiver) readProcessorMetrics(
nameThermal := "temperature"

if !clientConfig.isExcluded[nameThermal] {
r.sendMetric(clientConfig.mp, nameThermal, tags, metaThermal, processorMetrics.TemperatureCelsius, timestamp)
r.sendMetric(nameThermal, tags, metaThermal, processorMetrics.TemperatureCelsius, timestamp)
}
return nil
}
@@ -786,13 +776,11 @@ func (r *RedfishReceiver) Close() {
// NewRedfishReceiver creates a new instance of the redfish receiver
// Initialize the receiver by giving it a name and reading in the config JSON
func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
var err error
r := new(RedfishReceiver)

// Config options from config file
configJSON := struct {
Type string `json:"type"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
Type string `json:"type"`

// Maximum number of simultaneous redfish connections (default: 64)
Fanout int `json:"fanout,omitempty"`
@@ -832,8 +820,7 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
DisableThermalMetrics bool `json:"disable_thermal_metrics"`

// Per client excluded metrics
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
} `json:"client_config"`
}{
// Set default values
@@ -859,24 +846,13 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
return nil, err
}
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}

// Convert interval string representation to duration

var err error
r.config.Interval, err = time.ParseDuration(configJSON.IntervalString)
if err != nil {
err := fmt.Errorf(
"failed to parse duration string interval='%s': %w",
"Failed to parse duration string interval='%s': %w",
configJSON.IntervalString,
err,
)
@@ -888,7 +864,7 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
r.config.HttpTimeout, err = time.ParseDuration(configJSON.HttpTimeoutString)
if err != nil {
err := fmt.Errorf(
"failed to parse duration string http_timeout='%s': %w",
"Failed to parse duration string http_timeout='%s': %w",
configJSON.HttpTimeoutString,
err,
)
@@ -972,18 +948,6 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
for _, key := range configJSON.ExcludeMetrics {
isExcluded[key] = true
}
p, err = mp.NewMessageProcessor()
if err != nil {
cclog.ComponentError(r.name, err.Error())
return nil, err
}
if len(clientConfigJSON.MessageProcessor) > 0 {
err = p.FromConfigJSON(clientConfigJSON.MessageProcessor)
if err != nil {
cclog.ComponentError(r.name, err.Error())
return nil, err
}
}

hostList, err := hostlist.Expand(clientConfigJSON.HostList)
if err != nil {
@@ -1014,7 +978,6 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
Endpoint: endpoint,
HTTPClient: httpClient,
},
mp: p,
})
}

@@ -1039,7 +1002,7 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
for i := range r.config.ClientConfigs {
host := r.config.ClientConfigs[i].Hostname
if isDuplicate[host] {
err := fmt.Errorf("found duplicate client config for host %s", host)
err := fmt.Errorf("Found duplicate client config for host %s", host)
cclog.ComponentError(r.name, err)
return nil, err
}
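The duplicate-host check in the last hunk is a plain set-membership test. A standalone sketch of the same idea, with made-up hostnames:

```go
package main

import "fmt"

// findDuplicate returns the first hostname that appears twice.
func findDuplicate(hosts []string) (string, bool) {
	seen := make(map[string]bool)
	for _, h := range hosts {
		if seen[h] {
			return h, true
		}
		seen[h] = true
	}
	return "", false
}

func main() {
	hosts := []string{"bmc01", "bmc02", "bmc01"}
	if h, dup := findDuplicate(hosts); dup {
		fmt.Printf("found duplicate client config for host %s\n", h)
	}
}
```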
@@ -5,13 +5,11 @@ import (
"fmt"

cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
)

// SampleReceiver configuration: receiver type, listen address, port
// The defaultReceiverConfig contains the keys 'type' and 'process_messages'
type SampleReceiverConfig struct {
defaultReceiverConfig
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
}
@@ -21,6 +19,7 @@ type SampleReceiver struct {
config SampleReceiverConfig

// Storage for static information
meta map[string]string
// Use in case of own go routine
// done chan bool
// wg sync.WaitGroup
@@ -80,19 +79,8 @@ func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
// The name should be chosen in such a way that different instances of SampleReceiver can be distinguished
r.name = fmt.Sprintf("SampleReceiver(%s)", name)

// create new message processor
p, err := mp.NewMessageProcessor()
if err != nil {
cclog.ComponentError(r.name, "Initialization of message processor failed:", err.Error())
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
// Set static information
err = r.mp.AddAddMetaByCondition("true", "source", r.name)
if err != nil {
cclog.ComponentError(r.name, fmt.Sprintf("Failed to add static information source=%s:", r.name), err.Error())
return nil, fmt.Errorf("failed to add static information source=%s: %v", r.name, err.Error())
}
r.meta = map[string]string{"source": r.name}

// Set defaults in r.config
// Allow overwriting these defaults by reading config JSON
@@ -106,15 +94,6 @@ func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
}
}

// Add message processor config
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
cclog.ComponentError(r.name, "Failed parsing JSON for message processor:", err.Error())
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}

// Check that all required fields in the configuration are set
// Use 'if len(r.config.Option) > 0' for strings
router.json (39 changes)
@@ -1,23 +1,22 @@
{
"process_messages" : {
"add_tag_if": [
{
"key" : "cluster",
"value" : "testcluster",
"if" : "true"
},
{
"key" : "test",
"value" : "testing",
"if" : "name == 'temp_package_id_0'"
}
],
"delete_tag_if": [
{
"key" : "unit",
"if" : "true"
}
]
},
"add_tags" : [
{
"key" : "cluster",
"value" : "testcluster",
"if" : "*"
},
{
"key" : "test",
"value" : "testing",
"if" : "name == 'temp_package_id_0'"
}
],
"delete_tags" : [
{
"key" : "unit",
"value" : "*",
"if" : "*"
}
],
"interval_timestamp" : true
}
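A small sketch of how the new `process_messages` rule shape from this hunk can be decoded in Go. The struct layout is inferred from the JSON above, not taken from the repository:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tagRule mirrors one entry of add_tag_if / delete_tag_if as shown above.
type tagRule struct {
	Key   string `json:"key"`
	Value string `json:"value,omitempty"`
	If    string `json:"if"`
}

type processMessagesConfig struct {
	AddTagIf    []tagRule `json:"add_tag_if"`
	DeleteTagIf []tagRule `json:"delete_tag_if"`
}

func main() {
	raw := []byte(`{
		"add_tag_if": [
			{"key": "cluster", "value": "testcluster", "if": "true"}
		],
		"delete_tag_if": [
			{"key": "unit", "if": "true"}
		]
	}`)
	var cfg processMessagesConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```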
@@ -1,175 +0,0 @@
#!/bin/bash -l

SRCDIR="$(pwd)"
DESTDIR="$1"

if [ -z "$DESTDIR" ]; then
echo "Destination folder not provided"
exit 1
fi


COLLECTORS=$(find "${SRCDIR}/collectors" -name "*Metric.md")
SINKS=$(find "${SRCDIR}/sinks" -name "*Sink.md")
RECEIVERS=$(find "${SRCDIR}/receivers" -name "*Receiver.md")



# Collectors
mkdir -p "${DESTDIR}/collectors"
for F in $COLLECTORS; do
echo "$F"
FNAME=$(basename "$F")
TITLE=$(grep -E "^##" "$F" | head -n 1 | sed -e 's+## ++g')
echo "'${TITLE//\`/}'"
if [ "${TITLE}" == "" ]; then continue; fi
rm --force "${DESTDIR}/collectors/${FNAME}"
cat << EOF >> "${DESTDIR}/collectors/${FNAME}"
---
title: ${TITLE//\`/}
description: >
Toplevel ${FNAME/.md/}
categories: [cc-metric-collector]
tags: [cc-metric-collector, Collector, ${FNAME/Metric.md/}]
weight: 2
---

EOF
cat "$F" >> "${DESTDIR}/collectors/${FNAME}"
done

if [ -e "${SRCDIR}/collectors/README.md" ]; then
cat << EOF > "${DESTDIR}/collectors/_index.md"
---
title: cc-metric-collector's collectors
description: Documentation of cc-metric-collector's collectors
categories: [cc-metric-collector]
tags: [cc-metric-collector, Collector, General]
weight: 40
---

EOF
cat "${SRCDIR}/collectors/README.md" >> "${DESTDIR}/collectors/_index.md"
fi

# Sinks
mkdir -p "${DESTDIR}/sinks"
for F in $SINKS; do
echo "$F"
FNAME=$(basename "$F")
TITLE=$(grep -E "^##" "$F" | head -n 1 | sed -e 's+## ++g')
echo "'${TITLE//\`/}'"
if [ "${TITLE}" == "" ]; then continue; fi
rm --force "${DESTDIR}/sinks/${FNAME}"
cat << EOF >> "${DESTDIR}/sinks/${FNAME}"
---
title: ${TITLE//\`/}
description: >
Toplevel ${FNAME/.md/}
categories: [cc-metric-collector]
tags: [cc-metric-collector, Sink, ${FNAME/Sink.md/}]
weight: 2
---

EOF
cat "$F" >> "${DESTDIR}/sinks/${FNAME}"
done

if [ -e "${SRCDIR}/collectors/README.md" ]; then
cat << EOF > "${DESTDIR}/sinks/_index.md"
---
title: cc-metric-collector's sinks
description: Documentation of cc-metric-collector's sinks
categories: [cc-metric-collector]
tags: [cc-metric-collector, Sink, General]
weight: 40
---

EOF
cat "${SRCDIR}/sinks/README.md" >> "${DESTDIR}/sinks/_index.md"
fi


# Receivers
mkdir -p "${DESTDIR}/receivers"
for F in $RECEIVERS; do
echo "$F"
FNAME=$(basename "$F")
TITLE=$(grep -E "^##" "$F" | head -n 1 | sed -e 's+## ++g')
echo "'${TITLE//\`/}'"
if [ "${TITLE}" == "" ]; then continue; fi
rm --force "${DESTDIR}/receivers/${FNAME}"
cat << EOF >> "${DESTDIR}/receivers/${FNAME}"
---
title: ${TITLE//\`/}
description: >
Toplevel ${FNAME/.md/}
categories: [cc-metric-collector]
tags: [cc-metric-collector, Receiver, ${FNAME/Receiver.md/}]
weight: 2
---

EOF
cat "$F" >> "${DESTDIR}/receivers/${FNAME}"
done

if [ -e "${SRCDIR}/receivers/README.md" ]; then
cat << EOF > "${DESTDIR}/receivers/_index.md"
---
title: cc-metric-collector's receivers
description: Documentation of cc-metric-collector's receivers
categories: [cc-metric-collector]
tags: [cc-metric-collector, Receiver, General]
weight: 40
---

EOF
cat "${SRCDIR}/receivers/README.md" >> "${DESTDIR}/receivers/_index.md"
fi

mkdir -p "${DESTDIR}/internal/metricRouter"
if [ -e "${SRCDIR}/internal/metricRouter/README.md" ]; then
cat << EOF > "${DESTDIR}/internal/metricRouter/_index.md"
---
title: cc-metric-collector's router
description: Documentation of cc-metric-collector's router
categories: [cc-metric-collector]
tags: [cc-metric-collector, Router, General]
weight: 40
---

EOF
cat "${SRCDIR}/internal/metricRouter/README.md" >> "${DESTDIR}/internal/metricRouter/_index.md"
fi

if [ -e "${SRCDIR}/README.md" ]; then
cat << EOF > "${DESTDIR}/_index.md"
---
title: cc-metric-collector
description: Documentation of cc-metric-collector
categories: [cc-metric-collector]
tags: [cc-metric-collector, General]
weight: 40
---

EOF
cat "${SRCDIR}/README.md" >> "${DESTDIR}/_index.md"
sed -i -e 's+README.md+_index.md+g' "${DESTDIR}/_index.md"
fi


mkdir -p "${DESTDIR}/pkg/messageProcessor"
if [ -e "${SRCDIR}/pkg/messageProcessor/README.md" ]; then
cat << EOF > "${DESTDIR}/pkg/messageProcessor/_index.md"
---
title: cc-metric-collector's message processor
description: Documentation of cc-metric-collector's message processor
categories: [cc-metric-collector]
tags: [cc-metric-collector, Message Processor]
weight: 40
---

EOF
cat "${SRCDIR}/pkg/messageProcessor/README.md" >> "${DESTDIR}/pkg/messageProcessor/_index.md"
fi
@@ -17,7 +17,7 @@ This folder contains the SinkManager and sink implementations for the cc-metric-
The configuration file for the sinks is a list of configurations. The `type` field in each specifies which sink to initialize.

```json
{
[
"mystdout" : {
"type" : "stdout",
"meta_as_tags" : [
@@ -31,7 +31,7 @@ The configuration file for the sinks is a list of configurations. The `type` fie
"database" : "ccmetric",
"password" : "<jwt token>"
}
}
]
```
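Such a name-keyed sink configuration is typically decoded in two stages: split the per-sink entries as raw JSON first, then peek at the `type` field to pick an implementation. A sketch under that assumption (the types here are illustrative, not the repository's):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{
		"mystdout": {"type": "stdout"},
		"myhttp":   {"type": "http"}
	}`)
	// First stage: split per-sink configs without interpreting them.
	var perSink map[string]json.RawMessage
	if err := json.Unmarshal(raw, &perSink); err != nil {
		panic(err)
	}
	// Second stage: read only the "type" field of each entry.
	for name, cfg := range perSink {
		var t struct {
			Type string `json:"type"`
		}
		if err := json.Unmarshal(cfg, &t); err != nil {
			panic(err)
		}
		fmt.Printf("sink %s has type %s\n", name, t.Type)
	}
}
```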
@@ -4,10 +4,10 @@ import (
"fmt"
"strings"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

func GangliaMetricName(point lp.CCMessage) string {
func GangliaMetricName(point lp.CCMetric) string {
name := point.Name()
metricType, typeOK := point.GetTag("type")
metricTid, tidOk := point.GetTag("type-id")
@@ -39,7 +39,7 @@ func GangliaMetricRename(name string) string {
return name
}

func GangliaSlopeType(point lp.CCMessage) uint {
func GangliaSlopeType(point lp.CCMetric) uint {
name := point.Name()
if name == "mem_total" || name == "swap_total" {
return 0
@@ -151,7 +151,7 @@ type GangliaMetricConfig struct {
Name string
}

func GetCommonGangliaConfig(point lp.CCMessage) GangliaMetricConfig {
func GetCommonGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
mname := GangliaMetricRename(point.Name())
if oldname, ok := point.GetMeta("oldname"); ok {
mname = GangliaMetricRename(oldname)
@@ -207,7 +207,7 @@ func GetCommonGangliaConfig(point lp.CCMessage) GangliaMetricConfig {
}
}

func GetGangliaConfig(point lp.CCMessage) GangliaMetricConfig {
func GetGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
mname := GangliaMetricRename(point.Name())
if oldname, ok := point.GetMeta("oldname"); ok {
mname = GangliaMetricRename(oldname)
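`GangliaMetricName` builds per-component names from the `type` and `type-id` tags; the libganglia README later in this changeset describes the resulting `<type><type-id>_` prefix, with no prefix for node-level metrics. A simplified sketch of that naming rule (tag handling and examples are illustrative):

```go
package main

import "fmt"

// gangliaName prefixes a metric name with <type><type-id>_ unless the
// metric describes the whole node.
func gangliaName(name string, tags map[string]string) string {
	t, okT := tags["type"]
	tid, okTid := tags["type-id"]
	if okT && okTid && t != "node" {
		return fmt.Sprintf("%s%s_%s", t, tid, name)
	}
	return name
}

func main() {
	fmt.Println(gangliaName("temp", map[string]string{"type": "socket", "type-id": "0"})) // socket0_temp
	fmt.Println(gangliaName("load_one", map[string]string{"type": "node"}))               // load_one
}
```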
@@ -10,9 +10,8 @@ import (
// "time"
"os/exec"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const GMETRIC_EXEC = `gmetric`
@@ -36,53 +35,50 @@ type GangliaSink struct {
config GangliaSinkConfig
}

func (s *GangliaSink) Write(msg lp.CCMessage) error {
func (s *GangliaSink) Write(point lp.CCMetric) error {
var err error = nil
//var tagsstr []string
var argstr []string

point, err := s.mp.ProcessMessage(msg)
if err == nil && point != nil {
// Get metric config (type, value, ... in suitable format)
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}

if s.config.AddGangliaGroup {
argstr = append(argstr, fmt.Sprintf("--group=%s", conf.Group))
}
if s.config.AddUnits && len(conf.Unit) > 0 {
argstr = append(argstr, fmt.Sprintf("--units=%s", conf.Unit))
}

if len(s.config.ClusterName) > 0 {
argstr = append(argstr, fmt.Sprintf("--cluster=%s", s.config.ClusterName))
}
// if s.config.AddTagsAsDesc && len(tagsstr) > 0 {
// argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
// }
if len(s.gmetric_config) > 0 {
argstr = append(argstr, fmt.Sprintf("--conf=%s", s.gmetric_config))
}
if s.config.AddTypeToName {
argstr = append(argstr, fmt.Sprintf("--name=%s", GangliaMetricName(point)))
} else {
argstr = append(argstr, fmt.Sprintf("--name=%s", conf.Name))
}
argstr = append(argstr, fmt.Sprintf("--slope=%s", conf.Slope))
argstr = append(argstr, fmt.Sprintf("--value=%s", conf.Value))
argstr = append(argstr, fmt.Sprintf("--type=%s", conf.Type))
argstr = append(argstr, fmt.Sprintf("--tmax=%d", conf.Tmax))

cclog.ComponentDebug(s.name, s.gmetric_path, strings.Join(argstr, " "))
command := exec.Command(s.gmetric_path, argstr...)
command.Wait()
_, err = command.Output()
// Get metric config (type, value, ... in suitable format)
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}

if s.config.AddGangliaGroup {
argstr = append(argstr, fmt.Sprintf("--group=%s", conf.Group))
}
if s.config.AddUnits && len(conf.Unit) > 0 {
argstr = append(argstr, fmt.Sprintf("--units=%s", conf.Unit))
}

if len(s.config.ClusterName) > 0 {
argstr = append(argstr, fmt.Sprintf("--cluster=%s", s.config.ClusterName))
}
// if s.config.AddTagsAsDesc && len(tagsstr) > 0 {
// argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
// }
if len(s.gmetric_config) > 0 {
argstr = append(argstr, fmt.Sprintf("--conf=%s", s.gmetric_config))
}
if s.config.AddTypeToName {
argstr = append(argstr, fmt.Sprintf("--name=%s", GangliaMetricName(point)))
} else {
argstr = append(argstr, fmt.Sprintf("--name=%s", conf.Name))
}
argstr = append(argstr, fmt.Sprintf("--slope=%s", conf.Slope))
argstr = append(argstr, fmt.Sprintf("--value=%s", conf.Value))
argstr = append(argstr, fmt.Sprintf("--type=%s", conf.Type))
argstr = append(argstr, fmt.Sprintf("--tmax=%d", conf.Tmax))

cclog.ComponentDebug(s.name, s.gmetric_path, strings.Join(argstr, " "))
command := exec.Command(s.gmetric_path, argstr...)
command.Wait()
_, err = command.Output()
return err
}

@@ -108,13 +104,6 @@ func NewGangliaSink(name string, config json.RawMessage) (Sink, error) {
}
s.gmetric_path = ""
s.gmetric_config = ""

p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p

if len(s.config.GmetricPath) > 0 {
p, err := exec.LookPath(s.config.GmetricPath)
if err == nil {
@@ -133,15 +122,5 @@ func NewGangliaSink(name string, config json.RawMessage) (Sink, error) {
if len(s.config.GmetricConfig) > 0 {
s.gmetric_config = s.config.GmetricConfig
}
if len(s.config.MessageProcessor) > 0 {
err = s.mp.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
}

return s, nil
}
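The end of `Write` shells out to `gmetric` with the assembled argument list. A stripped-down, runnable sketch of that exec call; the arguments are a small example, not the full set built above:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	argstr := []string{"--name=load_one", "--value=1.5", "--type=double", "--slope=both"}
	fmt.Println("gmetric", strings.Join(argstr, " "))
	// Output runs the command and waits for it; it fails cleanly if
	// gmetric is not installed on this machine.
	cmd := exec.Command("gmetric", argstr...)
	if out, err := cmd.Output(); err != nil {
		fmt.Println("gmetric failed:", err)
	} else {
		fmt.Printf("%s", out)
	}
}
```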
@@ -8,18 +8,14 @@ The `ganglia` sink uses the `gmetric` tool of the [Ganglia Monitoring System](ht
{
"<name>": {
"type": "ganglia",
"meta_as_tags" : true,
"gmetric_path" : "/path/to/gmetric",
"add_ganglia_group" : true,
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"add_ganglia_group" : true
}
}
```

- `type`: makes the sink a `ganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmetric_path`: Path to `gmetric` executable (optional). If not given, the sink searches in `$PATH` for `gmetric`.
- `add_ganglia_group`: Add `--group=X` based on meta information to the `gmetric` call. Some old versions of `gmetric` do not support the `--group` option.
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
- `add_ganglia_group`: Add `--group=X` based on meta information to the `gmetric` call. Some old versions of `gmetric` do not support the `--group` option.
@@ -9,9 +9,8 @@ import (
"sync"
"time"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
"golang.org/x/exp/slices"
)
@@ -51,12 +50,18 @@ type HttpSinkConfig struct {
Precision string `json:"precision,omitempty"`
}

type key_value_pair struct {
key string
value string
}

type HttpSink struct {
sink
client *http.Client
// influx line protocol encoder
encoder influx.Encoder

// List of tags and meta data tags which should be used as tags
extended_tag_list []key_value_pair
// Flush() runs in another goroutine and accesses the influx line protocol encoder,
// so this encoderLock has to protect the encoder
encoderLock sync.Mutex
@@ -70,25 +75,78 @@ type HttpSink struct {
}

// Write sends metric m as http message
func (s *HttpSink) Write(msg lp.CCMessage) error {
func (s *HttpSink) Write(m lp.CCMetric) error {

// submit m only after applying processing/dropping rules
m, err := s.mp.ProcessMessage(msg)
if err == nil && m != nil {
// Lock for encoder usage
s.encoderLock.Lock()
// Lock for encoder usage
s.encoderLock.Lock()

err = EncoderAdd(&s.encoder, m)
// Encode measurement name
s.encoder.StartLine(m.Name())

// Unlock encoder usage
s.encoderLock.Unlock()

// Check that encoding worked
if err != nil {
return fmt.Errorf("encoding failed: %v", err)
// copy tags and meta data which should be used as tags
s.extended_tag_list = s.extended_tag_list[:0]
for key, value := range m.Tags() {
s.extended_tag_list =
append(
s.extended_tag_list,
key_value_pair{
key: key,
value: value,
},
)
}
for _, key := range s.config.MetaAsTags {
if value, ok := m.GetMeta(key); ok {
s.extended_tag_list =
append(
s.extended_tag_list,
key_value_pair{
key: key,
value: value,
},
)
}
}

// Encode tags (they must be in lexical order)
slices.SortFunc(
s.extended_tag_list,
func(a key_value_pair, b key_value_pair) int {
if a.key < b.key {
return -1
}
if a.key > b.key {
return +1
}
return 0
},
)
for i := range s.extended_tag_list {
s.encoder.AddTag(
s.extended_tag_list[i].key,
s.extended_tag_list[i].value,
)
}

// Encode fields
for key, value := range m.Fields() {
s.encoder.AddField(key, influx.MustNewValue(value))
}

// Encode time stamp
s.encoder.EndLine(m.Time())

// Check for encoder errors
err := s.encoder.Err()

// Unlock encoder usage
s.encoderLock.Unlock()

// Check that encoding worked
if err != nil {
return fmt.Errorf("encoding failed: %v", err)
}

if s.config.flushDelay == 0 {

// Directly flush if no flush delay is configured
@@ -213,7 +271,7 @@ func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
s.config.Timeout = "5s"
s.config.FlushDelay = "5s"
s.config.MaxRetries = 3
s.config.Precision = "s"
s.config.Precision = "ns"
cclog.ComponentDebug(s.name, "Init()")

// Read config
@@ -239,11 +297,6 @@ func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
if s.config.useBasicAuth && len(s.config.Password) == 0 {
return nil, errors.New("basic authentication requires password")
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p

if len(s.config.IdleConnTimeout) > 0 {
t, err := time.ParseDuration(s.config.IdleConnTimeout)
@@ -266,17 +319,7 @@ func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
cclog.ComponentDebug(s.name, "Init(): flushDelay", t)
}
}
if len(s.config.MessageProcessor) > 0 {
err = p.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
}

precision := influx.Second
precision := influx.Nanosecond
if len(s.config.Precision) > 0 {
switch s.config.Precision {
case "s":
@@ -301,6 +344,7 @@ func NewHttpSink(name string, config json.RawMessage) (Sink, error) {

// Configure influx line protocol encoder
s.encoder.SetPrecision(precision)
s.extended_tag_list = make([]key_value_pair, 0)

return s, nil
}
@@ -8,6 +8,9 @@ The `http` sink uses POST requests to a HTTP server to submit the metrics in the
{
"<name>": {
"type": "http",
"meta_as_tags" : [
"meta-key"
],
"url" : "https://my-monitoring.example.com:1234/api/write",
"jwt" : "blabla.blabla.blabla",
"username": "myUser",
@@ -16,16 +19,13 @@ The `http` sink uses POST requests to a HTTP server to submit the metrics in the
"idle_connection_timeout" : "5s",
"flush_delay": "2s",
"batch_size": 1000,
"precision": "s",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"precision": "s"
}
}
```

- `type`: makes the sink an `http` sink
- `meta_as_tags`: Move specific meta information to the tags in the output (optional)
- `url`: The full URL of the endpoint
- `jwt`: JSON web tokens for authentication (Using the *Bearer* scheme)
- `username`: username for basic authentication
@@ -35,10 +35,8 @@ The `http` sink uses POST requests to a HTTP server to submit the metrics in the
- `idle_connection_timeout`: Timeout for idle connections (default '120s'). Should be larger than the measurement interval to keep the connection open
- `flush_delay`: Batch all writes arriving during this duration (default '1s', batching can be disabled by setting it to 0)
- `batch_size`: Maximal batch size. If `batch_size` is reached before the end of `flush_delay`, the metrics are sent without further delay
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 'ns')

### Using `http` sink for communication with cc-metric-store
### Using HttpSink for communication with cc-metric-store

The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.
The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to set `"precision": "s"`.
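Both sides of this changeset map the `precision` string through the same switch. Extracted as a tiny helper for clarity; the mapping mirrors the hunks above, and the `time.Second` fallback matches the old `"s"` default:

```go
package main

import (
	"fmt"
	"time"
)

// parsePrecision converts a precision string to a duration unit.
func parsePrecision(p string) time.Duration {
	switch p {
	case "ms":
		return time.Millisecond
	case "us":
		return time.Microsecond
	case "ns":
		return time.Nanosecond
	default: // "s" and anything unrecognized
		return time.Second
	}
}

func main() {
	fmt.Println(parsePrecision("s"), parsePrecision("ns")) // 1s 1ns
}
```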
@@ -10,9 +10,8 @@ import (
"strings"
"time"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
influxdb2ApiHttp "github.com/influxdata/influxdb-client-go/v2/api/http"
@@ -37,8 +36,6 @@ type InfluxAsyncSinkConfig struct {
InfluxMaxRetryTime string `json:"max_retry_time,omitempty"`
CustomFlushInterval string `json:"custom_flush_interval,omitempty"`
MaxRetryAttempts uint `json:"max_retry_attempts,omitempty"`
// Timestamp precision
Precision string `json:"precision,omitempty"`
}

type InfluxAsyncSink struct {
@@ -96,22 +93,7 @@ func (s *InfluxAsyncSink) connect() error {
&tls.Config{
InsecureSkipVerify: true,
},
)

precision := time.Second
if len(s.config.Precision) > 0 {
switch s.config.Precision {
case "s":
precision = time.Second
case "ms":
precision = time.Millisecond
case "us":
precision = time.Microsecond
case "ns":
precision = time.Nanosecond
}
}
clientOptions.SetPrecision(precision)
).SetPrecision(time.Second)

s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
s.writeApi = s.client.WriteAPI(s.config.Organization, s.config.Database)
@@ -130,7 +112,7 @@ func (s *InfluxAsyncSink) connect() error {
return nil
}

func (s *InfluxAsyncSink) Write(m lp.CCMessage) error {
func (s *InfluxAsyncSink) Write(m lp.CCMetric) error {
if s.customFlushInterval != 0 && s.flushTimer == nil {
// Run a batched flush for all lines that have arrived in the defined interval
s.flushTimer = time.AfterFunc(s.customFlushInterval, func() {
@@ -139,10 +121,9 @@ func (s *InfluxAsyncSink) Write(m lp.CCMessage) error {
}
})
}
msg, err := s.mp.ProcessMessage(m)
if err == nil && msg != nil {
s.writeApi.WritePoint(msg.ToPoint(nil))
}
s.writeApi.WritePoint(
m.ToPoint(s.meta_as_tags),
)
return nil
}

@@ -177,7 +158,6 @@ func NewInfluxAsyncSink(name string, config json.RawMessage) (Sink, error) {
s.config.CustomFlushInterval = ""
s.customFlushInterval = time.Duration(0)
s.config.MaxRetryAttempts = 1
s.config.Precision = "s"

// Default retry intervals (in seconds)
// 1 2
@@ -220,24 +200,10 @@ func NewInfluxAsyncSink(name string, config json.RawMessage) (Sink, error) {
if len(s.config.Password) == 0 {
return nil, errors.New("missing password configuration required by InfluxSink")
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.MessageProcessor) > 0 {
err = s.mp.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Create lookup map to use meta infos as tags in the output metric
// s.meta_as_tags = make(map[string]bool)
// for _, k := range s.config.MetaAsTags {
// s.meta_as_tags[k] = true
// }
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
s.meta_as_tags[k] = true
}

toUint := func(duration string, def uint) uint {
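The `customFlushInterval` logic above arms a `time.AfterFunc` timer on the first write so that everything arriving within the window is flushed together. A toy version of that pattern; buffer, flush logic, and interval are illustrative, not the sink's real code:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type batchingSink struct {
	mu         sync.Mutex
	buf        []string
	flushTimer *time.Timer
	interval   time.Duration
}

// Write buffers a line and arms the flush timer on the first write.
func (s *batchingSink) Write(line string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.buf = append(s.buf, line)
	if s.flushTimer == nil {
		s.flushTimer = time.AfterFunc(s.interval, s.flush)
	}
}

// flush drains the buffer and disarms the timer.
func (s *batchingSink) flush() {
	s.mu.Lock()
	defer s.mu.Unlock()
	fmt.Printf("flushing %d lines\n", len(s.buf))
	s.buf = s.buf[:0]
	s.flushTimer = nil
}

func main() {
	s := &batchingSink{interval: 100 * time.Millisecond}
	s.Write("m1")
	s.Write("m2")
	time.Sleep(200 * time.Millisecond) // let the timer fire
}
```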
@@ -19,13 +19,9 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
"batch_size": 200,
"retry_interval" : "1s",
"retry_exponential_base" : 2,
"precision": "s",
"max_retries": 20,
"max_retry_time" : "168h",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"meta_as_tags" : [],
}
}
```
@@ -43,12 +39,6 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
- `retry_exponential_base`: The retry interval is exponentially increased with this base, default 2
- `max_retries`: Maximal number of retry attempts
- `max_retry_time`: Maximal time to retry failed writes, default 168h (one week)
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
- `meta_as_tags`: move meta information keys to tags (optional)

For information about the calculation of the retry interval settings, see [official influxdb-client-go documentation](https://github.com/influxdata/influxdb-client-go#handling-of-failed-async-writes)

### Using `influxasync` sink for communication with cc-metric-store

The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.
@@ -10,9 +10,8 @@ import (
"sync"
"time"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
@@ -59,8 +58,6 @@ type InfluxSink struct {
InfluxMaxRetryTime string `json:"max_retry_time,omitempty"`
// Specify whether to use GZip compression in write requests
InfluxUseGzip bool `json:"use_gzip"`
// Timestamp precision
Precision string `json:"precision,omitempty"`
}

// influx line protocol encoder
@@ -209,20 +206,7 @@ func (s *InfluxSink) connect() error {
)

// Set time precision
precision := time.Second
if len(s.config.Precision) > 0 {
switch s.config.Precision {
case "s":
precision = time.Second
case "ms":
precision = time.Millisecond
case "us":
precision = time.Microsecond
case "ns":
precision = time.Nanosecond
}
}
clientOptions.SetPrecision(precision)
clientOptions.SetPrecision(time.Nanosecond)

// Create new writeAPI
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
@@ -240,19 +224,28 @@ func (s *InfluxSink) connect() error {
}

// Write sends metric m in influxDB line protocol
func (s *InfluxSink) Write(msg lp.CCMessage) error {
func (s *InfluxSink) Write(m lp.CCMetric) error {

m, err := s.mp.ProcessMessage(msg)
if err == nil && m != nil {
// Lock for encoder usage
s.encoderLock.Lock()
// Lock for encoder usage
s.encoderLock.Lock()

// Encode measurement name
s.encoder.StartLine(m.Name())
// Encode measurement name
s.encoder.StartLine(m.Name())

// copy tags and meta data which should be used as tags
s.extended_tag_list = s.extended_tag_list[:0]
for key, value := range m.Tags() {
// copy tags and meta data which should be used as tags
s.extended_tag_list = s.extended_tag_list[:0]
for key, value := range m.Tags() {
s.extended_tag_list =
append(
s.extended_tag_list,
key_value_pair{
key: key,
value: value,
},
)
}
for _, key := range s.config.MetaAsTags {
if value, ok := m.GetMeta(key); ok {
s.extended_tag_list =
append(
s.extended_tag_list,
@@ -262,57 +255,45 @@ func (s *InfluxSink) Write(msg lp.CCMessage) error {
},
)
}
// for _, key := range s.config.MetaAsTags {
// if value, ok := m.GetMeta(key); ok {
// s.extended_tag_list =
// append(
// s.extended_tag_list,
// key_value_pair{
// key: key,
// value: value,
// },
// )
// }
// }

// Encode tags (they must be in lexical order)
slices.SortFunc(
s.extended_tag_list,
func(a key_value_pair, b key_value_pair) int {
if a.key < b.key {
return -1
}
if a.key > b.key {
return +1
}
return 0
},
)
for i := range s.extended_tag_list {
s.encoder.AddTag(
s.extended_tag_list[i].key,
s.extended_tag_list[i].value,
)
}

// Encode fields
for key, value := range m.Fields() {
s.encoder.AddField(key, influx.MustNewValue(value))
}

// Encode time stamp
s.encoder.EndLine(m.Time())

// Check for encoder errors
if err := s.encoder.Err(); err != nil {
// Unlock encoder usage
s.encoderLock.Unlock()

return fmt.Errorf("encoding failed: %v", err)
}
s.numRecordsInEncoder++
}

// Encode tags (they must be in lexical order)
slices.SortFunc(
s.extended_tag_list,
func(a key_value_pair, b key_value_pair) int {
if a.key < b.key {
return -1
}
if a.key > b.key {
return +1
}
return 0
},
)
for i := range s.extended_tag_list {
s.encoder.AddTag(
s.extended_tag_list[i].key,
s.extended_tag_list[i].value,
)
}

// Encode fields
for key, value := range m.Fields() {
s.encoder.AddField(key, influx.MustNewValue(value))
}

// Encode time stamp
s.encoder.EndLine(m.Time())

// Check for encoder errors
if err := s.encoder.Err(); err != nil {
// Unlock encoder usage
s.encoderLock.Unlock()

return fmt.Errorf("Encoding failed: %v", err)
}
s.numRecordsInEncoder++

if s.config.flushDelay == 0 {
// Unlock encoder usage
s.encoderLock.Unlock()
@@ -436,7 +417,6 @@ func NewInfluxSink(name string, config json.RawMessage) (Sink, error) {
// Set config default values
s.config.BatchSize = 1000
s.config.FlushInterval = "1s"
s.config.Precision = "s"

// Read config
if len(config) > 0 {
@@ -463,20 +443,11 @@ func NewInfluxSink(name string, config json.RawMessage) (Sink, error) {
if len(s.config.Password) == 0 {
return s, errors.New("missing password configuration required by InfluxSink")
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p

if len(s.config.MessageProcessor) > 0 {
err = p.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
s.meta_as_tags[k] = true
}

// Configure flush delay duration
@@ -17,17 +17,14 @@ The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.de
"ssl": true,
"flush_delay" : "1s",
"batch_size" : 1000,
"use_gzip": true,
"precision": "s",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"use_gzip": true
"meta_as_tags" : [],
}
}
```

- `type`: makes the sink an `influxdb` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are written to this bucket
- `host`: Hostname of the InfluxDB database server
- `port`: Port number (as string) of the InfluxDB database server
@@ -37,9 +34,6 @@ The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.de
- `ssl`: Use SSL connection
- `flush_delay`: Group metrics coming in to a single batch
- `batch_size`: Maximal batch size. If `batch_size` is reached before the end of `flush_delay`, the metrics are sent without further delay
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)

Influx client options:
=======
@@ -52,7 +46,3 @@ Influx client options:
- `max_retries`: maximum count of retry attempts of failed writes
- `max_retry_time`: maximum total retry timeout
- `use_gzip`: Specify whether to use GZip compression in write requests

### Using `influxdb` sink for communication with cc-metric-store

The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.
@@ -72,9 +72,8 @@ import (
"fmt"
"unsafe"

lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/dl"
)

@@ -111,102 +110,99 @@ type LibgangliaSink struct {
cstrCache map[string]*C.char
}

func (s *LibgangliaSink) Write(msg lp.CCMessage) error {
func (s *LibgangliaSink) Write(point lp.CCMetric) error {
var err error = nil
var c_name *C.char
var c_value *C.char
var c_type *C.char
var c_unit *C.char

point, err := s.mp.ProcessMessage(msg)
if err == nil && point != nil {
// helper function for looking up C strings in the cache
lookup := func(key string) *C.char {
if _, exist := s.cstrCache[key]; !exist {
s.cstrCache[key] = C.CString(key)
}
return s.cstrCache[key]
// helper function for looking up C strings in the cache
lookup := func(key string) *C.char {
if _, exist := s.cstrCache[key]; !exist {
s.cstrCache[key] = C.CString(key)
}

conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}

if s.config.AddTypeToName {
conf.Name = GangliaMetricName(point)
}

c_value = C.CString(conf.Value)
c_type = lookup(conf.Type)
c_name = lookup(conf.Name)

// Add unit
unit := ""
if s.config.AddUnits {
unit = conf.Unit
}
c_unit = lookup(unit)

// Determine the slope of the metric. Ganglia's own collector mostly uses
// 'both' but the mem and swap total uses 'zero'.
slope_type := C.GANGLIA_SLOPE_BOTH
switch conf.Slope {
case "zero":
slope_type = C.GANGLIA_SLOPE_ZERO
case "both":
slope_type = C.GANGLIA_SLOPE_BOTH
}

// Create a new Ganglia metric
gmetric := C.Ganglia_metric_create(s.global_context)
// Set name, value, type and unit in the Ganglia metric
// The default slope_type is both directions, so up and down. Some metrics want 'zero' slope, probably constant.
// The 'tmax' value is by default 300.
rval := C.int(0)
rval = C.Ganglia_metric_set(gmetric, c_name, c_value, c_type, c_unit, C.uint(slope_type), C.uint(conf.Tmax), 0)
switch rval {
case 1:
C.free(unsafe.Pointer(c_value))
return errors.New("invalid parameters")
case 2:
C.free(unsafe.Pointer(c_value))
return errors.New("one of your parameters has an invalid character '\"'")
case 3:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the type parameter \"%s\" is not a valid type", conf.Type)
case 4:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the value parameter \"%s\" does not represent a number", conf.Value)
default:
}

// Set the cluster name, otherwise it takes it from the configuration file
if len(s.config.ClusterName) > 0 {
C.Ganglia_metadata_add(gmetric, lookup("CLUSTER"), lookup(s.config.ClusterName))
}
// Set the group metadata in the Ganglia metric if configured
if s.config.AddGangliaGroup {
c_group := lookup(conf.Group)
C.Ganglia_metadata_add(gmetric, lookup("GROUP"), c_group)
}

// Now we send the metric
// gmetric does provide some more options like description and other options
// but they are not provided by the collectors
rval = C.Ganglia_metric_send(gmetric, s.send_channels)
if rval != 0 {
err = fmt.Errorf("there was an error sending metric %s to %d of the send channels ", point.Name(), rval)
// fall through to use Ganglia_metric_destroy from common cleanup
}
// Cleanup Ganglia metric
C.Ganglia_metric_destroy(gmetric)
// Free the value C string, the only one not stored in the cache
C.free(unsafe.Pointer(c_value))
return s.cstrCache[key]
}

conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}

if s.config.AddTypeToName {
conf.Name = GangliaMetricName(point)
}

c_value = C.CString(conf.Value)
c_type = lookup(conf.Type)
c_name = lookup(conf.Name)

// Add unit
unit := ""
if s.config.AddUnits {
unit = conf.Unit
}
c_unit = lookup(unit)

// Determine the slope of the metric. Ganglia's own collector mostly uses
// 'both' but the mem and swap total uses 'zero'.
slope_type := C.GANGLIA_SLOPE_BOTH
switch conf.Slope {
case "zero":
slope_type = C.GANGLIA_SLOPE_ZERO
case "both":
slope_type = C.GANGLIA_SLOPE_BOTH
}

// Create a new Ganglia metric
gmetric := C.Ganglia_metric_create(s.global_context)
// Set name, value, type and unit in the Ganglia metric
// The default slope_type is both directions, so up and down. Some metrics want 'zero' slope, probably constant.
// The 'tmax' value is by default 300.
rval := C.int(0)
rval = C.Ganglia_metric_set(gmetric, c_name, c_value, c_type, c_unit, C.uint(slope_type), C.uint(conf.Tmax), 0)
switch rval {
case 1:
C.free(unsafe.Pointer(c_value))
return errors.New("invalid parameters")
case 2:
C.free(unsafe.Pointer(c_value))
return errors.New("one of your parameters has an invalid character '\"'")
case 3:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the type parameter \"%s\" is not a valid type", conf.Type)
case 4:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the value parameter \"%s\" does not represent a number", conf.Value)
default:
}

// Set the cluster name, otherwise it takes it from the configuration file
if len(s.config.ClusterName) > 0 {
C.Ganglia_metadata_add(gmetric, lookup("CLUSTER"), lookup(s.config.ClusterName))
}
// Set the group metadata in the Ganglia metric if configured
if s.config.AddGangliaGroup {
c_group := lookup(conf.Group)
C.Ganglia_metadata_add(gmetric, lookup("GROUP"), c_group)
}

// Now we send the metric
// gmetric does provide some more options like description and other options
// but they are not provided by the collectors
rval = C.Ganglia_metric_send(gmetric, s.send_channels)
if rval != 0 {
err = fmt.Errorf("there was an error sending metric %s to %d of the send channels ", point.Name(), rval)
// fall through to use Ganglia_metric_destroy from common cleanup
}
// Cleanup Ganglia metric
C.Ganglia_metric_destroy(gmetric)
// Free the value C string, the only one not stored in the cache
C.free(unsafe.Pointer(c_value))
return err
}

@@ -245,20 +241,6 @@ func NewLibgangliaSink(name string, config json.RawMessage) (Sink, error) {
return nil, err
}
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.MessageProcessor) > 0 {
err = s.mp.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
}
lib := dl.New(s.config.GangliaLib, GANGLIA_LIB_DL_FLAGS)
if lib == nil {
return nil, fmt.Errorf("error instantiating DynamicLibrary for %s", s.config.GangliaLib)
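The `lookup` closure above memoizes `C.CString` conversions so each recurring key is allocated once for the sink's lifetime; only the per-metric value string is freed. A cgo-free sketch of the same memoization pattern, with integer handles standing in for C pointers:

```go
package main

import "fmt"

func main() {
	cache := make(map[string]int) // stand-in for map[string]*C.char
	allocs := 0
	lookup := func(key string) int {
		if _, exist := cache[key]; !exist {
			allocs++
			cache[key] = allocs // pretend this is C.CString(key)
		}
		return cache[key]
	}
	lookup("GROUP")
	lookup("CLUSTER")
	lookup("GROUP") // served from the cache, no new allocation
	fmt.Println("allocations:", allocs) // 2
}
```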
@@ -15,23 +15,18 @@ The `libganglia` sink has probably less overhead compared to the `ganglia` sink
"cluster_name": "MyCluster",
"add_ganglia_group" : true,
"add_type_to_name": true,
"add_units" : true,
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"add_units" : true
}
}
```

- `type`: makes the sink a `libganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmond_config`: Path to the Ganglia configuration file `gmond.conf` (default: `/etc/ganglia/gmond.conf`)
- `cluster_name`: Set a cluster name for the metric. If not set, it is taken from `gmond_config`
- `add_ganglia_group`: Add a Ganglia metric group based on meta information. Some old versions of `gmetric` do not support the `--group` option
- `add_type_to_name`: Ganglia commonly uses only node-level metrics but with cc-metric-collector, there are metrics for cpus, memory domains, CPU sockets and the whole node. In order to get everything, this option prefixes the metric name with `<type><type-id>_` or `device_` depending on the metric tags and meta information. For metrics of the whole node `type=node`, no prefix is added
- `add_units`: Add metric value unit if there is a `unit` entry in the metric tags or meta information
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)

### Ganglia Installation
@@ -1,88 +1,27 @@
package sinks

import (
	"encoding/json"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
	influx "github.com/influxdata/line-protocol/v2/lineprotocol"
	"golang.org/x/exp/slices"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

type defaultSinkConfig struct {
	MetaAsTags       []string        `json:"meta_as_tags,omitempty"`
	MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
	Type             string          `json:"type"`
	MetaAsTags []string `json:"meta_as_tags,omitempty"`
	Type       string   `json:"type"`
}

type sink struct {
	meta_as_tags map[string]bool     // Use meta data tags as tags
	mp           mp.MessageProcessor // message processor for the sink
	name         string              // Name of the sink
	meta_as_tags map[string]bool // Use meta data tags as tags
	name         string          // Name of the sink
}

type Sink interface {
	Write(point lp.CCMessage) error // Write metric to the sink
	Flush() error                   // Flush buffered metrics
	Close()                         // Close / finish metric sink
	Name() string                   // Name of the metric sink
	Write(point lp.CCMetric) error // Write metric to the sink
	Flush() error                  // Flush buffered metrics
	Close()                        // Close / finish metric sink
	Name() string                  // Name of the metric sink
}

// Name returns the name of the metric sink
func (s *sink) Name() string {
	return s.name
}

type key_value_pair struct {
	key   string
	value string
}

func EncoderAdd(encoder *influx.Encoder, msg lp.CCMessage) error {
	// Encode measurement name
	encoder.StartLine(msg.Name())

	tag_list := make([]key_value_pair, 0, 10)

	// copy tags and meta data which should be used as tags
	for key, value := range msg.Tags() {
		tag_list =
			append(
				tag_list,
				key_value_pair{
					key:   key,
					value: value,
				},
			)
	}
	// Encode tags (they must be in lexical order)
	slices.SortFunc(
		tag_list,
		func(a key_value_pair, b key_value_pair) int {
			if a.key < b.key {
				return -1
			}
			if a.key > b.key {
				return +1
			}
			return 0
		},
	)
	for i := range tag_list {
		encoder.AddTag(
			tag_list[i].key,
			tag_list[i].value,
		)
	}

	// Encode fields
	for key, value := range msg.Fields() {
		encoder.AddField(key, influx.MustNewValue(value))
	}

	// Encode time stamp
	encoder.EndLine(msg.Time())

	// Return encoder errors
	return encoder.Err()
}
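
The v2 line-protocol encoder flags out-of-order tags as an error, which is why `EncoderAdd` sorts the tag list before calling `AddTag`. A minimal standalone sketch of the same encoder flow, using only the `lineprotocol` package (metric name, tags and field value are made up for illustration):

```go
package main

import (
	"fmt"
	"time"

	influx "github.com/influxdata/line-protocol/v2/lineprotocol"
)

func main() {
	var enc influx.Encoder
	enc.SetPrecision(influx.Second)

	enc.StartLine("cpu_load")
	// Tags must be added in lexical key order; otherwise enc.Err() is set.
	enc.AddTag("hostname", "node01")
	enc.AddTag("type", "node")
	enc.AddField("value", influx.MustNewValue(1.23))
	enc.EndLine(time.Now())

	if err := enc.Err(); err != nil {
		fmt.Println("encoding failed:", err)
		return
	}
	fmt.Print(string(enc.Bytes()))
}
```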
@@ -5,16 +5,13 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"sync"
	"time"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
	influx "github.com/influxdata/line-protocol/v2/lineprotocol"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
	influx "github.com/influxdata/line-protocol"
	nats "github.com/nats-io/nats.go"
	"golang.org/x/exp/slices"
)

type NatsSinkConfig struct {

@@ -25,21 +22,18 @@ type NatsSinkConfig struct {
	User       string `json:"user,omitempty"`
	Password   string `json:"password,omitempty"`
	FlushDelay string `json:"flush_delay,omitempty"`
	flushDelay time.Duration
	NkeyFile   string `json:"nkey_file,omitempty"`
	// Timestamp precision
	Precision string `json:"precision,omitempty"`
}

type NatsSink struct {
	sink
	client      *nats.Conn
	encoder     influx.Encoder
	encoderLock sync.Mutex
	config      NatsSinkConfig
	client  *nats.Conn
	encoder *influx.Encoder
	buffer  *bytes.Buffer
	config  NatsSinkConfig

	lock       sync.Mutex
	flushDelay time.Duration
	flushTimer *time.Timer
	timerLock  sync.Mutex
}

func (s *NatsSink) connect() error {

@@ -48,13 +42,6 @@ func (s *NatsSink) connect() error {
	var nc *nats.Conn
	if len(s.config.User) > 0 && len(s.config.Password) > 0 {
		uinfo = nats.UserInfo(s.config.User, s.config.Password)
	} else if len(s.config.NkeyFile) > 0 {
		if _, err := os.Stat(s.config.NkeyFile); err == nil {
			uinfo = nats.UserCredentials(s.config.NkeyFile)
		} else {
			cclog.ComponentError(s.name, "NKEY file", s.config.NkeyFile, "does not exist: %v", err.Error())
			return err
		}
	}
	uri := fmt.Sprintf("nats://%s:%s", s.config.Host, s.config.Port)
	cclog.ComponentDebug(s.name, "Connect to", uri)

@@ -72,65 +59,33 @@ func (s *NatsSink) connect() error {
	return nil
}
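
The two authentication branches above map onto standard `nats.go` options. A minimal, hypothetical connect call (server address, credentials and subject are placeholders):

```go
package main

import (
	"fmt"

	nats "github.com/nats-io/nats.go"
)

func main() {
	// Basic auth, as in the user/password branch above.
	nc, err := nats.Connect("nats://localhost:4222",
		nats.UserInfo("exampleuser", "examplepw"))
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer nc.Close()

	// NKEY-based auth would use nats.UserCredentials("/path/to/nkey_file")
	// instead, matching the NkeyFile branch above.

	// Publish one line-protocol encoded sample to a subject.
	nc.Publish("updates", []byte("cpu_load,hostname=node01 value=1.23 1700000000\n"))
}
```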

func (s *NatsSink) Write(m lp.CCMessage) error {
	msg, err := s.mp.ProcessMessage(m)
	if err == nil && msg != nil {
		// Lock for encoder usage
		s.encoderLock.Lock()

		// Add message to encoder
		err = EncoderAdd(&s.encoder, m)

		// Unlock encoder usage
		s.encoderLock.Unlock()

		// Check that encoding worked
		if err != nil {
			cclog.ComponentError(s.name, "Write:", err.Error())
			return err
		}
func (s *NatsSink) Write(m lp.CCMetric) error {
	s.lock.Lock()
	_, err := s.encoder.Encode(m.ToPoint(s.meta_as_tags))
	s.lock.Unlock()
	if err != nil {
		cclog.ComponentError(s.name, "Write:", err.Error())
		return err
	}

	if s.config.flushDelay == 0 {
		// Directly flush if no flush delay is configured
		return s.Flush()
	} else if s.timerLock.TryLock() {
		// Setup flush timer when flush delay is configured
		// and no other timer is already running
		if s.flushTimer != nil {

			// Restarting existing flush timer
			cclog.ComponentDebug(s.name, "Write(): Restarting flush timer")
			s.flushTimer.Reset(s.config.flushDelay)
		} else {

			// Creating and starting flush timer
			cclog.ComponentDebug(s.name, "Write(): Starting new flush timer")
			s.flushTimer = time.AfterFunc(
				s.config.flushDelay,
				func() {
					defer s.timerLock.Unlock()
					cclog.ComponentDebug(s.name, "Starting flush triggered by flush timer")
					if err := s.Flush(); err != nil {
						cclog.ComponentError(s.name, "Flush triggered by flush timer: flush failed:", err)
					}
				})
		}
	if s.flushDelay == 0 {
		s.Flush()
	} else if s.flushTimer == nil {
		s.flushTimer = time.AfterFunc(s.flushDelay, func() {
			s.Flush()
		})
	} else {
		s.flushTimer.Reset(s.flushDelay)
	}

	return nil
}
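
The timer logic in `Write` is a debounce: the first write after a flush arms a timer, and writes arriving within the flush delay are absorbed by it; `timerLock` stays locked while a timer is pending. A standalone sketch of the same pattern (names invented for illustration; `sync.Mutex.TryLock` requires Go 1.18+):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// debouncedFlusher mirrors the logic above: one armed timer at a time,
// guarded by timerLock, triggers a single flush per delay window.
type debouncedFlusher struct {
	delay     time.Duration
	timer     *time.Timer
	timerLock sync.Mutex // held while a timer is armed
}

func (d *debouncedFlusher) write(flush func()) {
	if d.delay == 0 {
		flush() // no delay configured: flush immediately
		return
	}
	if d.timerLock.TryLock() {
		if d.timer != nil {
			// Reuse the timer from an earlier round.
			d.timer.Reset(d.delay)
		} else {
			// Arm a timer that flushes and releases the lock when it fires.
			d.timer = time.AfterFunc(d.delay, func() {
				defer d.timerLock.Unlock()
				flush()
			})
		}
	}
	// TryLock failed: a timer is already pending, this write is absorbed.
}

func main() {
	d := &debouncedFlusher{delay: 100 * time.Millisecond}
	for i := 0; i < 5; i++ {
		d.write(func() { fmt.Println("flushed") }) // prints once, not five times
	}
	time.Sleep(200 * time.Millisecond)
}
```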
func (s *NatsSink) Flush() error {
	// Lock for encoder usage
	// Hold the lock only as long as it takes to clone the buffer.
	s.encoderLock.Lock()

	buf := slices.Clone(s.encoder.Bytes())
	s.encoder.Reset()

	// Unlock encoder usage
	s.encoderLock.Unlock()
	s.lock.Lock()
	buf := append([]byte{}, s.buffer.Bytes()...) // copy bytes
	s.buffer.Reset()
	s.lock.Unlock()

	if len(buf) == 0 {
		return nil

@@ -144,23 +99,14 @@ func (s *NatsSink) Flush() error {
}

func (s *NatsSink) Close() {
	// Stop existing timer and immediately flush
	if s.flushTimer != nil {
		if ok := s.flushTimer.Stop(); ok {
			s.timerLock.Unlock()
		}
	}
	cclog.ComponentDebug(s.name, "Close NATS connection")
	cclog.ComponentDebug(s.name, "Close")
	s.client.Close()
}

func NewNatsSink(name string, config json.RawMessage) (Sink, error) {
	s := new(NatsSink)
	s.name = fmt.Sprintf("NatsSink(%s)", name)
	s.config.flushDelay = 5 * time.Second
	s.config.FlushDelay = "5s"
	s.config.Port = "4222"
	s.config.Precision = "s"
	s.flushDelay = 10 * time.Second
	if len(config) > 0 {
		d := json.NewDecoder(bytes.NewReader(config))
		d.DisallowUnknownFields()

@@ -174,51 +120,28 @@ func NewNatsSink(name string, config json.RawMessage) (Sink, error) {
		len(s.config.Subject) == 0 {
		return nil, errors.New("not all configuration variables set required by NatsSink")
	}
	// Create a new message processor
	p, err := mp.NewMessageProcessor()
	if err != nil {
		return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
	}
	s.mp = p
	// Read config related to message processor
	if len(s.config.MessageProcessor) > 0 {
		err = s.mp.FromConfigJSON(s.config.MessageProcessor)
		if err != nil {
			return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
		}
	}
	// Add meta_as_tags list to message processor
	// Create lookup map to use meta infos as tags in the output metric
	s.meta_as_tags = make(map[string]bool)
	for _, k := range s.config.MetaAsTags {
		s.mp.AddMoveMetaToTags("true", k, k)
		s.meta_as_tags[k] = true
	}

	// Setup Influx line protocol encoder
	precision := influx.Second
	if len(s.config.Precision) > 0 {
		switch s.config.Precision {
		case "s":
			precision = influx.Second
		case "ms":
			precision = influx.Millisecond
		case "us":
			precision = influx.Microsecond
		case "ns":
			precision = influx.Nanosecond
		}
	}

	s.encoder.SetPrecision(precision)
	// Setup Influx line protocol
	s.buffer = &bytes.Buffer{}
	s.buffer.Grow(1025)
	s.encoder = influx.NewEncoder(s.buffer)
	s.encoder.SetPrecision(time.Second)
	s.encoder.SetMaxLineBytes(1024)
	// Setup infos for connection
	if err := s.connect(); err != nil {
		return nil, fmt.Errorf("unable to connect: %v", err)
	}

	s.flushTimer = nil
	if len(s.config.FlushDelay) > 0 {
		t, err := time.ParseDuration(s.config.FlushDelay)
		if err == nil {
			s.config.flushDelay = t
			cclog.ComponentDebug(s.name, "Init(): flushDelay", t)
	if len(s.config.FlushDelay) != 0 {
		var err error
		s.flushDelay, err = time.ParseDuration(s.config.FlushDelay)
		if err != nil {
			return nil, err
		}
	}

@@ -13,13 +13,7 @@ The `nats` sink publishes all metrics into a NATS network. The publishing key is
    "port": "4222",
    "user": "exampleuser",
    "password" : "examplepw",
    "nkey_file": "/path/to/nkey_file",
    "flush_delay": "10s",
    "precision": "s",
    "process_messages" : {
      "see" : "docs of message processor for valid fields"
    },
    "meta_as_tags" : []
    "meta_as_tags" : [],
  }
}
```

@@ -30,12 +24,4 @@ The `nats` sink publishes all metrics into a NATS network. The publishing key is
- `port`: Port number (as string) of the NATS server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
- `nkey_file`: Path to credentials file with NKEY
- `flush_delay`: Maximum time until metrics are sent out (default: '5s')
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns' (default: 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)

### Using the `nats` sink for communication with cc-metric-store

The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.
- `meta_as_tags`: print all meta information as tags in the output (optional)
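
As an illustration of the seconds-precision requirement, a minimal sink configuration for feeding cc-metric-store via NATS could look like this (host, port and subject are placeholders):

```json
{
  "cc-metric-store": {
    "type": "nats",
    "host": "localhost",
    "port": "4222",
    "subject": "updates",
    "precision": "s",
    "flush_delay": "5s"
  }
}
```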
@@ -10,9 +10,8 @@ import (
	"strings"
	"sync"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
	"github.com/gorilla/mux"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

@@ -50,13 +49,11 @@ func intToFloat64(input interface{}) (float64, error) {
		return float64(value), nil
	case int64:
		return float64(value), nil
	case uint64:
		return float64(value), nil
	}
	return 0, errors.New("cannot cast value to float64")
}

func getLabelValue(metric lp.CCMessage) []string {
func getLabelValue(metric lp.CCMetric) []string {
	labelValues := []string{}
	if tid, tidok := metric.GetTag("type-id"); tidok && metric.HasTag("type") {
		labelValues = append(labelValues, tid)

@@ -69,7 +66,7 @@ func getLabelValue(metric lp.CCMessage) []string {
	return labelValues
}

func getLabelNames(metric lp.CCMessage) []string {
func getLabelNames(metric lp.CCMetric) []string {
	labelNames := []string{}
	if t, tok := metric.GetTag("type"); tok && metric.HasTag("type-id") {
		labelNames = append(labelNames, t)

@@ -82,7 +79,7 @@ func getLabelNames(metric lp.CCMessage) []string {
	return labelNames
}
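
The helpers above pair the `type` tag with its `type-id`: a metric tagged `type=cpu`, `type-id=3` yields label name `cpu` and label value `3`. A small hypothetical sketch of how such labels feed a `prometheus.GaugeVec` (illustrative only, not the sink's actual bookkeeping):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One GaugeVec per metric name; the label name comes from getLabelNames().
	gv := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "flops_any"},
		[]string{"cpu"}, // label name derived from the "type" tag
	)

	reg := prometheus.NewRegistry()
	reg.MustRegister(gv)

	// The label value comes from getLabelValue(): here the "type-id" tag.
	gv.WithLabelValues("3").Set(2123.5)

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), "->", mf.GetMetric())
	}
}
```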
func (s *PrometheusSink) newMetric(metric lp.CCMessage) error {
func (s *PrometheusSink) newMetric(metric lp.CCMetric) error {
	var value float64 = 0
	name := metric.Name()
	opts := prometheus.GaugeOpts{

@@ -120,7 +117,7 @@ func (s *PrometheusSink) newMetric(metric lp.CCMessage) error {
	return nil
}

func (s *PrometheusSink) updateMetric(metric lp.CCMessage) error {
func (s *PrometheusSink) updateMetric(metric lp.CCMetric) error {
	var value float64 = 0.0
	name := metric.Name()
	labelValues := getLabelValue(metric)

@@ -153,12 +150,8 @@ func (s *PrometheusSink) updateMetric(metric lp.CCMessage) error {
	return nil
}

func (s *PrometheusSink) Write(m lp.CCMessage) error {
	msg, err := s.mp.ProcessMessage(m)
	if err == nil && msg != nil {
		err = s.updateMetric(m)
	}
	return err
func (s *PrometheusSink) Write(m lp.CCMetric) error {
	return s.updateMetric(m)
}

func (s *PrometheusSink) Flush() error {

@@ -187,20 +180,6 @@ func NewPrometheusSink(name string, config json.RawMessage) (Sink, error) {
		cclog.ComponentError(s.name, err.Error())
		return nil, err
	}
	p, err := mp.NewMessageProcessor()
	if err != nil {
		return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
	}
	s.mp = p
	if len(s.config.MessageProcessor) > 0 {
		err = p.FromConfigJSON(s.config.MessageProcessor)
		if err != nil {
			return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
		}
	}
	for _, k := range s.config.MetaAsTags {
		s.mp.AddMoveMetaToTags("true", k, k)
	}
	s.labelMetrics = make(map[string]*prometheus.GaugeVec)
	s.nodeMetrics = make(map[string]prometheus.Gauge)
	s.promWg.Add(1)

@@ -11,11 +11,7 @@ The `prometheus` sink publishes all metrics via an HTTP server ready to be scrap
    "type": "prometheus",
    "host": "localhost",
    "port": "8080",
    "path": "metrics",
    "process_messages" : {
      "see" : "docs of message processor for valid fields"
    },
    "meta_as_tags" : []
    "path": "metrics"
  }
}
```

@@ -25,5 +21,3 @@ The `prometheus` sink publishes all metrics via an HTTP server ready to be scrap
- `port`: Port number (as string) for the HTTP server
- `path`: Path where the metrics should be served. The metrics will be published at `host`:`port`/`path`
- `group_as_namespace`: Most metrics contain a group as meta information like 'memory' or 'load'. With this option, the metric names are extended to `group`_`name` if possible, e.g. a metric `used` in group `memory` is published as `memory_used`.
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)

@@ -6,9 +6,8 @@ import (
	"fmt"
	"log"

	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

type SampleSinkConfig struct {

@@ -29,14 +28,9 @@ type SampleSink struct {
	// See: metricSink.go

// Code to submit a single CCMetric to the sink
func (s *SampleSink) Write(point lp.CCMessage) error {
func (s *SampleSink) Write(point lp.CCMetric) error {
	// based on s.meta_as_tags use meta infos as tags
	// moreover, submit the point to the message processor
	// to apply drop/modify rules
	msg, err := s.mp.ProcessMessage(point)
	if err == nil && msg != nil {
		log.Print(msg)
	}
	log.Print(point)
	return nil
}

@@ -72,24 +66,10 @@ func NewSampleSink(name string, config json.RawMessage) (Sink, error) {
		}
	}

	// Initialize and configure the message processor
	p, err := mp.NewMessageProcessor()
	if err != nil {
		return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
	}
	s.mp = p

	// Add message processor configuration
	if len(s.config.MessageProcessor) > 0 {
		err = p.FromConfigJSON(s.config.MessageProcessor)
		if err != nil {
			return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
		}
	}
	// Add rules to move meta information to tag space
	// Replacing the legacy 'meta_as_tags' configuration
	// Create lookup map to use meta infos as tags in the output metric
	s.meta_as_tags = make(map[string]bool)
	for _, k := range s.config.MetaAsTags {
		s.mp.AddMoveMetaToTags("true", k, k)
		s.meta_as_tags[k] = true
	}

	// Check if all required fields in the config are set

@@ -7,7 +7,7 @@ import (
	"sync"

	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

const SINK_MAX_FORWARD = 50

@@ -21,12 +21,11 @@ var AvailableSinks = map[string]func(name string, config json.RawMessage) (Sink,
	"influxdb":    NewInfluxSink,
	"influxasync": NewInfluxAsyncSink,
	"http":        NewHttpSink,
	"prometheus":  NewPrometheusSink,
}

// Metric collector manager data structure
type sinkManager struct {
	input chan lp.CCMessage // input channel
	input chan lp.CCMetric  // input channel
	done  chan bool         // channel to finish / stop metric sink manager
	wg    *sync.WaitGroup   // wait group for all goroutines in cc-metric-collector
	sinks map[string]Sink   // Mapping sink name to sink

@@ -36,7 +35,7 @@ type sinkManager struct {
// Sink manager access functions
type SinkManager interface {
	Init(wg *sync.WaitGroup, sinkConfigFile string) error
	AddInput(input chan lp.CCMessage)
	AddInput(input chan lp.CCMetric)
	AddOutput(name string, config json.RawMessage) error
	Start()
	Close()

@@ -108,7 +107,7 @@ func (sm *sinkManager) Start() {
	cclog.ComponentDebug("SinkManager", "DONE")
}

	toTheSinks := func(p lp.CCMessage) {
	toTheSinks := func(p lp.CCMetric) {
		// Send received metric to all outputs
		cclog.ComponentDebug("SinkManager", "WRITE", p)
		for _, s := range sm.sinks {

@@ -139,7 +138,7 @@ func (sm *sinkManager) Start() {
	}

// AddInput adds the input channel to the sink manager
func (sm *sinkManager) AddInput(input chan lp.CCMessage) {
func (sm *sinkManager) AddInput(input chan lp.CCMetric) {
	sm.input = input
}
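
For orientation, a hedged sketch of how the `SinkManager` interface above is meant to be wired; it relies only on the methods shown here, while the package import path is assumed and the constructor is left out:

```go
package sketch

import (
	"sync"

	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
	"github.com/ClusterCockpit/cc-metric-collector/sinks"
)

// runSinks wires an already-initialized SinkManager.
func runSinks(sm sinks.SinkManager, wg *sync.WaitGroup) {
	input := make(chan lp.CCMetric, 64)
	sm.AddInput(input) // the metric router writes into this channel
	sm.Start()         // fan each metric out to all configured sinks

	// ... producers send lp.CCMetric values into input ...

	sm.Close() // flush and shut down all sinks
	wg.Wait()  // wait for the manager goroutine registered in wg
}
```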
@@ -8,9 +8,8 @@ import (
	"strings"

	// "time"
	lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
	cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
	mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
	lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)

type StdoutSink struct {

@@ -22,14 +21,11 @@ type StdoutSink struct {
	}
}

func (s *StdoutSink) Write(m lp.CCMessage) error {
	msg, err := s.mp.ProcessMessage(m)
	if err == nil && msg != nil {
		fmt.Fprint(
			s.output,
			msg.ToLineProtocol(s.meta_as_tags),
		)
	}
func (s *StdoutSink) Write(m lp.CCMetric) error {
	fmt.Fprint(
		s.output,
		m.ToLineProtocol(s.meta_as_tags),
	)
	return nil
}

@@ -45,7 +41,6 @@ func (s *StdoutSink) Close() {
}

func NewStdoutSink(name string, config json.RawMessage) (Sink, error) {

	s := new(StdoutSink)
	s.name = fmt.Sprintf("StdoutSink(%s)", name)
	if len(config) > 0 {

@@ -56,11 +51,6 @@ func NewStdoutSink(name string, config json.RawMessage) (Sink, error) {
			return nil, err
		}
	}
	p, err := mp.NewMessageProcessor()
	if err != nil {
		return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
	}
	s.mp = p

	s.output = os.Stdout
	if len(s.config.Output) > 0 {

@@ -77,21 +67,10 @@ func NewStdoutSink(name string, config json.RawMessage) (Sink, error) {
			s.output = f
		}
	}

	// Add message processor configuration
	if len(s.config.MessageProcessor) > 0 {
		err = s.mp.FromConfigJSON(s.config.MessageProcessor)
		if err != nil {
			return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
		}
	}
	// Create lookup map to use meta infos as tags in the output metric
	// s.meta_as_tags = make(map[string]bool)
	// for _, k := range s.config.MetaAsTags {
	// 	s.meta_as_tags[k] = true
	// }
	s.meta_as_tags = make(map[string]bool)
	for _, k := range s.config.MetaAsTags {
		s.mp.AddMoveMetaToTags("true", k, k)
		s.meta_as_tags[k] = true
	}

	return s, nil

@@ -10,11 +10,7 @@ The `stdout` sink is the most simple sink provided by cc-metric-collector. It wr
  "<name>": {
    "type": "stdout",
    "meta_as_tags" : [],
    "output_file" : "mylogfile.log",
    "process_messages" : {
      "see" : "docs of message processor for valid fields"
    },
    "meta_as_tags" : []
    "output_file" : "mylogfile.log"
  }
}
```

@@ -22,6 +18,5 @@ The `stdout` sink is the most simple sink provided by cc-metric-collector. It wr
- `type`: makes the sink a `stdout` sink
- `meta_as_tags`: print meta information as tags in the output (optional)
- `output_file`: Write all data to the selected file (optional). There are two 'special' files: `stdout` and `stderr`. If this option is not provided, the default value is `stdout`
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)