Compare commits


1 Commit

Author: Thomas Roehl
SHA1: 1ce40aea16
Message: Add AppMetricReceiver
Date: 2022-11-29 13:44:20 +01:00

103 changed files with 2999 additions and 7072 deletions


@@ -8,17 +8,16 @@ on:
push:
tags:
- '**'
workflow_dispatch:
jobs:
#
# Build on AlmaLinux 8 using go-toolset
# Build on AlmaLinux 8.5 using go-toolset
#
AlmaLinux8-RPM-build:
AlmaLinux-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:8
container: almalinux:8.5
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
outputs:
@@ -36,146 +35,60 @@ jobs:
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
# Use dnf to install build dependencies
- name: Install build dependencies
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
rpm -i go*.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
run: make RPM
# AlmaLinux 8 is a derivative of RedHat Enterprise Linux 8 (UBI8),
# AlmaLinux 8.5 is a derivative of RedHat Enterprise Linux 8 (UBI8),
# so the created RPMs both contain the substring 'el8' in the RPM file names.
# This step replaces the substring 'el8' with 'alma8'. It uses the move operation
# because it is unclear whether the default AlmaLinux 8 container contains the
# This step replaces the substring 'el8' with 'alma85'. It uses the move operation
# because it is unclear whether the default AlmaLinux 8.5 container contains the
# 'rename' command. This way we also get the new names for output.
- name: Rename RPMs (s/el8/alma8/)
- name: Rename RPMs (s/el8/alma85/)
id: rpmrename
run: |
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
NEW_RPM="${OLD_RPM/el8/alma8}"
NEW_SRPM=${OLD_SRPM/el8/alma8}
NEW_RPM="${OLD_RPM/el8/alma85}"
NEW_SRPM=${OLD_SRPM/el8/alma85}
mv "${OLD_RPM}" "${NEW_RPM}"
mv "${OLD_SRPM}" "${NEW_SRPM}"
echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT
echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT
echo "::set-output name=SRPM::${NEW_SRPM}"
echo "::set-output name=RPM::${NEW_RPM}"
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector RPM for AlmaLinux 8
name: cc-metric-collector RPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector SRPM for AlmaLinux 8
name: cc-metric-collector SRPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.SRPM }}
overwrite: true
#
# Build on AlmaLinux 9 using go-toolset
#
AlmaLinux9-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:9
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
outputs:
rpm : ${{steps.rpmrename.outputs.RPM}}
srpm : ${{steps.rpmrename.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: |
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
# AlmaLinux 9 is a derivative of RedHat Enterprise Linux 9 (UBI9),
# so the created RPMs both contain the substring 'el9' in the RPM file names.
# This step replaces the substring 'el9' with 'alma9'. It uses the move operation
# because it is unclear whether the default AlmaLinux 9 container contains the
# 'rename' command. This way we also get the new names for output.
- name: Rename RPMs (s/el9/alma9/)
id: rpmrename
run: |
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
NEW_RPM="${OLD_RPM/el9/alma9}"
NEW_SRPM=${OLD_SRPM/el9/alma9}
mv "${OLD_RPM}" "${NEW_RPM}"
mv "${OLD_SRPM}" "${NEW_SRPM}"
echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT
echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector RPM for AlmaLinux 9
path: ${{ steps.rpmrename.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector SRPM for AlmaLinux 9
path: ${{ steps.rpmrename.outputs.SRPM }}
overwrite: true
#
# Build on UBI 8 using go-toolset
#
UBI-8-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c35984d70cc534b3a3784e?container-tabs=gti
container: registry.access.redhat.com/ubi8/ubi:8.8-1032.1692772289
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065
# The job outputs link to the outputs of the 'rpmbuild' step
outputs:
rpm : ${{steps.rpmbuild.outputs.RPM}}
@@ -190,110 +103,42 @@ jobs:
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
# Use dnf to install build dependencies
- name: Install build dependencies
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
rpm -i go*.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
run: make RPM
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector RPM for UBI 8
path: ${{ steps.rpmbuild.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector SRPM for UBI 8
path: ${{ steps.rpmbuild.outputs.SRPM }}
overwrite: true
#
# Build on UBI 9 using go-toolset
# Build on Ubuntu 20.04 using official go package
#
UBI-9-RPM-build:
Ubuntu-focal-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: redhat/ubi9
# The job outputs link to the outputs of the 'rpmbuild' step
# The job outputs link to the outputs of the 'rpmbuild' step
outputs:
rpm : ${{steps.rpmbuild.outputs.RPM}}
srpm : ${{steps.rpmbuild.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector RPM for UBI 9
path: ${{ steps.rpmbuild.outputs.RPM }}
overwrite: true
- name: Save SRPM as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector SRPM for UBI 9
path: ${{ steps.rpmbuild.outputs.SRPM }}
overwrite: true
#
# Build on Ubuntu 22.04 using official go package
#
Ubuntu-jammy-build:
runs-on: ubuntu-latest
container: ubuntu:22.04
container: ubuntu:20.04
# The job outputs link to the outputs of the 'debrename' step
# Only job outputs can be used in child jobs
outputs:
@@ -308,86 +153,35 @@ jobs:
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
# Use official golang package
- name: Install Golang
run: |
wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
go version
- name: DEB build MetricCollector
id: dpkg-build
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
make DEB
- name: Rename DEB (add '_ubuntu22.04')
- name: Rename DEB (add '_ubuntu20.04')
id: debrename
run: |
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb"
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu20.04.deb"
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
echo "::set-output name=DEB::${NEW_DEB_FILE}"
# See: https://github.com/actions/upload-artifact
- name: Save DEB as artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: cc-metric-collector DEB for Ubuntu 22.04
name: cc-metric-collector DEB for Ubuntu 20.04
path: ${{ steps.debrename.outputs.DEB }}
overwrite: true
#
# Build on Ubuntu 24.04 using official go package
#
Ubuntu-noblenumbat-build:
runs-on: ubuntu-latest
container: ubuntu:24.04
# The job outputs link to the outputs of the 'debrename' step
# Only job outputs can be used in child jobs
outputs:
deb : ${{steps.debrename.outputs.DEB}}
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: DEB build MetricCollector
id: dpkg-build
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make DEB
- name: Rename DEB (add '_ubuntu24.04')
id: debrename
run: |
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu24.04.deb"
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
# See: https://github.com/actions/upload-artifact
- name: Save DEB as artifact
uses: actions/upload-artifact@v4
with:
name: cc-metric-collector DEB for Ubuntu 24.04
path: ${{ steps.debrename.outputs.DEB }}
overwrite: true
#
# Create release with fresh RPMs
@@ -395,55 +189,32 @@ jobs:
Release:
runs-on: ubuntu-latest
# We need the RPMs, so add dependency
needs: [AlmaLinux8-RPM-build, AlmaLinux9-RPM-build, UBI-8-RPM-build, UBI-9-RPM-build, Ubuntu-jammy-build, Ubuntu-noblenumbat-build]
needs: [AlmaLinux-RPM-build, UBI-8-RPM-build, Ubuntu-focal-build]
steps:
# See: https://github.com/actions/download-artifact
- name: Download AlmaLinux 8 RPM
uses: actions/download-artifact@v4
- name: Download AlmaLinux 8.5 RPM
uses: actions/download-artifact@v2
with:
name: cc-metric-collector RPM for AlmaLinux 8
- name: Download AlmaLinux 8 SRPM
uses: actions/download-artifact@v4
name: cc-metric-collector RPM for AlmaLinux 8.5
- name: Download AlmaLinux 8.5 SRPM
uses: actions/download-artifact@v2
with:
name: cc-metric-collector SRPM for AlmaLinux 8
- name: Download AlmaLinux 9 RPM
uses: actions/download-artifact@v4
with:
name: cc-metric-collector RPM for AlmaLinux 9
- name: Download AlmaLinux 9 SRPM
uses: actions/download-artifact@v4
with:
name: cc-metric-collector SRPM for AlmaLinux 9
name: cc-metric-collector SRPM for AlmaLinux 8.5
- name: Download UBI 8 RPM
uses: actions/download-artifact@v4
uses: actions/download-artifact@v2
with:
name: cc-metric-collector RPM for UBI 8
- name: Download UBI 8 SRPM
uses: actions/download-artifact@v4
uses: actions/download-artifact@v2
with:
name: cc-metric-collector SRPM for UBI 8
- name: Download UBI 9 RPM
uses: actions/download-artifact@v4
- name: Download Ubuntu 20.04 DEB
uses: actions/download-artifact@v2
with:
name: cc-metric-collector RPM for UBI 9
- name: Download UBI 9 SRPM
uses: actions/download-artifact@v4
with:
name: cc-metric-collector SRPM for UBI 9
- name: Download Ubuntu 22.04 DEB
uses: actions/download-artifact@v4
with:
name: cc-metric-collector DEB for Ubuntu 22.04
- name: Download Ubuntu 24.04 DEB
uses: actions/download-artifact@v4
with:
name: cc-metric-collector DEB for Ubuntu 24.04
name: cc-metric-collector DEB for Ubuntu 20.04
# The download actions do not publish the name of the downloaded file,
# so we re-use the job outputs of the parent jobs. The files are all
@@ -454,51 +225,31 @@ jobs:
- name: Set RPM variables
id: files
run: |
ALMA_8_RPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.rpm}}")
ALMA_8_SRPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.srpm}}")
ALMA_9_RPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.rpm}}")
ALMA_9_SRPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.srpm}}")
ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}")
ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}")
UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
UBI_9_RPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.rpm}}")
UBI_9_SRPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.srpm}}")
U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}")
U_2404_DEB=$(basename "${{ needs.Ubuntu-noblenumbat-build.outputs.deb}}")
echo "ALMA_8_RPM::${ALMA_8_RPM}"
echo "ALMA_8_SRPM::${ALMA_8_SRPM}"
echo "ALMA_9_RPM::${ALMA_9_RPM}"
echo "ALMA_9_SRPM::${ALMA_9_SRPM}"
U_2004_DEB=$(basename "${{ needs.Ubuntu-focal-build.outputs.deb}}")
echo "ALMA_85_RPM::${ALMA_85_RPM}"
echo "ALMA_85_SRPM::${ALMA_85_SRPM}"
echo "UBI_8_RPM::${UBI_8_RPM}"
echo "UBI_8_SRPM::${UBI_8_SRPM}"
echo "UBI_9_RPM::${UBI_9_RPM}"
echo "UBI_9_SRPM::${UBI_9_SRPM}"
echo "U_2204_DEB::${U_2204_DEB}"
echo "U_2404_DEB::${U_2404_DEB}"
echo "ALMA_8_RPM=${ALMA_8_RPM}" >> $GITHUB_OUTPUT
echo "ALMA_8_SRPM=${ALMA_8_SRPM}" >> $GITHUB_OUTPUT
echo "ALMA_9_RPM=${ALMA_9_RPM}" >> $GITHUB_OUTPUT
echo "ALMA_9_SRPM=${ALMA_9_SRPM}" >> $GITHUB_OUTPUT
echo "UBI_8_RPM=${UBI_8_RPM}" >> $GITHUB_OUTPUT
echo "UBI_8_SRPM=${UBI_8_SRPM}" >> $GITHUB_OUTPUT
echo "UBI_9_RPM=${UBI_9_RPM}" >> $GITHUB_OUTPUT
echo "UBI_9_SRPM=${UBI_9_SRPM}" >> $GITHUB_OUTPUT
echo "U_2204_DEB=${U_2204_DEB}" >> $GITHUB_OUTPUT
echo "U_2404_DEB=${U_2404_DEB}" >> $GITHUB_OUTPUT
echo "U_2004_DEB::${U_2004_DEB}"
echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}"
echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}"
echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}"
echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}"
echo "::set-output name=U_2004_DEB::${U_2004_DEB}"
# See: https://github.com/softprops/action-gh-release
- name: Release
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
if: startsWith(github.ref, 'refs/tags/')
with:
name: cc-metric-collector-${{github.ref_name}}
files: |
${{ steps.files.outputs.ALMA_8_RPM }}
${{ steps.files.outputs.ALMA_8_SRPM }}
${{ steps.files.outputs.ALMA_9_RPM }}
${{ steps.files.outputs.ALMA_9_SRPM }}
${{ steps.files.outputs.ALMA_85_RPM }}
${{ steps.files.outputs.ALMA_85_SRPM }}
${{ steps.files.outputs.UBI_8_RPM }}
${{ steps.files.outputs.UBI_8_SRPM }}
${{ steps.files.outputs.UBI_9_RPM }}
${{ steps.files.outputs.UBI_9_SRPM }}
${{ steps.files.outputs.U_2204_DEB }}
${{ steps.files.outputs.U_2404_DEB }}
${{ steps.files.outputs.U_2004_DEB }}


@@ -4,31 +4,28 @@
name: Run Test
# Run on event push
on:
push:
workflow_dispatch:
on: push
jobs:
#
# Job build-1-21
# Build on latest Ubuntu using golang version 1.21
# Job build-1-18
# Build on latest Ubuntu using golang version 1.18
#
build-1-21:
build-1-18:
runs-on: ubuntu-latest
steps:
# See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
uses: actions/setup-go@v3
with:
go-version: '1.21'
go-version: '1.18.2'
- name: Build MetricCollector
run: make
@@ -37,298 +34,27 @@ jobs:
run: ./cc-metric-collector --once --config .github/ci-config.json
#
# Job build-1-22
# Build on latest Ubuntu using golang version 1.22
# Job build-1-19
# Build on latest Ubuntu using golang version 1.19
#
build-1-22:
build-1-19:
runs-on: ubuntu-latest
steps:
# See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.19'
- name: Build MetricCollector
run: make
- name: Run MetricCollector once
run: ./cc-metric-collector --once --config .github/ci-config.json
#
# Job build-1-23
# Build on latest Ubuntu using golang version 1.23
#
build-1-23:
runs-on: ubuntu-latest
steps:
# See: https://github.com/marketplace/actions/checkout
# Checkout git repository and submodules
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: '1.23'
- name: Build MetricCollector
run: make
- name: Run MetricCollector once
run: ./cc-metric-collector --once --config .github/ci-config.json
#
# Build on AlmaLinux 8
#
AlmaLinux8-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:8
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
steps:
# Use dnf to install development packages
- name: Install development packages
run: |
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on AlmaLinux 9
#
AlmaLinux9-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:9
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
steps:
# Use dnf to install development packages
- name: Install development packages
run: |
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on UBI 8 using go-toolset
#
UBI-8-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: redhat/ubi8
# The job outputs link to the outputs of the 'rpmbuild' step
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on UBI 9 using go-toolset
#
UBI-9-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: redhat/ubi9
# The job outputs link to the outputs of the 'rpmbuild' step
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# See: https://github.com/marketplace/actions/setup-go-environment
# - name: Setup Golang
# uses: actions/setup-go@v5
# with:
# go-version: 'stable'
- name: Setup Golang
run: |
dnf --assumeyes --disableplugin=subscription-manager install \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
- name: RPM build MetricCollector
id: rpmbuild
run: |
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
make RPM
#
# Build on Ubuntu 22.04 using official go package
#
Ubuntu-jammy-build:
runs-on: ubuntu-latest
container: ubuntu:22.04
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# Use official golang package
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: DEB build MetricCollector
id: dpkg-build
run: |
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
make DEB
#
# Build on Ubuntu 24.04 using official go package
#
Ubuntu-noblenumbat-build:
runs-on: ubuntu-latest
container: ubuntu:24.04
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
# Use official golang package
# See: https://github.com/marketplace/actions/setup-go-environment
- name: Setup Golang
uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: DEB build MetricCollector
id: dpkg-build
run: |
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
make DEB


@@ -1,29 +0,0 @@
{
"title": "cc-metric-collector",
"description": "Monitoring agent for ClusterCockpit.",
"creators": [
{
"affiliation": "Regionales Rechenzentrum Erlangen, Friedrich-Alexander-Universität Erlangen-Nürnberg",
"name": "Thomas Gruber",
"orcid": "0000-0001-5560-6964"
},
{
"affiliation": "Steinbuch Centre for Computing, Karlsruher Institut für Technologie",
"name": "Holger Obermaier",
"orcid": "0000-0002-6830-6626"
}
],
"upload_type": "software",
"license": "MIT",
"access_right": "open",
"keywords": [
"performance-monitoring",
"cluster-monitoring",
"open-source"
],
"communities": [
{
"identifier": "clustercockpit"
}
]
}


@@ -84,7 +84,7 @@ RPM: scripts/cc-metric-collector.spec
@COMMITISH="HEAD"
@VERS=$$(git describe --tags $${COMMITISH})
@VERS=$${VERS#v}
@VERS=$$(echo $${VERS} | sed -e s+'-'+'_'+g)
@VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
@eval $$(rpmspec --query --queryformat "NAME='%{name}' VERSION='%{version}' RELEASE='%{release}' NVR='%{NVR}' NVRA='%{NVRA}'" --define="VERS $${VERS}" "$${SPECFILE}")
@PREFIX="$${NAME}-$${VERSION}"
@FORMAT="tar.gz"
@@ -96,8 +96,10 @@ RPM: scripts/cc-metric-collector.spec
@if [[ "$${GITHUB_ACTIONS}" == true ]]; then
@ RPMFILE="$${RPMDIR}/$${ARCH}/$${NVRA}.rpm"
@ SRPMFILE="$${SRPMDIR}/$${NVR}.src.rpm"
@ echo "SRPM=$${SRPMFILE}" >> $${GITHUB_OUTPUT}
@ echo "RPM=$${RPMFILE}" >> $${GITHUB_OUTPUT}
@ echo "RPM: $${RPMFILE}"
@ echo "SRPM: $${SRPMFILE}"
@ echo "::set-output name=SRPM::$${SRPMFILE}"
@ echo "::set-output name=RPM::$${RPMFILE}"
@fi
.PHONY: DEB
@@ -106,24 +108,21 @@ DEB: scripts/cc-metric-collector.deb.control $(APP)
@WORKSPACE=$${PWD}/.dpkgbuild
@DEBIANDIR=$${WORKSPACE}/debian
@DEBIANBINDIR=$${WORKSPACE}/DEBIAN
@mkdir --parents --verbose $${WORKSPACE} $${DEBIANBINDIR}
@mkdir --parents --verbose $$WORKSPACE $$DEBIANBINDIR
#@mkdir --parents --verbose $$DEBIANDIR
@CONTROLFILE="$${BASEDIR}/scripts/cc-metric-collector.deb.control"
@COMMITISH="HEAD"
@VERS=$$(git describe --tags --abbrev=0 $${COMMITISH})
@if [ -z "$${VERS}" ]; then VERS=${GITHUB_REF_NAME}; fi
@VERS=$${VERS#v}
@VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
@ARCH=$$(uname -m)
@ARCH=$$(echo $${ARCH} | sed -e s+'_'+'-'+g)
@if [ "$${ARCH}" = "x86-64" ]; then ARCH=amd64; fi
@ARCH=$$(echo $$ARCH | sed -e s+'_'+'-'+g)
@PREFIX="$${NAME}-$${VERSION}_$${ARCH}"
@SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$${WORKSPACE}"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//')
@SIZE="$$(awk -v size="$${SIZE_BYTES}" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')"
@sed -e s+"{VERSION}"+"$${VERS}"+g -e s+"{INSTALLED_SIZE}"+"$${SIZE}"+g -e s+"{ARCH}"+"$${ARCH}"+g $${CONTROLFILE} > $${DEBIANBINDIR}/control
@SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$$WORKSPACE"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//')
@SIZE="$$(awk -v size="$$SIZE_BYTES" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')"
#@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANDIR}/control
@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANBINDIR}/control
@make PREFIX=$${WORKSPACE} install
@DEB_FILE="cc-metric-collector_$${VERS}_$${ARCH}.deb"
@dpkg-deb -b $${WORKSPACE} "$${DEB_FILE}"
@if [ "$${GITHUB_ACTIONS}" = "true" ]; then
@ echo "DEB=$${DEB_FILE}" >> $${GITHUB_OUTPUT}
@fi
@dpkg-deb -b $${WORKSPACE} "$$DEB_FILE"
@rm -r "$${WORKSPACE}"


@@ -8,10 +8,6 @@ There is a single timer loop that triggers all collectors serially, collects the
The receiver runs as a goroutine side-by-side with the timer loop and asynchronously forwards received metrics to the sink.
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7438287.svg)](https://doi.org/10.5281/zenodo.7438287)
# Configuration
Configuration is implemented using a single JSON document that is distributed over the network and may be persisted as a file.
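
The architecture this README paragraph describes is easy to picture as a channel pipeline. Below is a minimal, illustrative Go sketch of that flow — one timer loop triggering all collectors serially, while a receiver goroutine forwards incoming metrics asynchronously to the sink. `Metric`, `Collector`, and `dummyCollector` are hypothetical stand-ins, not the project's actual types (the real wiring with buffered channels appears in the `mainFunc` hunk later in this diff):

```go
package main

import (
	"fmt"
	"time"
)

// Metric and Collector are illustrative stand-ins, not the project's types.
type Metric struct {
	Name  string
	Value float64
	Time  time.Time
}

type Collector interface {
	Read(out chan Metric)
}

type dummyCollector struct{ name string }

func (c dummyCollector) Read(out chan Metric) {
	out <- Metric{Name: c.name, Value: 1, Time: time.Now()}
}

func main() {
	sink := make(chan Metric, 200)     // input channel of the sink
	received := make(chan Metric, 200) // metrics arriving over the network

	// The receiver runs side-by-side with the timer loop and forwards
	// anything it gets asynchronously to the sink.
	go func() {
		for m := range received {
			sink <- m
		}
	}()
	received <- Metric{Name: "remote_metric", Value: 42, Time: time.Now()}

	// A single timer loop triggers all collectors serially.
	collectors := []Collector{dummyCollector{"cpufreq"}, dummyCollector{"diskstat"}}
	ticker := time.NewTicker(time.Second)
	go func() {
		for range ticker.C {
			for _, c := range collectors {
				c.Read(sink)
			}
		}
	}()

	// The sink consumes everything; this sketch runs as a daemon would.
	for m := range sink {
		fmt.Println(m.Time.Format(time.RFC3339), m.Name, m.Value)
	}
}
```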


@@ -17,7 +17,7 @@ import (
mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
)
@@ -54,7 +54,7 @@ type RuntimeConfig struct {
ReceiveManager receivers.ReceiveManager
MultiChanTicker mct.MultiChanTicker
Channels []chan lp.CCMessage
Channels []chan lp.CCMetric
Sync sync.WaitGroup
}
@@ -242,7 +242,7 @@ func mainFunc() int {
}
// Connect metric router to sink manager
RouterToSinksChannel := make(chan lp.CCMessage, 200)
RouterToSinksChannel := make(chan lp.CCMetric, 200)
rcfg.SinkManager.AddInput(RouterToSinksChannel)
rcfg.MetricRouter.AddOutput(RouterToSinksChannel)
@@ -254,7 +254,7 @@ func mainFunc() int {
}
// Connect collector manager to metric router
CollectToRouterChannel := make(chan lp.CCMessage, 200)
CollectToRouterChannel := make(chan lp.CCMetric, 200)
rcfg.CollectManager.AddOutput(CollectToRouterChannel)
rcfg.MetricRouter.AddCollectorInput(CollectToRouterChannel)
@@ -267,7 +267,7 @@ func mainFunc() int {
}
// Connect receive manager to metric router
ReceiveToRouterChannel := make(chan lp.CCMessage, 200)
ReceiveToRouterChannel := make(chan lp.CCMetric, 200)
rcfg.ReceiveManager.AddOutput(ReceiveToRouterChannel)
rcfg.MetricRouter.AddReceiverInput(ReceiveToRouterChannel)
use_recv = true


@@ -1,33 +1,31 @@
# LIKWID version
LIKWID_VERSION := 5.4.1
LIKWID_INSTALLED_FOLDER := $(shell dirname $$(which likwid-topology 2>/dev/null) 2>/dev/null)
LIKWID_VERSION = 5.2.1
LIKWID_INSTALLED_FOLDER=$(shell dirname $(shell which likwid-topology 2>/dev/null) 2>/dev/null)
LIKWID_FOLDER := $(CURDIR)/likwid
LIKWID_FOLDER="$(shell pwd)/likwid"
all: likwid
all: $(LIKWID_FOLDER)/likwid.h
.ONESHELL:
.PHONY: likwid
likwid:
if [ -n "$(LIKWID_INSTALLED_FOLDER)" ]; then
# Using likwid include files from system installation
INCLUDE_DIR="$(LIKWID_INSTALLED_FOLDER)/../include"
mkdir --parents --verbose "$(LIKWID_FOLDER)"
cp "$${INCLUDE_DIR}"/*.h "$(LIKWID_FOLDER)"
else
# Using likwid include files from downloaded tar archive
if [ -d "$(LIKWID_FOLDER)" ]; then
rm --recursive "$(LIKWID_FOLDER)"
fi
BUILD_FOLDER="$${PWD}/likwidbuild"
mkdir --parents --verbose "$${BUILD_FOLDER}"
wget --output-document=- http://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz |
tar --directory="$${BUILD_FOLDER}" --extract --gz
install -D --verbose --preserve-timestamps --mode=0644 --target-directory="$(LIKWID_FOLDER)" "$${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes"/likwid*.h
rm --recursive "$${BUILD_FOLDER}"
.PHONY: $(LIKWID_FOLDER)/likwid.h
$(LIKWID_FOLDER)/likwid.h:
if [ "$(LIKWID_INSTALLED_FOLDER)" != "" ]; then \
BASE="$(LIKWID_INSTALLED_FOLDER)/../include"; \
mkdir -p $(LIKWID_FOLDER); \
cp $$BASE/*.h $(LIKWID_FOLDER); \
else \
BUILD_FOLDER="$${PWD}/likwidbuild"; \
if [ -d $(LIKWID_FOLDER) ]; then rm -r $(LIKWID_FOLDER); fi; \
mkdir --parents --verbose $(LIKWID_FOLDER) $${BUILD_FOLDER}; \
wget -P "$${BUILD_FOLDER}" http://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz; \
tar -C $${BUILD_FOLDER} -xf $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION).tar.gz; \
install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/likwid*.h $(LIKWID_FOLDER)/; \
install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/bstrlib.h $(LIKWID_FOLDER)/; \
rm -r $${BUILD_FOLDER}; \
fi
.PHONY: clean
clean:
rm -rf likwid
.PHONY: clean


@@ -15,7 +15,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const DEFAULT_BEEGFS_CMD = "beegfs-ctl"
@@ -110,7 +110,7 @@ func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
return nil
}
func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -216,7 +216,7 @@ func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMess
for key, data := range m.matches {
value, _ := strconv.ParseFloat(data, 32)
y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil {
output <- y
}


@@ -15,7 +15,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// Struct for the collector-specific JSON config
@@ -103,7 +103,7 @@ func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
return nil
}
func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -208,7 +208,7 @@ func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCM
for key, data := range m.matches {
value, _ := strconv.ParseFloat(data, 32)
y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil {
output <- y
}


@@ -6,8 +6,8 @@ import (
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
)
@@ -40,14 +40,13 @@ var AvailableCollectors = map[string]MetricCollector{
"rocm_smi": new(RocmSmiCollector),
"self": new(SelfCollector),
"schedstat": new(SchedstatCollector),
"nfsiostat": new(NfsIOStatCollector),
}
// Metric collector manager data structure
type collectorManager struct {
collectors []MetricCollector // List of metric collectors to read in parallel
serial []MetricCollector // List of metric collectors to read serially
output chan lp.CCMessage // Output channels
output chan lp.CCMetric // Output channels
done chan bool // channel to finish / stop metric collector manager
ticker mct.MultiChanTicker // periodically ticking once each interval
duration time.Duration // duration (for metrics that measure over a given duration)
@@ -60,7 +59,7 @@ type collectorManager struct {
// Metric collector manager access functions
type CollectorManager interface {
Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error
AddOutput(output chan lp.CCMessage)
AddOutput(output chan lp.CCMetric)
Start()
Close()
}
@@ -187,7 +186,7 @@ func (cm *collectorManager) Start() {
}
// AddOutput adds the output channel to the metric collector manager
func (cm *collectorManager) AddOutput(output chan lp.CCMessage) {
func (cm *collectorManager) AddOutput(output chan lp.CCMetric) {
cm.output = output
}
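
The manager struct in this hunk keeps two collector lists, one read in parallel and one read serially. A minimal sketch of how a tick could drain both lists — assumed names and a simplified `Read` signature, not the actual manager code:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type Metric struct{ Name string }

// MetricCollector mirrors the interface shape shown in the diff above
// (simplified for this sketch).
type MetricCollector interface {
	Read(interval time.Duration, output chan Metric)
}

type namedCollector struct{ name string }

func (c namedCollector) Read(_ time.Duration, out chan Metric) {
	out <- Metric{Name: c.name}
}

type manager struct {
	parallel []MetricCollector // read concurrently each tick
	serial   []MetricCollector // read strictly one after another
	output   chan Metric
}

func (m *manager) tick(interval time.Duration) {
	// Serial list: fixed order, one collector at a time.
	for _, c := range m.serial {
		c.Read(interval, m.output)
	}
	// Parallel list: fan out into goroutines and wait for all of them.
	var wg sync.WaitGroup
	for _, c := range m.parallel {
		wg.Add(1)
		go func(c MetricCollector) {
			defer wg.Done()
			c.Read(interval, m.output)
		}(c)
	}
	wg.Wait()
}

func main() {
	m := &manager{
		parallel: []MetricCollector{namedCollector{"loadavg"}, namedCollector{"memstat"}},
		serial:   []MetricCollector{namedCollector{"likwid"}},
		output:   make(chan Metric, 200),
	}
	m.tick(time.Second)
	close(m.output)
	for metric := range m.output {
		fmt.Println(metric.Name)
	}
}
```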


@@ -10,22 +10,33 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
//
// CPUFreqCpuInfoCollector
// a metric collector to measure the current frequency of the CPUs
// as obtained from /proc/cpuinfo
// Only measure on the first hyperthread
//
type CPUFreqCpuInfoCollectorTopology struct {
isHT bool
tagSet map[string]string
processor string // logical processor number (continuous, starting at 0)
coreID string // socket local core ID
coreID_int int64
physicalPackageID string // socket / package ID
physicalPackageID_int int64
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
isHT bool
numNonHT string // number of non hyperthreading processors
numNonHT_int int64
tagSet map[string]string
}
type CPUFreqCpuInfoCollector struct {
metricCollector
topology []CPUFreqCpuInfoCollectorTopology
topology []*CPUFreqCpuInfoCollectorTopology
}
func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
@@ -54,9 +65,11 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
// Collect topology information from file cpuinfo
foundFreq := false
processor := ""
var numNonHT_int int64 = 0
coreID := ""
physicalPackageID := ""
m.topology = make([]CPUFreqCpuInfoCollectorTopology, 0)
var maxPhysicalPackageID int64 = 0
m.topology = make([]*CPUFreqCpuInfoCollectorTopology, 0)
coreSeenBefore := make(map[string]bool)
// Read cpuinfo file, line by line
@@ -85,22 +98,41 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
len(coreID) > 0 &&
len(physicalPackageID) > 0 {
topology := new(CPUFreqCpuInfoCollectorTopology)
// Processor
topology.processor = processor
// Core ID
topology.coreID = coreID
topology.coreID_int, err = strconv.ParseInt(coreID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
}
// Physical package ID
topology.physicalPackageID = physicalPackageID
topology.physicalPackageID_int, err = strconv.ParseInt(physicalPackageID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert physicalPackageID '%s' to int64: %v", physicalPackageID, err)
}
// increase maximum socket / package ID when required
if topology.physicalPackageID_int > maxPhysicalPackageID {
maxPhysicalPackageID = topology.physicalPackageID_int
}
// is hyperthread?
globalID := physicalPackageID + ":" + coreID
topology.isHT = coreSeenBefore[globalID]
coreSeenBefore[globalID] = true
if !topology.isHT {
// increase number of non-hyperthreading cores
numNonHT_int++
}
// store collected topology information
m.topology = append(m.topology,
CPUFreqCpuInfoCollectorTopology{
isHT: coreSeenBefore[globalID],
tagSet: map[string]string{
"type": "hwthread",
"type-id": processor,
"package_id": physicalPackageID,
},
},
)
// mark core as seen before
coreSeenBefore[globalID] = true
m.topology = append(m.topology, topology)
// reset topology information
foundFreq = false
@@ -110,16 +142,31 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
}
}
// Check if at least one CPU with frequency information was detected
if len(m.topology) == 0 {
return fmt.Errorf("no CPU frequency info found in %s", cpuInfoFile)
// Check if at least one CPU with frequency information was detected
if len(m.topology) == 0 {
return fmt.Errorf("No CPU frequency info found in %s", cpuInfoFile)
}
numPhysicalPackageID_int := maxPhysicalPackageID + 1
numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
numNonHT := fmt.Sprint(numNonHT_int)
for _, t := range m.topology {
t.numPhysicalPackages = numPhysicalPackageID
t.numPhysicalPackages_int = numPhysicalPackageID_int
t.numNonHT = numNonHT
t.numNonHT_int = numNonHT_int
t.tagSet = map[string]string{
"type": "hwthread",
"type-id": t.processor,
"package_id": t.physicalPackageID,
}
}
m.init = true
return nil
}
func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
@@ -154,7 +201,7 @@ func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CC
fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err))
return
}
if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
output <- y
}
}
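
The hyperthread detection in this hunk boils down to one idea: a hardware thread counts as a hyperthread if its `physicalPackageID:coreID` pair was already seen on an earlier processor entry. A standalone sketch of that technique with illustrative input data, not the collector's actual `/proc/cpuinfo` parser:

```go
package main

import "fmt"

func main() {
	// processor -> (physical package ID, core ID), as read from /proc/cpuinfo;
	// hypothetical two-core package with two hardware threads per core.
	type cpu struct{ pkg, core string }
	cpus := []cpu{{"0", "0"}, {"0", "1"}, {"0", "0"}, {"0", "1"}}

	coreSeenBefore := make(map[string]bool)
	numNonHT := 0
	for i, c := range cpus {
		globalID := c.pkg + ":" + c.core
		isHT := coreSeenBefore[globalID] // seen before => hyperthread
		coreSeenBefore[globalID] = true
		if !isHT {
			numNonHT++ // first sibling of each physical core
		}
		fmt.Printf("cpu%d isHT=%v\n", i, isHT)
	}
	fmt.Println("non-hyperthreading cores:", numNonHT)
}
```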


@@ -1,5 +1,5 @@
## `cpufreq_cpuinfo` collector
## `cpufreq_cpuinfo` collector
```json
"cpufreq_cpuinfo": {}
```
@@ -7,5 +7,4 @@
The `cpufreq_cpuinfo` collector reads the clock frequency from `/proc/cpuinfo` and outputs a handful of **hwthread** metrics.
Metrics:
* `cpufreq`


@@ -10,14 +10,23 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
"github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"golang.org/x/sys/unix"
)
type CPUFreqCollectorTopology struct {
scalingCurFreqFile string
tagSet map[string]string
processor string // logical processor number (continuous, starting at 0)
coreID string // socket local core ID
coreID_int int64
physicalPackageID string // socket / package ID
physicalPackageID_int int64
numPhysicalPackages string // number of sockets / packages
numPhysicalPackages_int int64
isHT bool
numNonHT string // number of non hyper-threading processors
numNonHT_int int64
scalingCurFreqFile string
tagSet map[string]string
}
// CPUFreqCollector
@@ -55,43 +64,117 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
"unit": "Hz",
}
m.topology = make([]CPUFreqCollectorTopology, 0)
for _, c := range ccTopology.CpuData() {
// Loop for all CPU directories
baseDir := "/sys/devices/system/cpu"
globPattern := filepath.Join(baseDir, "cpu[0-9]*")
cpuDirs, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf("unable to glob files with pattern '%s': %v", globPattern, err)
}
if cpuDirs == nil {
return fmt.Errorf("unable to find any files with pattern '%s'", globPattern)
}
// Skip hyper threading CPUs
if c.CpuID != c.CoreCPUsList[0] {
continue
// Initialize CPU topology
m.topology = make([]CPUFreqCollectorTopology, len(cpuDirs))
for _, cpuDir := range cpuDirs {
processor := strings.TrimPrefix(cpuDir, "/sys/devices/system/cpu/cpu")
processor_int, err := strconv.ParseInt(processor, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert cpuID '%s' to int64: %v", processor, err)
}
// Read package ID
physicalPackageIDFile := filepath.Join(cpuDir, "topology", "physical_package_id")
line, err := os.ReadFile(physicalPackageIDFile)
if err != nil {
return fmt.Errorf("unable to read physical package ID from file '%s': %v", physicalPackageIDFile, err)
}
physicalPackageID := strings.TrimSpace(string(line))
physicalPackageID_int, err := strconv.ParseInt(physicalPackageID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert packageID '%s' to int64: %v", physicalPackageID, err)
}
// Read core ID
coreIDFile := filepath.Join(cpuDir, "topology", "core_id")
line, err = os.ReadFile(coreIDFile)
if err != nil {
return fmt.Errorf("unable to read core ID from file '%s': %v", coreIDFile, err)
}
coreID := strings.TrimSpace(string(line))
coreID_int, err := strconv.ParseInt(coreID, 10, 64)
if err != nil {
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
}
// Check access to current frequency file
scalingCurFreqFile := filepath.Join("/sys/devices/system/cpu", fmt.Sprintf("cpu%d", c.CpuID), "cpufreq/scaling_cur_freq")
err := unix.Access(scalingCurFreqFile, unix.R_OK)
scalingCurFreqFile := filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq")
err = unix.Access(scalingCurFreqFile, unix.R_OK)
if err != nil {
return fmt.Errorf("unable to access file '%s': %v", scalingCurFreqFile, err)
}
m.topology = append(m.topology,
CPUFreqCollectorTopology{
tagSet: map[string]string{
"type": "hwthread",
"type-id": fmt.Sprint(c.CpuID),
"package_id": fmt.Sprint(c.Socket),
},
scalingCurFreqFile: scalingCurFreqFile,
},
)
t := &m.topology[processor_int]
t.processor = processor
t.physicalPackageID = physicalPackageID
t.physicalPackageID_int = physicalPackageID_int
t.coreID = coreID
t.coreID_int = coreID_int
t.scalingCurFreqFile = scalingCurFreqFile
}
// is processor a hyper-thread?
coreSeenBefore := make(map[string]bool)
for i := range m.topology {
t := &m.topology[i]
globalID := t.physicalPackageID + ":" + t.coreID
t.isHT = coreSeenBefore[globalID]
coreSeenBefore[globalID] = true
}
// number of non hyper-thread cores and packages / sockets
var numNonHT_int int64 = 0
PhysicalPackageIDs := make(map[int64]struct{})
for i := range m.topology {
t := &m.topology[i]
if !t.isHT {
numNonHT_int++
}
PhysicalPackageIDs[t.physicalPackageID_int] = struct{}{}
}
numPhysicalPackageID_int := int64(len(PhysicalPackageIDs))
numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
numNonHT := fmt.Sprint(numNonHT_int)
for i := range m.topology {
t := &m.topology[i]
t.numPhysicalPackages = numPhysicalPackageID
t.numPhysicalPackages_int = numPhysicalPackageID_int
t.numNonHT = numNonHT
t.numNonHT_int = numNonHT_int
t.tagSet = map[string]string{
"type": "hwthread",
"type-id": t.processor,
"package_id": t.physicalPackageID,
}
}
// Initialized
cclog.ComponentDebug(
m.name,
"initialized",
len(m.topology), "non-hyper-threading CPUs")
numPhysicalPackageID_int, "physical packages,",
len(cpuDirs), "CPUs,",
numNonHT, "non-hyper-threading CPUs")
m.init = true
return nil
}
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
@@ -101,6 +184,11 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage
for i := range m.topology {
t := &m.topology[i]
// skip hyper-threads
if t.isHT {
continue
}
// Read current frequency
line, err := os.ReadFile(t.scalingCurFreqFile)
if err != nil {
@@ -117,7 +205,7 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage
continue
}
if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
output <- y
}
}
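
For reference, the sysfs file this collector polls can be read in a few lines; `scaling_cur_freq` reports the current frequency in kHz. A standalone sketch, not the collector's code:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Current frequency of the first hardware thread, in kHz.
	path := "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq"
	raw, err := os.ReadFile(path)
	if err != nil {
		fmt.Fprintln(os.Stderr, "read failed:", err)
		return
	}
	khz, err := strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 64)
	if err != nil {
		fmt.Fprintln(os.Stderr, "parse failed:", err)
		return
	}
	fmt.Printf("cpu0: %d kHz (%.2f GHz)\n", khz, float64(khz)/1e6)
}
```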


@@ -9,8 +9,8 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
sysconf "github.com/tklauser/go-sysconf"
)
@@ -34,7 +34,7 @@ func (m *CpustatCollector) Init(config json.RawMessage) error {
m.name = "CpustatCollector"
m.setup()
m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "CPU"}
m.meta = map[string]string{"source": m.name, "group": "CPU", "unit": "Percent"}
m.nodetags = map[string]string{"type": "node"}
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
@@ -105,7 +105,7 @@ func (m *CpustatCollector) Init(config json.RawMessage) error {
return nil
}
func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) {
func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMetric, now time.Time, tsdelta time.Duration) {
values := make(map[string]float64)
clktck, _ := sysconf.Sysconf(sysconf.SC_CLK_TCK)
for match, index := range m.matches {
@@ -119,26 +119,15 @@ func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]st
}
}
sum := float64(0)
for name, value := range values {
sum += value
y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value * 100}, now)
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value * 100}, now)
if err == nil {
y.AddTag("unit", "Percent")
output <- y
}
}
if v, ok := values["cpu_idle"]; ok {
sum -= v
y, err := lp.NewMessage("cpu_used", tags, m.meta, map[string]interface{}{"value": sum * 100}, now)
if err == nil {
y.AddTag("unit", "Percent")
output <- y
}
}
}
func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -164,7 +153,7 @@ func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage
}
}
num_cpus_metric, err := lp.NewMessage("num_cpus",
num_cpus_metric, err := lp.New("num_cpus",
m.nodetags,
m.meta,
map[string]interface{}{"value": int(num_cpus)},


@@ -13,15 +13,13 @@ The `cpustat` collector reads data from `/proc/stat` and outputs a handful of **node** metrics.
Metrics:
* `cpu_user` with `unit=Percent`
* `cpu_nice` with `unit=Percent`
* `cpu_system` with `unit=Percent`
* `cpu_idle` with `unit=Percent`
* `cpu_iowait` with `unit=Percent`
* `cpu_irq` with `unit=Percent`
* `cpu_softirq` with `unit=Percent`
* `cpu_steal` with `unit=Percent`
* `cpu_guest` with `unit=Percent`
* `cpu_guest_nice` with `unit=Percent`
* `cpu_used` = `cpu_* - cpu_idle` with `unit=Percent`
* `num_cpus`
* `cpu_user`
* `cpu_nice`
* `cpu_system`
* `cpu_idle`
* `cpu_iowait`
* `cpu_irq`
* `cpu_softirq`
* `cpu_steal`
* `cpu_guest`
* `cpu_guest_nice`
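
The `cpu_used` metric documented above is defined as the sum of all non-idle shares (`cpu_* - cpu_idle`). A standalone sketch of that computation from two `/proc/stat` samples — illustrative only; the collector itself normalizes per interval using `SC_CLK_TCK` as shown in the code hunk above:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"
)

// readJiffies returns the counter columns of the aggregate "cpu" line.
func readJiffies() []int64 {
	data, err := os.ReadFile("/proc/stat")
	if err != nil {
		return nil
	}
	fields := strings.Fields(strings.SplitN(string(data), "\n", 2)[0])
	if len(fields) < 5 { // "cpu" label plus at least user/nice/system/idle
		return nil
	}
	vals := make([]int64, 0, len(fields)-1)
	for _, f := range fields[1:] { // skip the "cpu" label
		v, _ := strconv.ParseInt(f, 10, 64)
		vals = append(vals, v)
	}
	return vals
}

func main() {
	a := readJiffies()
	time.Sleep(time.Second)
	b := readJiffies()
	if len(a) == 0 || len(b) != len(a) {
		fmt.Fprintln(os.Stderr, "could not sample /proc/stat")
		return
	}
	var total, idle int64
	for i := range a {
		total += b[i] - a[i]
		if i == 3 { // the fourth counter column is "idle"
			idle = b[i] - a[i]
		}
	}
	if total > 0 {
		// cpu_used = cpu_* - cpu_idle, as a percentage of all jiffies
		fmt.Printf("cpu_used: %.1f%%\n", 100*float64(total-idle)/float64(total))
	}
}
```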


@@ -9,7 +9,7 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influx "github.com/influxdata/line-protocol"
)
@@ -48,7 +48,7 @@ func (m *CustomCmdCollector) Init(config json.RawMessage) error {
command := exec.Command(cmdfields[0], strings.Join(cmdfields[1:], " "))
command.Wait()
_, err = command.Output()
if err == nil {
if err != nil {
m.commands = append(m.commands, c)
}
}
@@ -75,7 +75,7 @@ var DefaultTime = func() time.Time {
return time.Unix(42, 0)
}
func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -99,7 +99,10 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessa
continue
}
output <- lp.FromInfluxMetric(c)
y := lp.FromInfluxMetric(c)
if err == nil {
output <- y
}
}
}
for _, file := range m.files {
@@ -118,7 +121,10 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessa
if skip {
continue
}
output <- lp.FromInfluxMetric(f)
y := lp.FromInfluxMetric(f)
if err == nil {
output <- y
}
}
}
}


@@ -9,7 +9,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// "log"
@@ -48,7 +48,7 @@ func (m *DiskstatCollector) Init(config json.RawMessage) error {
return nil
}
func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -92,13 +92,13 @@ func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessag
}
tags := map[string]string{"type": "node", "device": linefields[0]}
total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000)
y, err := lp.NewMessage("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
y, err := lp.New("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
if err == nil {
y.AddMeta("unit", "GBytes")
output <- y
}
free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000)
y, err = lp.NewMessage("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
y, err = lp.New("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
if err == nil {
y.AddMeta("unit", "GBytes")
output <- y
@@ -110,7 +110,7 @@ func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessag
}
}
}
y, err := lp.NewMessage("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
y, err := lp.New("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
if err == nil {
y.AddMeta("unit", "percent")
output <- y


@@ -14,7 +14,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const DEFAULT_GPFS_CMD = "mmpmon"
@@ -31,7 +31,6 @@ type GpfsCollector struct {
Mmpmon string `json:"mmpmon_path,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
SendBandwidths bool `json:"send_bandwidths"`
SendTotalValues bool `json:"send_total_values"`
}
skipFS map[string]struct{}
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
@@ -94,7 +93,7 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
return nil
}
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
return
@@ -217,33 +216,13 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert bytes read '%s' to int64: %v", key_value["_br_"], err))
continue
}
if y, err :=
lp.NewMessage(
"gpfs_bytes_read",
m.tags,
m.meta,
map[string]interface{}{
"value": bytesRead,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes")
if y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp); err == nil {
output <- y
}
if m.config.SendBandwidths {
if lastBytesRead := m.lastState[filesystem].bytesRead; lastBytesRead >= 0 {
bwRead := float64(bytesRead-lastBytesRead) / timeDiff
if y, err :=
lp.NewMessage(
"gpfs_bw_read",
m.tags,
m.meta,
map[string]interface{}{
"value": bwRead,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes/sec")
if y, err := lp.New("gpfs_bw_read", m.tags, m.meta, map[string]interface{}{"value": bwRead}, timestamp); err == nil {
output <- y
}
}
@@ -257,33 +236,13 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert bytes written '%s' to int64: %v", key_value["_bw_"], err))
continue
}
if y, err :=
lp.NewMessage(
"gpfs_bytes_written",
m.tags,
m.meta,
map[string]interface{}{
"value": bytesWritten,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes")
if y, err := lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp); err == nil {
output <- y
}
if m.config.SendBandwidths {
if lastBytesWritten := m.lastState[filesystem].bytesRead; lastBytesWritten >= 0 {
bwWrite := float64(bytesWritten-lastBytesWritten) / timeDiff
if y, err :=
lp.NewMessage(
"gpfs_bw_write",
m.tags,
m.meta,
map[string]interface{}{
"value": bwWrite,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes/sec")
if y, err := lp.New("gpfs_bw_write", m.tags, m.meta, map[string]interface{}{"value": bwWrite}, timestamp); err == nil {
output <- y
}
}
@@ -304,7 +263,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
output <- y
}
@@ -316,7 +275,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
output <- y
}
@@ -328,7 +287,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
output <- y
}
@@ -340,7 +299,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
output <- y
}
@@ -352,7 +311,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
output <- y
}
@@ -364,50 +323,9 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err))
continue
}
if y, err := lp.NewMessage("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
if y, err := lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
output <- y
}
// Total values
if m.config.SendTotalValues {
bytesTotal := bytesRead + bytesWritten
if y, err :=
lp.NewMessage("gpfs_bytes_total",
m.tags,
m.meta,
map[string]interface{}{
"value": bytesTotal,
},
timestamp,
); err == nil {
y.AddMeta("unit", "bytes")
output <- y
}
iops := numReads + numWrites
if y, err :=
lp.NewMessage("gpfs_iops",
m.tags,
m.meta,
map[string]interface{}{
"value": iops,
},
timestamp,
); err == nil {
output <- y
}
metaops := numInodeUpdates + numCloses + numOpens + numReaddirs
if y, err :=
lp.NewMessage("gpfs_metaops",
m.tags,
m.meta,
map[string]interface{}{
"value": metaops,
},
timestamp,
); err == nil {
output <- y
}
}
}
}


@@ -6,8 +6,7 @@
"exclude_filesystem": [
"fs1"
],
"send_bandwidths": true,
"send_total_values": true
"send_bandwidths" : true
}
```
@@ -27,12 +26,8 @@ Metrics:
* `gpfs_num_opens`
* `gpfs_num_closes`
* `gpfs_num_reads`
* `gpfs_num_writes`
* `gpfs_num_readdirs`
* `gpfs_num_inode_updates`
* `gpfs_bytes_total = gpfs_bytes_read + gpfs_bytes_written` (if `send_total_values == true`)
* `gpfs_iops = gpfs_num_reads + gpfs_num_writes` (if `send_total_values == true`)
* `gpfs_metaops = gpfs_num_inode_updates + gpfs_num_closes + gpfs_num_opens + gpfs_num_readdirs` (if `send_total_values == true`)
* `gpfs_bw_read` (if `send_bandwidths == true`)
* `gpfs_bw_write` (if `send_bandwidths == true`)
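The `gpfs_bw_*` metrics come from two consecutive reads of the byte counters, divided by the elapsed time; the first read has no predecessor and is skipped. A minimal sketch of that derivation (names are illustrative, not the collector's):
```go
package main

import "fmt"

// bandwidth returns (current-last)/seconds and reports whether the
// result is usable; the first sample has no previous state, which the
// collector signals with a last value of -1.
func bandwidth(current, last int64, seconds float64) (float64, bool) {
	if last < 0 || seconds <= 0 {
		return 0, false
	}
	return float64(current-last) / seconds, true
}

func main() {
	// Two reads of the bytes-read counter, taken 10 seconds apart.
	if bw, ok := bandwidth(5_000_000, 1_000_000, 10.0); ok {
		fmt.Printf("gpfs_bw_read: %.0f bytes/sec\n", bw) // 400000 bytes/sec
	}
}
```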


@@ -5,7 +5,7 @@ import (
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"golang.org/x/sys/unix"
"encoding/json"
@@ -18,22 +18,18 @@ import (
const IB_BASEPATH = "/sys/class/infiniband/"
type InfinibandCollectorMetric struct {
name string
path string
unit string
scale int64
addToIBTotal bool
addToIBTotalPkgs bool
currentState int64
lastState int64
path string
unit string
scale int64
}
type InfinibandCollectorInfo struct {
LID string // IB local Identifier (LID)
device string // IB device
port string // IB device port
portCounterFiles []InfinibandCollectorMetric // mapping counter name -> InfinibandCollectorMetric
tagSet map[string]string // corresponding tag list
LID string // IB local Identifier (LID)
device string // IB device
port string // IB device port
portCounterFiles map[string]InfinibandCollectorMetric // mapping counter name -> InfinibandCollectorMetric
tagSet map[string]string // corresponding tag list
lastState map[string]int64 // State from last measurement
}
type InfinibandCollector struct {
@@ -41,10 +37,9 @@ type InfinibandCollector struct {
config struct {
ExcludeDevices []string `json:"exclude_devices,omitempty"` // IB device to exclude e.g. mlx5_0
SendAbsoluteValues bool `json:"send_abs_values"` // Send absolute values as read from the sys filesystem
SendTotalValues bool `json:"send_total_values"` // Send computed total values
SendDerivedValues bool `json:"send_derived_values"` // Send derived values e.g. rates
}
info []InfinibandCollectorInfo
info []*InfinibandCollectorInfo
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
}
@@ -117,39 +112,11 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
// Check access to counter files
countersDir := filepath.Join(path, "counters")
portCounterFiles := []InfinibandCollectorMetric{
{
name: "ib_recv",
path: filepath.Join(countersDir, "port_rcv_data"),
unit: "bytes",
scale: 4,
addToIBTotal: true,
lastState: -1,
},
{
name: "ib_xmit",
path: filepath.Join(countersDir, "port_xmit_data"),
unit: "bytes",
scale: 4,
addToIBTotal: true,
lastState: -1,
},
{
name: "ib_recv_pkts",
path: filepath.Join(countersDir, "port_rcv_packets"),
unit: "packets",
scale: 1,
addToIBTotalPkgs: true,
lastState: -1,
},
{
name: "ib_xmit_pkts",
path: filepath.Join(countersDir, "port_xmit_packets"),
unit: "packets",
scale: 1,
addToIBTotalPkgs: true,
lastState: -1,
},
portCounterFiles := map[string]InfinibandCollectorMetric{
"ib_recv": {path: filepath.Join(countersDir, "port_rcv_data"), unit: "bytes", scale: 4},
"ib_xmit": {path: filepath.Join(countersDir, "port_xmit_data"), unit: "bytes", scale: 4},
"ib_recv_pkts": {path: filepath.Join(countersDir, "port_rcv_packets"), unit: "packets", scale: 1},
"ib_xmit_pkts": {path: filepath.Join(countersDir, "port_xmit_packets"), unit: "packets", scale: 1},
}
for _, counter := range portCounterFiles {
err := unix.Access(counter.path, unix.R_OK)
@@ -158,8 +125,14 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
}
}
// Initialize last state
lastState := make(map[string]int64)
for counter := range portCounterFiles {
lastState[counter] = -1
}
m.info = append(m.info,
InfinibandCollectorInfo{
&InfinibandCollectorInfo{
LID: LID,
device: device,
port: port,
@@ -170,6 +143,7 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
"port": port,
"lid": LID,
},
lastState: lastState,
})
}
@@ -182,7 +156,7 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
}
// Read reads Infiniband counter files below IB_BASEPATH
func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {
@@ -196,12 +170,8 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
// Save current timestamp
m.lastTimestamp = now
for i := range m.info {
info := &m.info[i]
var ib_total, ib_total_pkts int64
for i := range info.portCounterFiles {
counterDef := &info.portCounterFiles[i]
for _, info := range m.info {
for counterName, counterDef := range info.portCounterFiles {
// Read counter file
line, err := os.ReadFile(counterDef.path)
@@ -218,26 +188,15 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterDef.name, data, err))
fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterName, data, err))
continue
}
// Scale raw value
v *= counterDef.scale
// Save current state
counterDef.currentState = v
// Send absolute values
if m.config.SendAbsoluteValues {
if y, err :=
lp.NewMessage(
counterDef.name,
info.tagSet,
m.meta,
map[string]interface{}{
"value": counterDef.currentState,
},
now); err == nil {
if y, err := lp.New(counterName, info.tagSet, m.meta, map[string]interface{}{"value": v}, now); err == nil {
y.AddMeta("unit", counterDef.unit)
output <- y
}
@@ -245,64 +204,18 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
// Send derived values
if m.config.SendDerivedValues {
if counterDef.lastState >= 0 {
rate := float64((counterDef.currentState - counterDef.lastState)) / timeDiff
if y, err :=
lp.NewMessage(
counterDef.name+"_bw",
info.tagSet,
m.meta,
map[string]interface{}{
"value": rate,
},
now); err == nil {
if info.lastState[counterName] >= 0 {
rate := float64((v - info.lastState[counterName])) / timeDiff
if y, err := lp.New(counterName+"_bw", info.tagSet, m.meta, map[string]interface{}{"value": rate}, now); err == nil {
y.AddMeta("unit", counterDef.unit+"/sec")
output <- y
}
}
counterDef.lastState = counterDef.currentState
}
// Sum up total values
if m.config.SendTotalValues {
switch {
case counterDef.addToIBTotal:
ib_total += counterDef.currentState
case counterDef.addToIBTotalPkgs:
ib_total_pkts += counterDef.currentState
}
// Save current state
info.lastState[counterName] = v
}
}
// Send total values
if m.config.SendTotalValues {
if y, err :=
lp.NewMessage(
"ib_total",
info.tagSet,
m.meta,
map[string]interface{}{
"value": ib_total,
},
now); err == nil {
y.AddMeta("unit", "bytes")
output <- y
}
if y, err :=
lp.NewMessage(
"ib_total_pkts",
info.tagSet,
m.meta,
map[string]interface{}{
"value": ib_total_pkts,
},
now); err == nil {
y.AddMeta("unit", "packets")
output <- y
}
}
}
}


@@ -17,16 +17,13 @@ LID file (`/sys/class/infiniband/<dev>/ports/<port>/lid`)
The devices can be filtered with the `exclude_devices` option in the configuration.
For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`. (See: <https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-class-infiniband>)
For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`.
Metrics:
* `ib_recv`
* `ib_xmit`
* `ib_recv_pkts`
* `ib_xmit_pkts`
* `ib_total = ib_recv + ib_xmit` (if `send_total_values == true`)
* `ib_total_pkts = ib_recv_pkts + ib_xmit_pkts` (if `send_total_values == true`)
* `ib_recv_bw` (if `send_derived_values == true`)
* `ib_xmit_bw` (if `send_derived_values == true`)
* `ib_recv_pkts_bw` (if `send_derived_values == true`)
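A minimal sketch of reading and scaling a single port counter below `IB_BASEPATH` (the `port_rcv_data`/`port_xmit_data` counters count 4-byte words, hence the scale factor 4 used by the collector; the device `mlx5_0` and port `1` in `main` are placeholders):
```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// readCounter reads one InfiniBand port counter below sysfs and applies
// the per-counter scale (4 for the *_data counters, which count 4-byte words).
func readCounter(device, port, counter string, scale int64) (int64, error) {
	path := filepath.Join("/sys/class/infiniband", device, "ports", port, "counters", counter)
	raw, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	v, err := strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 64)
	if err != nil {
		return 0, err
	}
	return v * scale, nil
}

func main() {
	// Placeholder device and port; real devices are discovered below IB_BASEPATH.
	if bytes, err := readCounter("mlx5_0", "1", "port_rcv_data", 4); err == nil {
		fmt.Println("ib_recv:", bytes, "bytes")
	}
}
```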


@@ -5,7 +5,7 @@ import (
"os"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
// "log"
"encoding/json"
@@ -107,7 +107,7 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
return err
}
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -139,7 +139,7 @@ func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage)
x, err := strconv.ParseInt(linefields[idx], 0, 64)
if err == nil {
diff := x - entry.lastValues[name]
y, err := lp.NewMessage(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
y, err := lp.New(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
if err == nil {
output <- y
}


@@ -14,7 +14,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const IPMISENSORS_PATH = `ipmi-sensors`
@@ -55,35 +55,20 @@ func (m *IpmiCollector) Init(config json.RawMessage) error {
// Check if executables ipmitool or ipmisensors are found
p, err := exec.LookPath(m.config.IpmitoolPath)
if err == nil {
command := exec.Command(p)
err := command.Run()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
m.ipmitool = ""
} else {
m.ipmitool = p
}
m.ipmitool = p
}
p, err = exec.LookPath(m.config.IpmisensorsPath)
if err == nil {
command := exec.Command(p)
err := command.Run()
if err != nil {
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
m.ipmisensors = ""
} else {
m.ipmisensors = p
}
m.ipmisensors = p
}
if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 {
return errors.New("no usable IPMI reader found")
return errors.New("no IPMI reader found")
}
m.init = true
return nil
}
func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMetric) {
// Setup ipmitool command
command := exec.Command(cmd, "sensor")
@@ -121,7 +106,7 @@ func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
unit = "Watts"
}
y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
if err == nil {
y.AddMeta("unit", unit)
output <- y
@@ -135,13 +120,13 @@ func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
cclog.ComponentError(
m.name,
fmt.Sprintf("readIpmiTool(): Failed to wait for the end of command \"%s\": %v\n", command.String(), err),
fmt.Sprintf("readIpmiTool(): command stderr: \"%s\"\n", string(errMsg)),
)
cclog.ComponentError(m.name, fmt.Sprintf("readIpmiTool(): command stderr: \"%s\"\n", strings.TrimSpace(string(errMsg))))
return
}
}
func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMetric) {
command := exec.Command(cmd, "--comma-separated-output", "--sdr-cache-recreate")
command.Wait()
@@ -159,7 +144,7 @@ func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
v, err := strconv.ParseFloat(lv[3], 64)
if err == nil {
name := strings.ToLower(strings.Replace(lv[1], " ", "_", -1))
y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
if err == nil {
if len(lv) > 4 {
y.AddMeta("unit", lv[4])
@@ -171,7 +156,7 @@ func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
}
}
func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Check if already initialized
if !m.init {


@@ -15,7 +15,6 @@ import (
"math"
"os"
"os/signal"
"os/user"
"sort"
"strconv"
"strings"
@@ -24,31 +23,25 @@ import (
"time"
"unsafe"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
"github.com/NVIDIA/go-nvml/pkg/dl"
"github.com/fsnotify/fsnotify"
"golang.design/x/thread"
)
const (
LIKWID_LIB_NAME = "liblikwid.so"
LIKWID_LIB_DL_FLAGS = dl.RTLD_LAZY | dl.RTLD_GLOBAL
LIKWID_DEF_ACCESSMODE = "direct"
LIKWID_DEF_LOCKFILE = "/var/run/likwid.lock"
)
type LikwidCollectorMetricConfig struct {
Name string `json:"name"` // Name of the metric
Calc string `json:"calc"` // Calculation for the metric using
Type string `json:"type"` // Metric type (aka node, socket, hwthread, ...)
Publish bool `json:"publish"`
SendCoreTotalVal bool `json:"send_core_total_values,omitempty"`
SendSocketTotalVal bool `json:"send_socket_total_values,omitempty"`
SendNodeTotalVal bool `json:"send_node_total_values,omitempty"`
Unit string `json:"unit"` // Unit of metric if any
Name string `json:"name"` // Name of the metric
Calc string `json:"calc"` // Calculation for the metric using
Type string `json:"type"` // Metric type (aka node, socket, cpu, ...)
Publish bool `json:"publish"`
Unit string `json:"unit"` // Unit of metric if any
}
type LikwidCollectorEventsetConfig struct {
@@ -62,7 +55,7 @@ type LikwidEventsetConfig struct {
eorder []*C.char
estr *C.char
go_estr string
results map[int]map[string]float64
results map[int]map[string]interface{}
metrics map[int]map[string]float64
}
@@ -74,28 +67,22 @@ type LikwidCollectorConfig struct {
AccessMode string `json:"access_mode,omitempty"`
DaemonPath string `json:"accessdaemon_path,omitempty"`
LibraryPath string `json:"liblikwid_path,omitempty"`
LockfilePath string `json:"lockfile_path,omitempty"`
}
type LikwidCollector struct {
metricCollector
cpulist []C.int
cpu2tid map[int]int
sock2tid map[int]int
tid2core map[int]int
tid2socket map[int]int
metrics map[C.int]map[string]int
groups []C.int
config LikwidCollectorConfig
basefreq float64
running bool
initialized bool
needs_reinit bool
myuid int
lock_err_once bool
likwidGroups map[C.int]LikwidEventsetConfig
lock sync.Mutex
measureThread thread.Thread
cpulist []C.int
cpu2tid map[int]int
sock2tid map[int]int
metrics map[C.int]map[string]int
groups []C.int
config LikwidCollectorConfig
gmresults map[int]map[string]float64
basefreq float64
running bool
initialized bool
likwidGroups map[C.int]LikwidEventsetConfig
lock sync.Mutex
}
type LikwidMetric struct {
@@ -105,18 +92,6 @@ type LikwidMetric struct {
group_idx int
}
func checkMetricType(t string) bool {
valid := map[string]bool{
"node": true,
"socket": true,
"hwthread": true,
"core": true,
"memoryDomain": true,
}
_, ok := valid[t]
return ok
}
func eventsToEventStr(events map[string]string) string {
elist := make([]string, 0)
for k, v := range events {
@@ -140,10 +115,10 @@ func genLikwidEventSet(input LikwidCollectorEventsetConfig) LikwidEventsetConfig
elist = append(elist, c_counter)
}
estr := strings.Join(tmplist, ",")
res := make(map[int]map[string]float64)
res := make(map[int]map[string]interface{})
met := make(map[int]map[string]float64)
for _, i := range topo.CpuList() {
res[i] = make(map[string]float64)
res[i] = make(map[string]interface{})
for k := range input.Events {
res[i][k] = 0.0
}
@@ -163,7 +138,7 @@ func genLikwidEventSet(input LikwidCollectorEventsetConfig) LikwidEventsetConfig
}
func testLikwidMetricFormula(formula string, params []string) bool {
myparams := make(map[string]float64)
myparams := make(map[string]interface{})
for _, p := range params {
myparams[p] = float64(1.0)
}
@@ -204,12 +179,9 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
m.name = "LikwidCollector"
m.parallel = false
m.initialized = false
m.needs_reinit = true
m.running = false
m.myuid = os.Getuid()
m.config.AccessMode = LIKWID_DEF_ACCESSMODE
m.config.LibraryPath = LIKWID_LIB_NAME
m.config.LockfilePath = LIKWID_DEF_LOCKFILE
if len(config) > 0 {
err := json.Unmarshal(config, &m.config)
if err != nil {
@@ -243,6 +215,13 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
m.likwidGroups = make(map[C.int]LikwidEventsetConfig)
// m.results = make(map[int]map[int]map[string]interface{})
// m.mresults = make(map[int]map[int]map[string]float64)
m.gmresults = make(map[int]map[string]float64)
for _, tid := range m.cpu2tid {
m.gmresults[tid] = make(map[string]float64)
}
// This is for the global metrics computation test
totalMetrics := 0
// Generate parameter list for the metric computing test
@@ -260,16 +239,12 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
}
for _, metric := range evset.Metrics {
// Try to evaluate the metric
cclog.ComponentDebug(m.name, "Checking", metric.Name)
if !checkMetricType(metric.Type) {
cclog.ComponentError(m.name, "Metric", metric.Name, "uses invalid type", metric.Type)
metric.Calc = ""
} else if !testLikwidMetricFormula(metric.Calc, params) {
cclog.ComponentError(m.name, "Metric", metric.Name, "cannot be calculated with given counters")
metric.Calc = ""
} else {
if testLikwidMetricFormula(metric.Calc, params) {
// Add the computable metric to the parameter list for the global metrics
globalParams = append(globalParams, metric.Name)
totalMetrics++
} else {
metric.Calc = ""
}
}
} else {
@@ -279,14 +254,8 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
}
for _, metric := range m.config.Metrics {
// Try to evaluate the global metric
if !checkMetricType(metric.Type) {
cclog.ComponentError(m.name, "Metric", metric.Name, "uses invalid type", metric.Type)
metric.Calc = ""
} else if !testLikwidMetricFormula(metric.Calc, globalParams) {
cclog.ComponentError(m.name, "Metric", metric.Name, "cannot be calculated with given counters")
metric.Calc = ""
} else if !checkMetricType(metric.Type) {
cclog.ComponentError(m.name, "Metric", metric.Name, "has invalid type")
if !testLikwidMetricFormula(metric.Calc, globalParams) {
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed")
metric.Calc = ""
} else {
totalMetrics++
@@ -299,275 +268,78 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
cclog.ComponentError(m.name, err.Error())
return err
}
ret := C.topology_init()
if ret != 0 {
err := errors.New("failed to initialize topology module")
cclog.ComponentError(m.name, err.Error())
return err
}
m.measureThread = thread.New()
switch m.config.AccessMode {
case "direct":
C.HPMmode(0)
case "accessdaemon":
if len(m.config.DaemonPath) > 0 {
p := os.Getenv("PATH")
os.Setenv("PATH", m.config.DaemonPath+":"+p)
}
C.HPMmode(1)
retCode := C.HPMinit()
if retCode != 0 {
err := fmt.Errorf("C.HPMinit() failed with return code %v", retCode)
cclog.ComponentError(m.name, err.Error())
}
for _, c := range m.cpulist {
m.measureThread.Call(
func() {
retCode := C.HPMaddThread(c)
if retCode != 0 {
err := fmt.Errorf("C.HPMaddThread(%v) failed with return code %v", c, retCode)
cclog.ComponentError(m.name, err.Error())
}
})
}
}
m.sock2tid = make(map[int]int)
tmp := make([]C.int, 1)
for _, sid := range topo.SocketList() {
cstr := C.CString(fmt.Sprintf("S%d:0", sid))
ret = C.cpustr_to_cpulist(cstr, &tmp[0], 1)
if ret > 0 {
m.sock2tid[sid] = m.cpu2tid[int(tmp[0])]
}
C.free(unsafe.Pointer(cstr))
}
cpuData := topo.CpuData()
m.tid2core = make(map[int]int, len(cpuData))
m.tid2socket = make(map[int]int, len(cpuData))
for i := range cpuData {
c := &cpuData[i]
// Hardware thread ID to core ID mapping
if len(c.CoreCPUsList) > 0 {
m.tid2core[c.CpuID] = c.CoreCPUsList[0]
} else {
m.tid2core[c.CpuID] = c.CpuID
}
// Hardware thread ID to socket ID mapping
m.tid2socket[c.CpuID] = c.Socket
}
m.basefreq = getBaseFreq()
m.init = true
return nil
}
// take a measurement for 'interval' seconds of event set index 'group'
func (m *LikwidCollector) takeMeasurement(evidx int, evset LikwidEventsetConfig, interval time.Duration) (bool, error) {
func (m *LikwidCollector) takeMeasurement(evset LikwidEventsetConfig, interval time.Duration) (bool, error) {
var ret C.int
var gid C.int = -1
sigchan := make(chan os.Signal, 1)
// Watch for changes to the lock file
watcher, err := fsnotify.NewWatcher()
if err != nil {
cclog.ComponentError(m.name, err.Error())
return true, err
}
defer watcher.Close()
if len(m.config.LockfilePath) > 0 {
// Check if the lock file exists
info, err := os.Stat(m.config.LockfilePath)
if os.IsNotExist(err) {
// Create the lock file if it does not exist
file, createErr := os.Create(m.config.LockfilePath)
if createErr != nil {
return true, fmt.Errorf("failed to create lock file: %v", createErr)
}
file.Close()
info, err = os.Stat(m.config.LockfilePath) // Recheck the file after creation
}
if err != nil {
return true, err
}
// Check file ownership
uid := info.Sys().(*syscall.Stat_t).Uid
if uid != uint32(m.myuid) {
usr, err := user.LookupId(fmt.Sprint(uid))
if err == nil {
err = fmt.Errorf("access to performance counters locked by %s", usr.Username)
} else {
err = fmt.Errorf("access to performance counters locked by %d", uid)
}
// Only report the error once; suppress it on subsequent reads.
if !m.lock_err_once {
m.lock_err_once = true
} else {
err = nil
}
return true, err
}
// reset lock_err_once
m.lock_err_once = false
// Add the lock file to the watcher
err = watcher.Add(m.config.LockfilePath)
if err != nil {
cclog.ComponentError(m.name, err.Error())
}
}
m.lock.Lock()
defer m.lock.Unlock()
// Initialize the performance monitoring feature by creating basic data structures
select {
case e := <-watcher.Events:
ret = -1
if e.Op != fsnotify.Chmod {
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
if m.initialized {
ret = C.perfmon_setupCounters(evset.gid)
if ret != 0 {
var err error = nil
var skip bool = false
if ret == -37 {
skip = true
} else {
err = fmt.Errorf("failed to setup performance group %d", evset.gid)
}
m.lock.Unlock()
return skip, err
}
default:
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
}
if ret != 0 {
return true, fmt.Errorf("failed to initialize library, error %d", ret)
}
signal.Notify(sigchan, os.Interrupt)
signal.Notify(sigchan, syscall.SIGCHLD)
// Add an event string to LIKWID
select {
case <-sigchan:
gid = -1
case e := <-watcher.Events:
gid = -1
if e.Op != fsnotify.Chmod {
gid = C.perfmon_addEventSet(evset.estr)
}
default:
gid = C.perfmon_addEventSet(evset.estr)
}
if gid < 0 {
return true, fmt.Errorf("failed to add events %s, id %d, error %d", evset.go_estr, evidx, gid)
}
// Setup all performance monitoring counters of an eventSet
select {
case <-sigchan:
ret = -1
case e := <-watcher.Events:
if e.Op != fsnotify.Chmod {
ret = C.perfmon_setupCounters(gid)
}
default:
ret = C.perfmon_setupCounters(gid)
}
if ret != 0 {
return true, fmt.Errorf("failed to setup events '%s', error %d", evset.go_estr, ret)
}
// Start counters
select {
case <-sigchan:
ret = -1
case e := <-watcher.Events:
if e.Op != fsnotify.Chmod {
ret = C.perfmon_startCounters()
}
default:
ret = C.perfmon_startCounters()
}
if ret != 0 {
return true, fmt.Errorf("failed to start events '%s', error %d", evset.go_estr, ret)
}
select {
case <-sigchan:
ret = -1
case e := <-watcher.Events:
if e.Op != fsnotify.Chmod {
ret = C.perfmon_readCounters()
if ret != 0 {
var err error = nil
var skip bool = false
if ret == -37 {
skip = true
} else {
err = fmt.Errorf("failed to setup performance group %d", evset.gid)
}
m.lock.Unlock()
return skip, err
}
default:
ret = C.perfmon_readCounters()
}
if ret != 0 {
return true, fmt.Errorf("failed to read events '%s', error %d", evset.go_estr, ret)
}
// Wait
time.Sleep(interval)
// Read counters
select {
case <-sigchan:
ret = -1
case e := <-watcher.Events:
if e.Op != fsnotify.Chmod {
ret = C.perfmon_readCounters()
m.running = true
time.Sleep(interval)
m.running = false
ret = C.perfmon_stopCounters()
if ret != 0 {
var err error = nil
var skip bool = false
if ret == -37 {
skip = true
} else {
err = fmt.Errorf("failed to setup performance group %d", evset.gid)
}
m.lock.Unlock()
return skip, err
}
default:
ret = C.perfmon_readCounters()
}
if ret != 0 {
return true, fmt.Errorf("failed to read events '%s', error %d", evset.go_estr, ret)
}
m.lock.Unlock()
return false, nil
}
// Store counters
// Get all measurement results for an event set, derive the metric values out of the measurement results and send it
func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMetric) error {
invClock := float64(1.0 / m.basefreq)
// Go over events and get the results
for eidx, counter := range evset.eorder {
gctr := C.GoString(counter)
for _, tid := range m.cpu2tid {
res := C.perfmon_getLastResult(gid, C.int(eidx), C.int(tid))
res := C.perfmon_getLastResult(evset.gid, C.int(eidx), C.int(tid))
fres := float64(res)
if m.config.InvalidToZero && (math.IsNaN(fres) || math.IsInf(fres, 0)) {
fres = 0.0
}
evset.results[tid][gctr] = fres
evset.results[tid]["time"] = interval.Seconds()
evset.results[tid]["inverseClock"] = invClock
}
}
// Store the time in seconds the event group was last measured
for _, tid := range m.cpu2tid {
evset.results[tid]["time"] = float64(C.perfmon_getLastTimeOfGroup(gid))
}
// Stop counters
select {
case <-sigchan:
ret = -1
case e := <-watcher.Events:
if e.Op != fsnotify.Chmod {
ret = C.perfmon_stopCounters()
}
default:
ret = C.perfmon_stopCounters()
}
if ret != 0 {
return true, fmt.Errorf("failed to stop events '%s', error %d", evset.go_estr, ret)
}
// Deallocates all internal data that is used during performance monitoring
signal.Stop(sigchan)
select {
case e := <-watcher.Events:
if e.Op != fsnotify.Chmod {
C.perfmon_finalize()
}
default:
C.perfmon_finalize()
}
return false, nil
}
// Get all measurement results for an event set, derive the metric values out of the measurement results and send it
func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMessage) error {
invClock := float64(1.0 / m.basefreq)
for _, tid := range m.cpu2tid {
evset.results[tid]["inverseClock"] = invClock
evset.results[tid]["gotime"] = interval.Seconds()
}
// Go over the event set metrics, derive the value out of the event:counter values and send it
for _, metric := range m.config.Eventsets[evset.internal].Metrics {
// The metric scope is determined in the Init() function
@@ -576,9 +348,6 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
if metric.Type == "socket" {
scopemap = m.sock2tid
}
// Send all metrics with the same time stamp
// This function only does computation; the counter measurement is done beforehand
now := time.Now()
for domain, tid := range scopemap {
if tid >= 0 && len(metric.Calc) > 0 {
value, err := agg.EvalFloat64Condition(metric.Calc, evset.results[tid])
@@ -591,151 +360,31 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
}
evset.metrics[tid][metric.Name] = value
// Now we have the result, send it with the proper tags
if !math.IsNaN(value) && metric.Publish {
fields := map[string]interface{}{"value": value}
y, err :=
lp.NewMessage(
metric.Name,
map[string]string{
"type": metric.Type,
},
m.meta,
fields,
now,
)
if err == nil {
if metric.Type != "node" {
y.AddTag("type-id", fmt.Sprintf("%d", domain))
if !math.IsNaN(value) {
if metric.Publish {
fields := map[string]interface{}{"value": value}
y, err := lp.New(metric.Name, map[string]string{"type": metric.Type}, m.meta, fields, time.Now())
if err == nil {
if metric.Type != "node" {
y.AddTag("type-id", fmt.Sprintf("%d", domain))
}
if len(metric.Unit) > 0 {
y.AddMeta("unit", metric.Unit)
}
output <- y
}
if len(metric.Unit) > 0 {
y.AddMeta("unit", metric.Unit)
}
output <- y
}
}
}
}
// Send per core aggregated values
if metric.SendCoreTotalVal {
totalCoreValues := make(map[int]float64)
for _, tid := range scopemap {
if tid >= 0 && len(metric.Calc) > 0 {
coreID := m.tid2core[tid]
value := evset.metrics[tid][metric.Name]
if !math.IsNaN(value) && metric.Publish {
totalCoreValues[coreID] += value
}
}
}
for coreID, value := range totalCoreValues {
y, err :=
lp.NewMessage(
metric.Name,
map[string]string{
"type": "core",
"type-id": fmt.Sprintf("%d", coreID),
},
m.meta,
map[string]interface{}{
"value": value,
},
now,
)
if err != nil {
continue
}
if len(metric.Unit) > 0 {
y.AddMeta("unit", metric.Unit)
}
output <- y
}
}
// Send per socket aggregated values
if metric.SendSocketTotalVal {
totalSocketValues := make(map[int]float64)
for _, tid := range scopemap {
if tid >= 0 && len(metric.Calc) > 0 {
socketID := m.tid2socket[tid]
value := evset.metrics[tid][metric.Name]
if !math.IsNaN(value) && metric.Publish {
totalSocketValues[socketID] += value
}
}
}
for socketID, value := range totalSocketValues {
y, err :=
lp.NewMessage(
metric.Name,
map[string]string{
"type": "socket",
"type-id": fmt.Sprintf("%d", socketID),
},
m.meta,
map[string]interface{}{
"value": value,
},
now,
)
if err != nil {
continue
}
if len(metric.Unit) > 0 {
y.AddMeta("unit", metric.Unit)
}
output <- y
}
}
// Send per node aggregated value
if metric.SendNodeTotalVal {
var totalNodeValue float64 = 0.0
for _, tid := range scopemap {
if tid >= 0 && len(metric.Calc) > 0 {
value := evset.metrics[tid][metric.Name]
if !math.IsNaN(value) && metric.Publish {
totalNodeValue += value
}
}
}
y, err :=
lp.NewMessage(
metric.Name,
map[string]string{
"type": "node",
},
m.meta,
map[string]interface{}{
"value": totalNodeValue,
},
now,
)
if err != nil {
continue
}
if len(metric.Unit) > 0 {
y.AddMeta("unit", metric.Unit)
}
output <- y
}
}
return nil
}
// Go over the global metrics, derive the value out of the event sets' metric values and send it
func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, interval time.Duration, output chan lp.CCMessage) error {
// Send all metrics with the same time stamp
// This function only does computation; the counter measurement is done beforehand
now := time.Now()
func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
for _, metric := range m.config.Metrics {
// The metric scope is determined in the Init() function
// Get the map scope-id -> tids
scopemap := m.cpu2tid
if metric.Type == "socket" {
scopemap = m.sock2tid
@@ -743,13 +392,12 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
for domain, tid := range scopemap {
if tid >= 0 {
// Here we generate parameter list
params := make(map[string]float64)
for _, evset := range groups {
params := make(map[string]interface{})
for _, evset := range m.likwidGroups {
for mname, mres := range evset.metrics[tid] {
params[mname] = mres
}
}
params["gotime"] = interval.Seconds()
// Evaluate the metric
value, err := agg.EvalFloat64Condition(metric.Calc, params)
if err != nil {
@@ -759,21 +407,13 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
if m.config.InvalidToZero && (math.IsNaN(value) || math.IsInf(value, 0)) {
value = 0.0
}
m.gmresults[tid][metric.Name] = value
// Now we have the result, send it with the proper tags
if !math.IsNaN(value) {
if metric.Publish {
y, err :=
lp.NewMessage(
metric.Name,
map[string]string{
"type": metric.Type,
},
m.meta,
map[string]interface{}{
"value": value,
},
now,
)
tags := map[string]string{"type": metric.Type}
fields := map[string]interface{}{"value": value}
y, err := lp.New(metric.Name, tags, m.meta, fields, time.Now())
if err == nil {
if metric.Type != "node" {
y.AddTag("type-id", fmt.Sprintf("%d", domain))
@@ -791,52 +431,163 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
return nil
}
func (m *LikwidCollector) ReadThread(interval time.Duration, output chan lp.CCMessage) {
var err error = nil
groups := make([]LikwidEventsetConfig, 0)
func (m *LikwidCollector) LateInit() error {
var ret C.int
if m.initialized {
return nil
}
switch m.config.AccessMode {
case "direct":
C.HPMmode(0)
case "accessdaemon":
if len(m.config.DaemonPath) > 0 {
p := os.Getenv("PATH")
os.Setenv("PATH", m.config.DaemonPath+":"+p)
}
C.HPMmode(1)
}
cclog.ComponentDebug(m.name, "initialize LIKWID topology")
ret = C.topology_init()
if ret != 0 {
err := errors.New("failed to initialize LIKWID topology")
cclog.ComponentError(m.name, err.Error())
return err
}
for evidx, evset := range m.config.Eventsets {
e := genLikwidEventSet(evset)
e.internal = evidx
skip := false
if !skip {
// measure event set 'i' for 'interval' seconds
skip, err = m.takeMeasurement(evidx, e, interval)
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
m.sock2tid = make(map[int]int)
tmp := make([]C.int, 1)
for _, sid := range topo.SocketList() {
cstr := C.CString(fmt.Sprintf("S%d:0", sid))
ret = C.cpustr_to_cpulist(cstr, &tmp[0], 1)
if ret > 0 {
m.sock2tid[sid] = m.cpu2tid[int(tmp[0])]
}
C.free(unsafe.Pointer(cstr))
}
m.basefreq = getBaseFreq()
cclog.ComponentDebug(m.name, "BaseFreq", m.basefreq)
cclog.ComponentDebug(m.name, "initialize LIKWID perfmon module")
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
if ret != 0 {
var err error = nil
C.topology_finalize()
if ret != -22 {
err = errors.New("failed to initialize LIKWID perfmon")
cclog.ComponentError(m.name, err.Error())
} else {
err = errors.New("access to LIKWID perfmon locked")
}
return err
}
// While adding the events, we test whether the metrics can be computed at all
for i, evset := range m.config.Eventsets {
var gid C.int
if len(evset.Events) > 0 {
skip := false
likwidGroup := genLikwidEventSet(evset)
for _, g := range m.likwidGroups {
if likwidGroup.go_estr == g.go_estr {
skip = true
break
}
}
if skip {
continue
}
// Now we add the list of events to likwid
gid = C.perfmon_addEventSet(likwidGroup.estr)
if gid >= 0 {
likwidGroup.gid = gid
likwidGroup.internal = i
m.likwidGroups[gid] = likwidGroup
}
} else {
cclog.ComponentError(m.name, "Invalid Likwid eventset config, no events given")
continue
}
if !skip {
// read measurements and derive event set metrics
m.calcEventsetMetrics(e, interval, output)
groups = append(groups, e)
}
}
if len(groups) > 0 {
// calculate global metrics
m.calcGlobalMetrics(groups, interval, output)
// If no event set could be added, shut down LikwidCollector
if len(m.likwidGroups) == 0 {
C.perfmon_finalize()
C.topology_finalize()
err := errors.New("no LIKWID performance group initialized")
cclog.ComponentError(m.name, err.Error())
return err
}
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGCHLD)
signal.Notify(sigchan, os.Interrupt)
go func() {
<-sigchan
signal.Stop(sigchan)
m.initialized = false
}()
m.initialized = true
return nil
}
// main read function taking multiple measurement rounds, each 'interval' seconds long
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var skip bool = false
var err error
if !m.init {
return
}
m.measureThread.Call(func() {
m.ReadThread(interval, output)
})
if !m.initialized {
m.lock.Lock()
err = m.LateInit()
if err != nil {
m.lock.Unlock()
return
}
m.initialized = true
m.lock.Unlock()
}
if m.initialized && !skip {
for _, evset := range m.likwidGroups {
if !skip {
// measure event set 'i' for 'interval' seconds
skip, err = m.takeMeasurement(evset, interval)
if err != nil {
cclog.ComponentError(m.name, err.Error())
return
}
}
if !skip {
// read measurements and derive event set metrics
m.calcEventsetMetrics(evset, interval, output)
}
}
if !skip {
// use the event set metrics to derive the global metrics
m.calcGlobalMetrics(interval, output)
}
}
}
func (m *LikwidCollector) Close() {
if m.init {
m.init = false
cclog.ComponentDebug(m.name, "Closing ...")
m.lock.Lock()
m.measureThread.Terminate()
m.initialized = false
if m.initialized {
cclog.ComponentDebug(m.name, "Finalize LIKWID perfmon module")
C.perfmon_finalize()
m.initialized = false
}
m.lock.Unlock()
cclog.ComponentDebug(m.name, "Finalize LIKWID topology module")
C.topology_finalize()
cclog.ComponentDebug(m.name, "Closing done")
}
}


@@ -10,12 +10,11 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
"liblikwid_path" : "/path/to/liblikwid.so",
"accessdaemon_path" : "/folder/that/contains/likwid-accessD",
"access_mode" : "direct or accessdaemon or perf_event",
"lockfile_path" : "/var/run/likwid.lock",
"eventsets": [
{
"events" : {
"COUNTER0": "EVENT0",
"COUNTER1": "EVENT1"
"COUNTER1": "EVENT1",
},
"metrics" : [
{
@@ -27,7 +26,7 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
}
]
}
],
]
"globalmetrics" : [
{
"name": "global_sum",
@@ -41,39 +40,34 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
```
The `likwid` configuration consists of two parts, the `eventsets` and `globalmetrics`:
- An event set list itself has two parts, the `events` and a set of derivable `metrics`. Each of the `events` is a `counter:event` pair in LIKWID's syntax. The `metrics` are a list of formulas to derive the metric value from the measurements of the `events`' values. Each metric has a name, the formula, a type and a publish flag. There is an optional `unit` field. Counter names can be used like variables in the formulas, so `PMC0+PMC1` sums the measurements for both events configured in the counters `PMC0` and `PMC1`. You can optionally use `time` for the measurement time and `inverseClock` for `1.0/baseCpuFrequency`. The type tells the LikwidCollector whether it is a metric for each hardware thread (`cpu`) or each CPU socket (`socket`). You may specify a unit for the metric with `unit`. The last one is the publishing flag. It tells the LikwidCollector whether a metric should be sent to the router or is only used internally to compute a global metric.
- The `globalmetrics` are metrics which require data from multiple event set measurements to be derived. The inputs are the metrics in the event sets. Similar to the metrics in the event sets, the global metrics are defined by a name, a formula, a type and a publish flag. See event set metrics for details. The only difference is that there is no access to the raw event measurements anymore but only to the metrics. Also `time` and `inverseClock` cannot be used anymore. So, the idea is to derive a metric in the `eventsets` section and reuse it in the `globalmetrics` part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics (`"publish": false`). **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases. Similar to the metrics in the eventset, you can specify a metric unit with the `unit` field.
- The `globalmetrics` are metrics which require data from multiple event set measurements to be derived. The inputs are the metrics in the event sets. Similar to the metrics in the event sets, the global metrics are defined by a name, a formula, a scope and a publish flag. See event set metrics for details. The only difference is that there is no access to the raw event measurements anymore but only to the metrics. Also `time` and `inverseClock` cannot be used anymore. So, the idea is to derive a metric in the `eventsets` section and reuse it in the `globalmetrics` part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics (`"publish": false`). **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases. Similar to the metrics in the eventset, you can specify a metric unit with the `unit` field.
Additional options:
- `force_overwrite`: Same as setting `LIKWID_FORCE=1`. In case counters are already in use, LIKWID overwrites their configuration to perform its measurements
- `invalid_to_zero`: In some cases, the calculations result in `NaN` or `Inf`. With this option, all `NaN` and `Inf` values are replaced with `0.0`. See below in the [separate section](./likwidMetric.md#invalid_to_zero-option)
- `access_mode`: Specify LIKWID access mode: `direct` for direct register access as root user or `accessdaemon`. The access mode `perf_event` is currently untested.
- `accessdaemon_path`: Folder of the accessDaemon `likwid-accessD` (like `/usr/local/sbin`)
- `liblikwid_path`: Location of `liblikwid.so` including file name like `/usr/local/lib/liblikwid.so`
- `lockfile_path`: Location of LIKWID's lock file if multiple tools should access the hardware counters. Default `/var/run/likwid.lock`
### Available metric types
### Available metric scopes
Hardware performance counters are scattered all over the system nowadays. A counter covers a specific part of the system. While there are hardware-thread-specific counters for CPU cycles, instructions and so on, others are specific to a whole CPU socket/package. To address that, the LikwidCollector provides the specification of a `type` for each metric.
- `hwthread` : One metric per CPU hardware thread with the tags `"type" : "hwthread"` and `"type-id" : "$hwthread_id"`
- `socket` : One metric per CPU socket/package with the tags `"type" : "socket"` and `"type-id" : "$socket_id"`
**Note:** You cannot specify the `socket` type for a metric that is measured at the `hwthread` type, so some kind of expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the type of each counter from the *Architecture* pages; as soon as one counter in a metric is socket-specific, the whole metric is socket-specific.
**Note:** You cannot specify the `socket` scope for a metric that is measured at the `hwthread` scope, so some kind of expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the scope of each counter from the *Architecture* pages; as soon as one counter in a metric is socket-specific, the whole metric is socket-specific.
As a guideline:
- All counters `FIXCx`, `PMCy` and `TMAz` have the type `hwthread`
- All counter names containing `BOX` have the type `socket`
- All `PWRx` counters have the type `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"`, which has the `hwthread` type
- All `DFCx` counters have type `socket`
- All counters `FIXCx`, `PMCy` and `TMAz` have the scope `hwthread`
- All counter names containing `BOX` have the scope `socket`
- All `PWRx` counters have the scope `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"`, which has the `hwthread` scope
- All `DFCx` counters have scope `socket`
### Help with the configuration
The configuration for the `likwid` collector is quite complicated. Most users don't use LIKWID with the event:counter notation but rely on the performance groups defined by the LIKWID team for each architecture. In order to help with the `likwid` collector configuration, we included a script `scripts/likwid_perfgroup_to_cc_config.py` that creates the configuration of an `eventset` from a performance group (using a LIKWID installation in `$PATH`):
```
$ likwid-perfctr -i
[...]
@@ -99,7 +93,7 @@ $ scripts/likwid_perfgroup_to_cc_config.py ICX MEM_DP
"name": "Runtime (RDTSC) [s]",
"publish": true,
"unit": "seconds"
"type": "hwthread"
"scope": "hwthread"
},
{
"..." : "..."
@@ -115,31 +109,20 @@ You can copy this JSON and add it to the `eventsets` list. If you specify multip
LIKWID checks the file `/var/run/likwid.lock` before performing any interfering operations. Who is allowed to access the counters is determined by the owner of the file. If it does not exist, it is created for the current user. So, if you want to temporarily allow counter access to a user (e.g. in a job):
Before (SLURM prolog, ...)
```bash
chown $JOBUSER /var/run/likwid.lock
```
$ chown $JOBUSER /var/run/likwid.lock
```
After (SLURM epilog, ...)
```bash
chown $CCUSER /var/run/likwid.lock
```
$ chown $CCUSER /var/run/likwid.lock
```
### `invalid_to_zero` option
In some cases LIKWID returns `0.0` for events that are further used in processing and may be used as a divisor in a calculation. After evaluation of a metric, the result might then be `NaN` or `+-Inf`. Such metrics are commonly not created and forwarded to the router because the [InfluxDB line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#float) does not support these special floating-point values. If you want to have them sent anyway, this option forces these metric values to be `0.0` instead.
One might think this does not happen often, but frequently used metrics in the world of performance engineering, like instructions per cycle (IPC) or, even more commonly, the actual CPU clock, are derived from events like `CPU_CLK_UNHALTED_CORE` (Intel) which do not increment in a halted state (as the name implies). There are various power management mechanisms in a chip that can put a hardware thread into such a state. Moreover, if a core executes no cycles, many other events are not incremented either (like `INSTR_RETIRED_ANY`, which counts retired instructions and is the other part of IPC).
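The substitution itself is a one-line guard; a self-contained sketch mirroring the `math.IsNaN`/`math.IsInf` check from the collector:
```go
package main

import (
	"fmt"
	"math"
)

// sanitize mirrors invalid_to_zero: NaN and +-Inf are replaced by 0.0 so
// the metric survives the InfluxDB line-protocol encoding.
func sanitize(value float64, invalidToZero bool) float64 {
	if invalidToZero && (math.IsNaN(value) || math.IsInf(value, 0)) {
		return 0.0
	}
	return value
}

func main() {
	cycles := 0.0                    // e.g. CPU_CLK_UNHALTED_CORE on a halted thread
	ipc := 1000.0 / cycles           // +Inf at runtime
	fmt.Println(sanitize(ipc, true)) // prints 0
	fmt.Println(sanitize(1.5, true)) // valid values pass through unchanged
}
```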
### `lockfile_path` option
LIKWID can be configured with a lock file that restricts access to the performance monitoring registers (only the owner of the lock file is allowed to access the registers). When the `lockfile_path` option is set, the collector subscribes to changes of this file and stops monitoring if the owner of the lock file changes. This feature is useful when users should be able to perform their own hardware performance counter measurements through LIKWID or any other tool.
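A sketch of the mechanism behind this option, assuming the `fsnotify` watch and UID comparison used in the collector; error handling and re-arming of the watch are trimmed:
```go
package main

import (
	"fmt"
	"os"
	"syscall"

	"github.com/fsnotify/fsnotify"
)

// watchLock blocks until the lock file changes, then reports whether
// this process still owns it and may touch the performance counters.
func watchLock(path string) (bool, error) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return false, err
	}
	defer watcher.Close()
	if err := watcher.Add(path); err != nil {
		return false, err
	}
	<-watcher.Events // e.g. a prolog script ran chown on the lock file
	info, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return info.Sys().(*syscall.Stat_t).Uid == uint32(os.Getuid()), nil
}

func main() {
	if ok, err := watchLock("/var/run/likwid.lock"); err == nil && !ok {
		fmt.Println("lock file owner changed, stop measuring")
	}
}
```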
### `send_*_total_values` option
- `send_core_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per CPU core.
- `send_socket_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per CPU socket.
- `send_node_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per node (see the sketch after this list).
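A minimal sketch of the per-core case (per-socket and per-node aggregation work the same way, only the hwthread-to-ID mapping differs; `tid2core` here is an illustrative stand-in for the collector's internal map):
```go
package main

import "fmt"

// aggregateByCore sums per-hwthread metric values into per-core totals.
func aggregateByCore(values map[int]float64, tid2core map[int]int) map[int]float64 {
	totals := make(map[int]float64)
	for tid, v := range values {
		totals[tid2core[tid]] += v
	}
	return totals
}

func main() {
	// Hardware threads 0 and 64 share core 0; threads 1 and 65 share core 1.
	tid2core := map[int]int{0: 0, 64: 0, 1: 1, 65: 1}
	perThread := map[int]float64{0: 1.5, 64: 0.5, 1: 2.0, 65: 1.0}
	fmt.Println(aggregateByCore(perThread, tid2core)) // map[0:2 1:3]
}
```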
### Example configuration
@@ -244,7 +227,6 @@ LIKWID can be configured with a lock file with which the access to the performan
The `likwid` collector reads hardware performance counters at a **hwthread** and **socket** level. The configuration looks quite complicated, but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector went through multiple iterations and tried to use the performance groups directly, but that approach lacked flexibility. The current way of configuration provides the most flexibility.
The logic is as follows: there are multiple eventsets, each consisting of a list of counters+events and a list of metrics. If you compare a common performance group with the example setting above, there is not much difference:
```
EVENTSET -> "events": {
FIXC1 ACTUAL_CPU_CLOCK -> "FIXC1": "ACTUAL_CPU_CLOCK",
@@ -263,7 +245,7 @@ METRICS -> "metrics": [
IPC PMC0/PMC1 -> {
-> "name" : "IPC",
-> "calc" : "PMC0/PMC1",
-> "type": "hwthread",
-> "scope": "hwthread",
-> "publish": true
-> }
-> ]


@@ -8,16 +8,18 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
//
// LoadavgCollector collects:
// * load average of last 1, 5 & 15 minutes
// * number of processes currently runnable
// * total number of processes in system
//
// See: https://www.kernel.org/doc/html/latest/filesystems/proc.html
//
const LOADAVGFILE = "/proc/loadavg"
type LoadavgCollector struct {
@@ -66,15 +68,17 @@ func (m *LoadavgCollector) Init(config json.RawMessage) error {
return nil
}
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
buffer, err := os.ReadFile(LOADAVGFILE)
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
if err != nil {
cclog.ComponentError(
m.name,
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
}
return
}
now := time.Now()
@@ -92,7 +96,7 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
if m.load_skips[i] {
continue
}
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil {
output <- y
}
@@ -111,7 +115,7 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
if m.proc_skips[i] {
continue
}
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
if err == nil {
output <- y
}

View File

@@ -11,7 +11,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const LUSTRE_SYSFS = `/sys/fs/lustre`
@@ -101,7 +101,7 @@ func getMetricData(lines []string, prefix string, offset int) (int64, error) {
// llitedir := filepath.Join(LUSTRE_SYSFS, "llite")
// devdir := filepath.Join(llitedir, device)
// statsfile := filepath.Join(devdir, "stats")
// buffer, err := os.ReadFile(statsfile)
// buffer, err := ioutil.ReadFile(statsfile)
// if err != nil {
// return make([]string, 0)
// }
@@ -377,7 +377,7 @@ func (m *LustreCollector) Init(config json.RawMessage) error {
return nil
}
func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -388,7 +388,7 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
for _, def := range m.definitions {
var use_x int64
var err error
var y lp.CCMessage
var y lp.CCMetric
x, err := getMetricData(data, def.lineprefix, def.lineoffset)
if err == nil {
use_x = x
@@ -399,19 +399,19 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
switch def.calc {
case "none":
value = use_x
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "difference":
value = use_x - devData[def.name]
if value.(int64) < 0 {
value = 0
}
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
case "derivative":
value = float64(use_x-devData[def.name]) / tdiff.Seconds()
if value.(float64) < 0 {
value = 0
}
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
}
if err == nil {
y.AddTag("device", device)

View File

@@ -13,7 +13,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const MEMSTATFILE = "/proc/meminfo"
@@ -159,7 +159,7 @@ func (m *MemstatCollector) Init(config json.RawMessage) error {
return err
}
func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -175,7 +175,7 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
}
}
y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
if err == nil {
if len(unit) > 0 {
y.AddMeta("unit", unit)
@@ -208,7 +208,7 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
}
}
}
y, err := lp.NewMessage("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
y, err := lp.New("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
if err == nil {
if len(unit) > 0 {
y.AddMeta("unit", unit)

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type MetricCollector interface {
@@ -13,7 +13,7 @@ type MetricCollector interface {
Init(config json.RawMessage) error // Initialize metric collector
Initialized() bool // Is metric collector initialized?
Parallel() bool
Read(duration time.Duration, output chan lp.CCMessage) // Read metrics from metric collector
Read(duration time.Duration, output chan lp.CCMetric) // Read metrics from metric collector
Close() // Close / finish metric collector
}

View File

@@ -10,7 +10,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const NETSTATFILE = "/proc/net/dev"
@@ -102,7 +102,7 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
// Check if device is a included device
if _, ok := stringArrayContains(m.config.IncludeDevices, dev); ok {
tags := map[string]string{"stype": "network", "stype-id": dev, "type": "node"}
tags := map[string]string{"device": dev, "type": "node"}
meta_unit_byte := map[string]string{"source": m.name, "group": "Network", "unit": "bytes"}
meta_unit_byte_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "bytes/sec"}
meta_unit_pkts := map[string]string{"source": m.name, "group": "Network", "unit": "packets"}
@@ -153,7 +153,7 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
return nil
}
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -197,14 +197,14 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
continue
}
if m.config.SendAbsoluteValues {
if y, err := lp.NewMessage(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
if y, err := lp.New(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
output <- y
}
}
if m.config.SendDerivedValues {
if metric.lastValue >= 0 {
rate := float64(v-metric.lastValue) / timeDiff
if y, err := lp.NewMessage(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
if y, err := lp.New(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
output <- y
}
}

View File

@@ -23,5 +23,5 @@ Metrics:
* `net_pkts_in_bw` (`unit=packets/sec` if `send_derived_values == true`)
* `net_pkts_out_bw` (`unit=packets/sec` if `send_derived_values == true`)
The device name is added as tag `stype=network,stype-id=<device>`.
The device name is added as tag `device`.

View File

@@ -11,7 +11,7 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// First part contains the code for the general NfsCollector.
@@ -118,7 +118,7 @@ func (m *nfsCollector) MainInit(config json.RawMessage) error {
return nil
}
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -140,7 +140,7 @@ func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
continue
}
value := data.current - data.last
y, err := lp.NewMessage(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddMeta("version", m.version)
output <- y

View File

@@ -1,166 +0,0 @@
package collectors
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
)
// These are the fields we read from the JSON configuration
type NfsIOStatCollectorConfig struct {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
UseServerAddressAsSType bool `json:"use_server_as_stype,omitempty"`
}
// This contains all variables we need during execution and the variables
// defined by metricCollector (name, init, ...)
type NfsIOStatCollector struct {
metricCollector
config NfsIOStatCollectorConfig // the configuration structure
meta map[string]string // default meta information
tags map[string]string // default tags
data map[string]map[string]int64 // data storage for difference calculation
key string // which device info should be used as subtype ID? 'server' or 'mntpoint', see NfsIOStatCollectorConfig.UseServerAddressAsSType
}
var deviceRegex = regexp.MustCompile(`device (?P<server>[^ ]+) mounted on (?P<mntpoint>[^ ]+) with fstype nfs(?P<version>\d*) statvers=[\d\.]+`)
var bytesRegex = regexp.MustCompile(`\s+bytes:\s+(?P<nread>[^ ]+) (?P<nwrite>[^ ]+) (?P<dread>[^ ]+) (?P<dwrite>[^ ]+) (?P<nfsread>[^ ]+) (?P<nfswrite>[^ ]+) (?P<pageread>[^ ]+) (?P<pagewrite>[^ ]+)`)
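// Example lines from /proc/self/mountstats matched by the two patterns above
// (illustrative values):
//   device nfsserver:/export mounted on /mnt/data with fstype nfs4 statvers=1.1
//     bytes: 1035936 353929 0 0 1035936 353929 253 101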
func resolve_regex_fields(s string, regex *regexp.Regexp) map[string]string {
fields := make(map[string]string)
groups := regex.SubexpNames()
for _, match := range regex.FindAllStringSubmatch(s, -1) {
for groupIdx, group := range match {
if len(groups[groupIdx]) > 0 {
fields[groups[groupIdx]] = group
}
}
}
return fields
}
func (m *NfsIOStatCollector) readNfsiostats() map[string]map[string]int64 {
data := make(map[string]map[string]int64)
filename := "/proc/self/mountstats"
stats, err := os.ReadFile(filename)
if err != nil {
return data
}
lines := strings.Split(string(stats), "\n")
var current map[string]string = nil
for _, l := range lines {
// Is this a device line with mount point, remote target and NFS version?
dev := resolve_regex_fields(l, deviceRegex)
if len(dev) > 0 {
if _, ok := stringArrayContains(m.config.ExcludeFilesystem, dev[m.key]); !ok {
current = dev
if len(current["version"]) == 0 {
current["version"] = "3"
}
}
}
if len(current) > 0 {
// Parse the bytes line (only if we found the matching device line before)
bytes := resolve_regex_fields(l, bytesRegex)
if len(bytes) > 0 {
data[current[m.key]] = make(map[string]int64)
for name, sval := range bytes {
if _, ok := stringArrayContains(m.config.ExcludeMetrics, name); !ok {
val, err := strconv.ParseInt(sval, 10, 64)
if err == nil {
data[current[m.key]][name] = val
}
}
}
current = nil
}
}
}
return data
}
func (m *NfsIOStatCollector) Init(config json.RawMessage) error {
var err error = nil
m.name = "NfsIOStatCollector"
m.setup()
m.parallel = true
m.meta = map[string]string{"source": m.name, "group": "NFS", "unit": "bytes"}
m.tags = map[string]string{"type": "node"}
m.config.UseServerAddressAsSType = false
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
if err != nil {
cclog.ComponentError(m.name, "Error reading config:", err.Error())
return err
}
}
m.key = "mntpoint"
if m.config.UseServerAddressAsSType {
m.key = "server"
}
m.data = m.readNfsiostats()
m.init = true
return err
}
func (m *NfsIOStatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
timestamp := time.Now()
// Get the current values for all mountpoints
newdata := m.readNfsiostats()
for mntpoint, values := range newdata {
// Was the mount point already present in the last iteration
if old, ok := m.data[mntpoint]; ok {
// Calculate the difference of old and new values
for i := range values {
x := values[i] - old[i]
y, err := lp.NewMessage(fmt.Sprintf("nfsio_%s", i), m.tags, m.meta, map[string]interface{}{"value": x}, timestamp)
if err == nil {
if strings.HasPrefix(i, "page") {
y.AddMeta("unit", "4K_Pages")
}
y.AddTag("stype", "filesystem")
y.AddTag("stype-id", mntpoint)
// Send it to output channel
output <- y
}
// Update old to the new value for the next iteration
old[i] = values[i]
}
} else {
// First time we see this mount point, store all values
m.data[mntpoint] = values
}
}
// Reset entries that do not exist anymore
for mntpoint := range m.data {
found := false
for new := range newdata {
if new == mntpoint {
found = true
break
}
}
if !found {
m.data[mntpoint] = nil
}
}
}
func (m *NfsIOStatCollector) Close() {
// Unset flag
m.init = false
}

View File

@@ -1,27 +0,0 @@
## `nfsiostat` collector
```json
"nfsiostat": {
"exclude_metrics": [
"nfsio_oread"
],
"exclude_filesystems" : [
"/mnt",
],
"use_server_as_stype": false
}
```
The `nfsiostat` collector reads data from `/proc/self/mountstats` and outputs a handful of **node** metrics for each NFS filesystem. If a metric or filesystem is not required, it can be excluded from being forwarded to the sink.
Metrics:
* `nfsio_nread`: Bytes transferred by normal `read()` calls
* `nfsio_nwrite`: Bytes transferred by normal `write()` calls
* `nfsio_oread`: Bytes transferred by `read()` calls with `O_DIRECT`
* `nfsio_owrite`: Bytes transferred by `write()` calls with `O_DIRECT`
* `nfsio_pageread`: Pages transferred by `read()` calls
* `nfsio_pagewrite`: Pages transferred by `write()` calls
* `nfsio_nfsread`: Bytes transferred for reading from the server
* `nfsio_nfswrite`: Bytes transferred by writing to the server
The `nfsiostat` collector adds the mountpoint to the tags as `stype=filesystem,stype-id=<mountpoint>`. If the server address should be used instead of the mountpoint, use the `use_server_as_stype` config setting.

View File

@@ -11,7 +11,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// Non-Uniform Memory Access (NUMA) policy hit/miss statistics
@@ -97,7 +97,7 @@ func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
return nil
}
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -130,7 +130,7 @@ func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessa
fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err))
continue
}
y, err := lp.NewMessage(
y, err := lp.New(
"numastats_"+key,
t.tagSet,
m.meta,

View File

@@ -8,8 +8,8 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/nvml"
)
@@ -71,14 +71,6 @@ func (m *NvidiaCollector) Init(config json.RawMessage) error {
// Initialize NVIDIA Management Library (NVML)
ret := nvml.Init()
// Error: NVML library not found
// (nvml.ErrorString cannot be used in this case)
if ret == nvml.ERROR_LIBRARY_NOT_FOUND {
err = fmt.Errorf("NVML library not found")
cclog.ComponentError(m.name, err.Error())
return err
}
if ret != nvml.SUCCESS {
err = errors.New(nvml.ErrorString(ret))
cclog.ComponentError(m.name, "Unable to initialize NVML", err.Error())
@@ -206,7 +198,7 @@ func (m *NvidiaCollector) Init(config json.RawMessage) error {
return nil
}
func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_fb_mem_total"] || !device.excludeMetrics["nv_fb_mem_used"] || !device.excludeMetrics["nv_fb_mem_reserved"] {
var total uint64
var used uint64
@@ -222,7 +214,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
if !device.excludeMetrics["nv_fb_mem_total"] {
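// framebuffer memory sizes are reported in bytes; convert to MByte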
t := float64(total) / (1024 * 1024)
y, err := lp.NewMessage("nv_fb_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_fb_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -231,7 +223,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
if !device.excludeMetrics["nv_fb_mem_used"] {
f := float64(used) / (1024 * 1024)
y, err := lp.NewMessage("nv_fb_mem_used", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
y, err := lp.New("nv_fb_mem_used", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -240,7 +232,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
if v2 && !device.excludeMetrics["nv_fb_mem_reserved"] {
r := float64(reserved) / (1024 * 1024)
y, err := lp.NewMessage("nv_fb_mem_reserved", device.tags, device.meta, map[string]interface{}{"value": r}, time.Now())
y, err := lp.New("nv_fb_mem_reserved", device.tags, device.meta, map[string]interface{}{"value": r}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -250,7 +242,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
return nil
}
func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_bar1_mem_total"] || !device.excludeMetrics["nv_bar1_mem_used"] {
meminfo, ret := nvml.DeviceGetBAR1MemoryInfo(device.device)
if ret != nvml.SUCCESS {
@@ -259,7 +251,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
}
if !device.excludeMetrics["nv_bar1_mem_total"] {
t := float64(meminfo.Bar1Total) / (1024 * 1024)
y, err := lp.NewMessage("nv_bar1_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_bar1_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -267,7 +259,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
}
if !device.excludeMetrics["nv_bar1_mem_used"] {
t := float64(meminfo.Bar1Used) / (1024 * 1024)
y, err := lp.NewMessage("nv_bar1_mem_used", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_bar1_mem_used", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "MByte")
output <- y
@@ -277,7 +269,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
return nil
}
func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
if ret != nvml.SUCCESS {
err := errors.New(nvml.ErrorString(ret))
@@ -301,14 +293,14 @@ func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) err
util, ret := nvml.DeviceGetUtilizationRates(device.device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_util"] {
y, err := lp.NewMessage("nv_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
y, err := lp.New("nv_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
}
}
if !device.excludeMetrics["nv_mem_util"] {
y, err := lp.NewMessage("nv_mem_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
y, err := lp.New("nv_mem_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -319,7 +311,7 @@ func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) err
return nil
}
func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readTemp(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_temp"] {
// Retrieves the current temperature readings for the device, in degrees C.
//
@@ -328,7 +320,7 @@ func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// * NVML_TEMPERATURE_COUNT
temp, ret := nvml.DeviceGetTemperature(device.device, nvml.TEMPERATURE_GPU)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_temp", device.tags, device.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
y, err := lp.New("nv_temp", device.tags, device.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
if err == nil {
y.AddMeta("unit", "degC")
output <- y
@@ -338,7 +330,7 @@ func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}
func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readFan(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_fan"] {
// Retrieves the intended operating speed of the device's fan.
//
@@ -351,7 +343,7 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// This value may exceed 100% in certain cases.
fan, ret := nvml.DeviceGetFanSpeed(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
y, err := lp.New("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -361,14 +353,14 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}
// func readFans(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// func readFans(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// if !device.excludeMetrics["nv_fan"] {
// numFans, ret := nvml.DeviceGetNumFans(device.device)
// if ret == nvml.SUCCESS {
// for i := 0; i < numFans; i++ {
// fan, ret := nvml.DeviceGetFanSpeed_v2(device.device, i)
// if ret == nvml.SUCCESS {
// y, err := lp.NewMessage("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
// y, err := lp.New("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
// if err == nil {
// y.AddMeta("unit", "%")
// y.AddTag("stype", "fan")
@@ -382,7 +374,7 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// return nil
// }
func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_ecc_mode"] {
// Retrieves the current and pending ECC modes for the device.
//
@@ -393,21 +385,21 @@ func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
// The "pending" ECC mode refers to the target mode following the next reboot.
_, ecc_pend, ret := nvml.DeviceGetEccMode(device.device)
if ret == nvml.SUCCESS {
var y lp.CCMessage
var y lp.CCMetric
var err error
switch ecc_pend {
case nvml.FEATURE_DISABLED:
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "OFF"}, time.Now())
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "OFF"}, time.Now())
case nvml.FEATURE_ENABLED:
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "ON"}, time.Now())
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "ON"}, time.Now())
default:
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
}
if err == nil {
output <- y
}
} else if ret == nvml.ERROR_NOT_SUPPORTED {
y, err := lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "N/A"}, time.Now())
y, err := lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "N/A"}, time.Now())
if err == nil {
output <- y
}
@@ -416,7 +408,7 @@ func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}
func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_perf_state"] {
// Retrieves the current performance state for the device.
//
@@ -427,7 +419,7 @@ func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error
// 32: Unknown performance state.
pState, ret := nvml.DeviceGetPerformanceState(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_perf_state", device.tags, device.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
y, err := lp.New("nv_perf_state", device.tags, device.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
if err == nil {
output <- y
}
@@ -436,7 +428,7 @@ func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error
return nil
}
func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_power_usage"] {
// Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
//
@@ -450,7 +442,7 @@ func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
if mode == nvml.FEATURE_ENABLED {
power, ret := nvml.DeviceGetPowerUsage(device.device)
if ret == nvml.SUCCESS {
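// power usage is reported in milliwatts; convert to watts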
y, err := lp.NewMessage("nv_power_usage", device.tags, device.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
y, err := lp.New("nv_power_usage", device.tags, device.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
output <- y
@@ -461,7 +453,7 @@ func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
return nil
}
func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readClocks(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// Retrieves the current clock speeds for the device.
//
// Available clock information:
@@ -471,7 +463,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_graphics_clock"] {
graphicsClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
y, err := lp.New("nv_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -482,7 +474,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_sm_clock"] {
smClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(smClock)}, time.Now())
y, err := lp.New("nv_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(smClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -493,7 +485,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_mem_clock"] {
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
y, err := lp.New("nv_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -503,7 +495,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
if !device.excludeMetrics["nv_video_clock"] {
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_VIDEO)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
y, err := lp.New("nv_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -513,7 +505,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
return nil
}
func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// Retrieves the maximum clock speeds for the device.
//
// Available clock information:
@@ -528,7 +520,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_graphics_clock"] {
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_GRAPHICS)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
y, err := lp.New("nv_max_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -539,7 +531,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_sm_clock"] {
maxSmClock, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_SM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
y, err := lp.New("nv_max_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -550,7 +542,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_mem_clock"] {
maxMemClock, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_MEM)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
y, err := lp.New("nv_max_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -561,7 +553,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_max_video_clock"] {
maxVideoClock, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_VIDEO)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_max_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxVideoClock)}, time.Now())
y, err := lp.New("nv_max_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxVideoClock)}, time.Now())
if err == nil {
y.AddMeta("unit", "MHz")
output <- y
@@ -571,7 +563,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
return nil
}
func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_ecc_uncorrected_error"] {
// Retrieves the total ECC error counts for the device.
//
@@ -584,7 +576,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
// i.e. the total set of errors across the entire device.
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_UNCORRECTED, nvml.AGGREGATE_ECC)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_ecc_uncorrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
y, err := lp.New("nv_ecc_uncorrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
if err == nil {
output <- y
}
@@ -593,7 +585,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
if !device.excludeMetrics["nv_ecc_corrected_error"] {
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_CORRECTED, nvml.AGGREGATE_ECC)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_ecc_corrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
y, err := lp.New("nv_ecc_corrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
if err == nil {
output <- y
}
@@ -602,7 +594,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
return nil
}
func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_power_max_limit"] {
// Retrieves the power management limit associated with this device.
//
@@ -612,7 +604,7 @@ func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
// If the card's total power draw reaches this limit the power management algorithm kicks in.
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device.device)
if ret == nvml.SUCCESS {
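// the power management limit is reported in milliwatts; convert to watts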
y, err := lp.NewMessage("nv_power_max_limit", device.tags, device.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
y, err := lp.New("nv_power_max_limit", device.tags, device.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
if err == nil {
y.AddMeta("unit", "watts")
output <- y
@@ -622,7 +614,7 @@ func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
return nil
}
func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
if ret != nvml.SUCCESS {
err := errors.New(nvml.ErrorString(ret))
@@ -639,7 +631,7 @@ func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_encoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
y, err := lp.New("nv_encoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -649,7 +641,7 @@ func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
return nil
}
func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
if ret != nvml.SUCCESS {
err := errors.New(nvml.ErrorString(ret))
@@ -666,7 +658,7 @@ func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_decoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
y, err := lp.New("nv_decoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
if err == nil {
y.AddMeta("unit", "%")
output <- y
@@ -676,7 +668,7 @@ func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
return nil
}
func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_remapped_rows_corrected"] ||
!device.excludeMetrics["nv_remapped_rows_uncorrected"] ||
!device.excludeMetrics["nv_remapped_rows_pending"] ||
@@ -693,13 +685,13 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
corrected, uncorrected, pending, failure, ret := nvml.DeviceGetRemappedRows(device.device)
if ret == nvml.SUCCESS {
if !device.excludeMetrics["nv_remapped_rows_corrected"] {
y, err := lp.NewMessage("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(corrected)}, time.Now())
y, err := lp.New("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(corrected)}, time.Now())
if err == nil {
output <- y
}
}
if !device.excludeMetrics["nv_remapped_rows_uncorrected"] {
y, err := lp.NewMessage("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(uncorrected)}, time.Now())
y, err := lp.New("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(uncorrected)}, time.Now())
if err == nil {
output <- y
}
@@ -709,7 +701,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
if pending {
p = 1
}
y, err := lp.NewMessage("nv_remapped_rows_pending", device.tags, device.meta, map[string]interface{}{"value": p}, time.Now())
y, err := lp.New("nv_remapped_rows_pending", device.tags, device.meta, map[string]interface{}{"value": p}, time.Now())
if err == nil {
output <- y
}
@@ -719,7 +711,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
if failure {
f = 1
}
y, err := lp.NewMessage("nv_remapped_rows_failure", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
y, err := lp.New("nv_remapped_rows_failure", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
if err == nil {
output <- y
}
@@ -729,7 +721,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
return nil
}
func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
if !device.excludeMetrics["nv_compute_processes"] {
// Get information about processes with a compute context on a device
//
@@ -753,7 +745,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
procList, ret := nvml.DeviceGetComputeRunningProcesses(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
y, err := lp.New("nv_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
if err == nil {
output <- y
}
@@ -782,7 +774,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
procList, ret := nvml.DeviceGetGraphicsRunningProcesses(device.device)
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_graphics_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
y, err := lp.New("nv_graphics_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
if err == nil {
output <- y
}
@@ -812,7 +804,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
// // Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
// procList, ret := nvml.DeviceGetMPSComputeRunningProcesses(device.device)
// if ret == nvml.SUCCESS {
// y, err := lp.NewMessage("nv_mps_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
// y, err := lp.New("nv_mps_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
// if err == nil {
// output <- y
// }
@@ -821,7 +813,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
return nil
}
func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
var violTime nvml.ViolationTime
var ret nvml.Return
@@ -840,7 +832,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_POWER)
if ret == nvml.SUCCESS {
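// the violation time is reported in nanoseconds; convert to seconds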
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_power", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_power", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -852,7 +844,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_THERMAL)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_thermal", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_thermal", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -864,7 +856,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_SYNC_BOOST)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_sync_boost", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_sync_boost", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -876,7 +868,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_BOARD_LIMIT)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_board_limit", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_board_limit", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -888,7 +880,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_LOW_UTILIZATION)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_low_util", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_low_util", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -900,7 +892,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_RELIABILITY)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_reliability", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_reliability", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -912,7 +904,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_APP_CLOCKS)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_below_app_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_below_app_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -924,7 +916,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_BASE_CLOCKS)
if ret == nvml.SUCCESS {
t := float64(violTime.ViolationTime) * 1e-9
y, err := lp.NewMessage("nv_violation_below_base_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
y, err := lp.New("nv_violation_below_base_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
if err == nil {
y.AddMeta("unit", "sec")
output <- y
@@ -935,18 +927,12 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
return nil
}
func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
// Retrieves the specified error counter value
// Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available
//
// For Pascal™ or newer fully supported devices.
var aggregate_crc_errors uint64 = 0
var aggregate_ecc_errors uint64 = 0
var aggregate_replay_errors uint64 = 0
var aggregate_recovery_errors uint64 = 0
var aggregate_crc_flit_errors uint64 = 0
for i := 0; i < nvml.NVLINK_MAX_LINKS; i++ {
state, ret := nvml.DeviceGetNvLinkState(device.device, i)
if ret == nvml.SUCCESS {
@@ -954,9 +940,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_crc_errors"] {
// Data link receive data CRC error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_DATA)
aggregate_crc_errors = aggregate_crc_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_crc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_crc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -967,9 +952,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_ecc_errors"] {
// Data link receive data ECC error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_ECC_DATA)
aggregate_ecc_errors = aggregate_ecc_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_ecc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_ecc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -980,9 +964,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_replay_errors"] {
// Data link transmit replay error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_REPLAY)
aggregate_replay_errors = aggregate_replay_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_replay_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_replay_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -993,9 +976,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_recovery_errors"] {
// Data link transmit recovery error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_RECOVERY)
aggregate_recovery_errors = aggregate_recovery_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_recovery_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_recovery_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -1006,9 +988,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
if !device.excludeMetrics["nv_nvlink_crc_flit_errors"] {
// Data link receive flow control digit CRC error counter
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_FLIT)
aggregate_crc_flit_errors = aggregate_crc_flit_errors + count
if ret == nvml.SUCCESS {
y, err := lp.NewMessage("nv_nvlink_crc_flit_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
y, err := lp.New("nv_nvlink_crc_flit_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
y.AddTag("stype-id", fmt.Sprintf("%d", i))
@@ -1019,58 +1000,16 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
}
}
}
// Export aggregated values
if !device.excludeMetrics["nv_nvlink_crc_errors"] {
// Data link receive data CRC error counter
y, err := lp.NewMessage("nv_nvlink_crc_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_crc_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_ecc_errors"] {
// Data link receive data ECC error counter
y, err := lp.NewMessage("nv_nvlink_ecc_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_ecc_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_replay_errors"] {
// Data link transmit replay error counter
y, err := lp.NewMessage("nv_nvlink_replay_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_replay_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_recovery_errors"] {
// Data link transmit recovery error counter
y, err := lp.NewMessage("nv_nvlink_recovery_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_recovery_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
if !device.excludeMetrics["nv_nvlink_crc_flit_errors"] {
// Data link receive flow control digit CRC error counter
y, err := lp.NewMessage("nv_nvlink_crc_flit_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_crc_flit_errors}, time.Now())
if err == nil {
y.AddTag("stype", "nvlink")
output <- y
}
}
return nil
}
func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
var err error
if !m.init {
return
}
readAll := func(device NvidiaCollectorDevice, output chan lp.CCMessage) {
readAll := func(device NvidiaCollectorDevice, output chan lp.CCMetric) {
name, ret := nvml.DeviceGetName(device.device)
if ret != nvml.SUCCESS {
name = "NoName"

View File

@@ -10,7 +10,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// running average power limit (RAPL) monitoring attributes for a zone
@@ -214,7 +214,7 @@ func (m *RAPLCollector) Init(config json.RawMessage) error {
// Read reads running average power limit (RAPL) monitoring attributes for all initialized zones
// See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes
func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMetric) {
for i := range m.RAPLZoneInfo {
p := &m.RAPLZoneInfo[i]
@@ -237,7 +237,7 @@ func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMessage) {
timeDiff := energyTimestamp.Sub(p.energyTimestamp)
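// assuming the powercap energy counters are read in microjoules (energy_uj),
// dividing by the time difference in microseconds yields watts directly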
averagePower := float64(energyDiff) / float64(timeDiff.Microseconds())
y, err := lp.NewMessage(
y, err := lp.New(
"rapl_average_power",
p.tags,
m.meta,

View File

@@ -7,7 +7,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/ClusterCockpit/go-rocm-smi/pkg/rocm_smi"
)
@@ -162,7 +162,7 @@ func (m *RocmSmiCollector) Init(config json.RawMessage) error {
// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Create a sample metric
timestamp := time.Now()
@@ -175,119 +175,119 @@ func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage
if !dev.excludeMetrics["rocm_gfx_util"] {
value := metrics.Average_gfx_activity
y, err := lp.NewMessage("rocm_gfx_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_gfx_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_umc_util"] {
value := metrics.Average_umc_activity
y, err := lp.NewMessage("rocm_umc_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_umc_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_mm_util"] {
value := metrics.Average_mm_activity
y, err := lp.NewMessage("rocm_mm_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_mm_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_avg_power"] {
value := metrics.Average_socket_power
y, err := lp.NewMessage("rocm_avg_power", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_avg_power", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_mem"] {
value := metrics.Temperature_mem
y, err := lp.NewMessage("rocm_temp_mem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_temp_mem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_hotspot"] {
value := metrics.Temperature_hotspot
y, err := lp.NewMessage("rocm_temp_hotspot", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_temp_hotspot", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_edge"] {
value := metrics.Temperature_edge
y, err := lp.NewMessage("rocm_temp_edge", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_temp_edge", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_vrgfx"] {
value := metrics.Temperature_vrgfx
y, err := lp.NewMessage("rocm_temp_vrgfx", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_temp_vrgfx", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_vrsoc"] {
value := metrics.Temperature_vrsoc
y, err := lp.NewMessage("rocm_temp_vrsoc", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_temp_vrsoc", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_temp_vrmem"] {
value := metrics.Temperature_vrmem
y, err := lp.NewMessage("rocm_temp_vrmem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_temp_vrmem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_gfx_clock"] {
value := metrics.Average_gfxclk_frequency
y, err := lp.NewMessage("rocm_gfx_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_gfx_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_soc_clock"] {
value := metrics.Average_socclk_frequency
y, err := lp.NewMessage("rocm_soc_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_soc_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_u_clock"] {
value := metrics.Average_uclk_frequency
y, err := lp.NewMessage("rocm_u_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_u_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_v0_clock"] {
value := metrics.Average_vclk0_frequency
y, err := lp.NewMessage("rocm_v0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_v0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_v1_clock"] {
value := metrics.Average_vclk1_frequency
y, err := lp.NewMessage("rocm_v1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_v1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_d0_clock"] {
value := metrics.Average_dclk0_frequency
y, err := lp.NewMessage("rocm_d0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_d0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
}
if !dev.excludeMetrics["rocm_d1_clock"] {
value := metrics.Average_dclk1_frequency
y, err := lp.NewMessage("rocm_d1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_d1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
output <- y
}
@@ -295,7 +295,7 @@ func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage
if !dev.excludeMetrics["rocm_temp_hbm"] {
for i := 0; i < rocm_smi.NUM_HBM_INSTANCES; i++ {
value := metrics.Temperature_hbm[i]
y, err := lp.NewMessage("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddTag("stype", "device")
y.AddTag("stype-id", fmt.Sprintf("%d", i))


@@ -4,8 +4,8 @@ import (
"encoding/json"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// These are the fields we read from the JSON configuration
@@ -32,7 +32,7 @@ type SampleCollector struct {
func (m *SampleCollector) Init(config json.RawMessage) error {
var err error = nil
// Always set the name early in Init() to use it in cclog.Component* functions
m.name = "SampleCollector"
m.name = "InternalCollector"
// This is for later use, also call it early
m.setup()
// Tell whether the collector should be run in parallel with others (reading files, ...)
@@ -74,7 +74,7 @@ func (m *SampleCollector) Init(config json.RawMessage) error {
// Read collects all metrics belonging to the sample collector
// and sends them through the output channel to the collector manager
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Create a sample metric
timestamp := time.Now()
@@ -85,7 +85,7 @@ func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage)
// stop := readState()
// value = (stop - start) / interval.Seconds()
y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
// Send it to output channel
output <- y


@@ -5,8 +5,8 @@ import (
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// These are the fields we read from the JSON configuration
@@ -25,7 +25,7 @@ type SampleTimerCollector struct {
config SampleTimerCollectorConfig // the configuration structure
interval time.Duration // the interval parsed from configuration
ticker *time.Ticker // own timer
output chan lp.CCMessage // own internal output channel
output chan lp.CCMetric // own internal output channel
}
func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
@@ -100,14 +100,14 @@ func (m *SampleTimerCollector) ReadMetrics(timestamp time.Time) {
// stop := readState()
// value = (stop - start) / interval.Seconds()
y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil && m.output != nil {
// Send it to output channel if we have a valid channel
m.output <- y
}
}
func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMetric) {
// Capture output channel
m.output = output
}
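Unlike the plain SampleCollector above, the timer variant only captures the output channel in `Read()`; collection is driven by its own `time.Ticker`. A hedged sketch (the helper name is assumed) of the loop such a collector would start once `Init()` has parsed the interval:

```go
package collectors

import "time"

// startTicker sketches how a SampleTimerCollector-style collector could drive
// ReadMetrics from its own ticker; the actual repository code lies outside
// the hunks shown above.
func (m *SampleTimerCollector) startTicker() {
	m.ticker = time.NewTicker(m.interval)
	go func() {
		for timestamp := range m.ticker.C {
			// ReadMetrics only sends once Read() has captured an output channel.
			m.ReadMetrics(timestamp)
		}
	}()
}
```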


@@ -11,7 +11,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const SCHEDSTATFILE = `/proc/schedstat`
@@ -96,7 +96,7 @@ func (m *SchedstatCollector) Init(config json.RawMessage) error {
return err
}
func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) {
func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMetric, now time.Time, tsdelta time.Duration) {
running, _ := strconv.ParseInt(linefields[7], 10, 64)
waiting, _ := strconv.ParseInt(linefields[8], 10, 64)
diff_running := running - m.olddata[linefields[0]]["running"]
@@ -109,7 +109,7 @@ func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]
m.olddata[linefields[0]]["waiting"] = waiting
value := l_running + l_waiting
y, err := lp.NewMessage("cpu_load_core", tags, m.meta, map[string]interface{}{"value": value}, now)
y, err := lp.New("cpu_load_core", tags, m.meta, map[string]interface{}{"value": value}, now)
if err == nil {
// Send it to output channel
output <- y
@@ -118,7 +118,7 @@ func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]
// Read collects all metrics belonging to the schedstat collector
// and sends them through the output channel to the collector manager
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}


@@ -7,7 +7,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type SelfCollectorConfig struct {
@@ -42,56 +42,56 @@ func (m *SelfCollector) Init(config json.RawMessage) error {
return err
}
func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMetric) {
timestamp := time.Now()
if m.config.MemStats {
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
y, err := lp.NewMessage("total_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.TotalAlloc}, timestamp)
y, err := lp.New("total_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.TotalAlloc}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapAlloc}, timestamp)
y, err = lp.New("heap_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapAlloc}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_sys", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapSys}, timestamp)
y, err = lp.New("heap_sys", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapSys}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_idle", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapIdle}, timestamp)
y, err = lp.New("heap_idle", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapIdle}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_inuse", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapInuse}, timestamp)
y, err = lp.New("heap_inuse", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapInuse}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_released", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapReleased}, timestamp)
y, err = lp.New("heap_released", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapReleased}, timestamp)
if err == nil {
y.AddMeta("unit", "Bytes")
output <- y
}
y, err = lp.NewMessage("heap_objects", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapObjects}, timestamp)
y, err = lp.New("heap_objects", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapObjects}, timestamp)
if err == nil {
output <- y
}
}
if m.config.GoRoutines {
y, err := lp.NewMessage("num_goroutines", m.tags, m.meta, map[string]interface{}{"value": runtime.NumGoroutine()}, timestamp)
y, err := lp.New("num_goroutines", m.tags, m.meta, map[string]interface{}{"value": runtime.NumGoroutine()}, timestamp)
if err == nil {
output <- y
}
}
if m.config.CgoCalls {
y, err := lp.NewMessage("num_cgo_calls", m.tags, m.meta, map[string]interface{}{"value": runtime.NumCgoCall()}, timestamp)
y, err := lp.New("num_cgo_calls", m.tags, m.meta, map[string]interface{}{"value": runtime.NumCgoCall()}, timestamp)
if err == nil {
output <- y
}
@@ -102,35 +102,35 @@ func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMessage) {
if err == nil {
sec, nsec := rusage.Utime.Unix()
t := float64(sec) + (float64(nsec) * 1e-9)
y, err := lp.NewMessage("rusage_user_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
y, err := lp.New("rusage_user_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
if err == nil {
y.AddMeta("unit", "seconds")
output <- y
}
sec, nsec = rusage.Stime.Unix()
t = float64(sec) + (float64(nsec) * 1e-9)
y, err = lp.NewMessage("rusage_system_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
y, err = lp.New("rusage_system_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
if err == nil {
y.AddMeta("unit", "seconds")
output <- y
}
y, err = lp.NewMessage("rusage_vol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nvcsw}, timestamp)
y, err = lp.New("rusage_vol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nvcsw}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_invol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nivcsw}, timestamp)
y, err = lp.New("rusage_invol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nivcsw}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_signals", m.tags, m.meta, map[string]interface{}{"value": rusage.Nsignals}, timestamp)
y, err = lp.New("rusage_signals", m.tags, m.meta, map[string]interface{}{"value": rusage.Nsignals}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_major_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Majflt}, timestamp)
y, err = lp.New("rusage_major_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Majflt}, timestamp)
if err == nil {
output <- y
}
y, err = lp.NewMessage("rusage_minor_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Minflt}, timestamp)
y, err = lp.New("rusage_minor_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Minflt}, timestamp)
if err == nil {
output <- y
}
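The `rusage_user_time` and `rusage_system_time` values above turn a `syscall.Timeval` into fractional seconds. A minimal, Unix-only sketch of the read that would feed them; the `Getrusage` call itself lies outside the hunks shown:

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	var rusage syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
		// Utime is the user CPU time as (seconds, nanoseconds).
		sec, nsec := rusage.Utime.Unix()
		t := float64(sec) + float64(nsec)*1e-9
		fmt.Printf("rusage_user_time: %.6f s\n", t)
	}
}
```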


@@ -10,7 +10,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// See: https://www.kernel.org/doc/html/latest/hwmon/sysfs-interface.html
@@ -171,7 +171,7 @@ func (m *TempCollector) Init(config json.RawMessage) error {
return nil
}
func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMetric) {
for _, sensor := range m.sensors {
// Read sensor file
@@ -190,7 +190,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
continue
}
x /= 1000
y, err := lp.NewMessage(
y, err := lp.New(
sensor.metricName,
sensor.tags,
m.meta,
@@ -203,7 +203,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
// max temperature
if m.config.ReportMaxTemp && sensor.maxTemp != 0 {
y, err := lp.NewMessage(
y, err := lp.New(
sensor.maxTempName,
sensor.tags,
m.meta,
@@ -217,7 +217,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
// critical temperature
if m.config.ReportCriticalTemp && sensor.critTemp != 0 {
y, err := lp.NewMessage(
y, err := lp.New(
sensor.critTempName,
sensor.tags,
m.meta,
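The `x /= 1000` above follows the hwmon sysfs convention (see the kernel documentation linked in this file) of reporting temperatures in millidegrees Celsius. A minimal sketch with an illustrative sensor path, not one taken from the collector:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Illustrative path; TempCollector discovers the real sensor files itself.
	buf, err := os.ReadFile("/sys/class/hwmon/hwmon0/temp1_input")
	if err != nil {
		return
	}
	x, err := strconv.ParseInt(strings.TrimSpace(string(buf)), 10, 64)
	if err != nil {
		return
	}
	fmt.Printf("%d °C\n", x/1000) // hwmon reports millidegrees Celsius
}
```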


@@ -9,7 +9,7 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const MAX_NUM_PROCS = 10
@@ -53,7 +53,7 @@ func (m *TopProcsCollector) Init(config json.RawMessage) error {
return nil
}
func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
@@ -68,7 +68,7 @@ func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessag
lines := strings.Split(string(stdout), "\n")
for i := 1; i < m.config.Num_procs+1; i++ {
name := fmt.Sprintf("topproc%d", i)
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
if err == nil {
output <- y
}
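The loop above starts at `i := 1` because line 0 of the captured output is the `ps` header row. A sketch of an invocation that yields such output; the actual command string used by TopProcsCollector is outside the hunks shown and may differ:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Hypothetical invocation: processes sorted by CPU usage, command name only.
	stdout, err := exec.Command("ps", "-Ao", "comm", "--sort=-pcpu").Output()
	if err != nil {
		return
	}
	lines := strings.Split(string(stdout), "\n")
	numProcs := 5 // stands in for m.config.Num_procs
	for i := 1; i <= numProcs && i < len(lines); i++ { // lines[0] is the header
		fmt.Printf("topproc%d = %s\n", i, lines[i])
	}
}
```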


@@ -37,9 +37,7 @@ $ install --mode 644 \
$ systemctl enable cc-metric-collector
```
## Packaging
### RPM
## RPM
In order to get RPM packages for cc-metric-collector, just use:
@@ -49,7 +47,7 @@ $ make RPM
It uses the RPM SPEC file `scripts/cc-metric-collector.spec` and requires the RPM tools (`rpm` and `rpmspec`) and `git`.
### DEB
## DEB
In order to get very simple Debian packages for cc-metric-collector, just use:
@@ -59,16 +57,4 @@ $ make DEB
It uses the DEB control file `scripts/cc-metric-collector.control` and requires `dpkg-deb`, `awk`, `sed` and `git`. It creates only a binary deb package.
_This option is not well tested and therefore experimental_
### Customizing RPMs or DEB packages
If you want to customize the RPMs or DEB packages for your local system, use the following workflow.
- (if there is already a fork in the private account, delete it and wait until Github has registered the deletion)
- Fork the cc-metric-collector repository (if Github has not registered the deletion yet, it creates a fork named cc-metric-collector2)
- Go to the private cc-metric-collector repository and enable Github Actions
- Make your changes to the scripts, code, ... Commit and push the changes.
- Tag the new commit with `v0.x.y-<myversion>` (`git tag v0.x.y-<myversion>`)
- Push tags to repository (`git push --tags`)
- Wait until the Release action finishes. It creates fresh RPMs and DEBs in your private repository on the Releases page.
_This option is not well tested and therefore experimental_


@@ -12,8 +12,8 @@ The global file contains the paths to the other four files and some global optio
"collectors" : "collectors.json",
"receivers" : "receivers.json",
"router" : "router.json",
"interval": "10s",
"duration": "1s"
"interval": 10,
"duration": 1
}
```
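The hunk above replaces bare integers (seconds) with Go duration strings for `interval` and `duration`. A minimal sketch, with the struct shape assumed, of how the string form parses:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Assumed shape for illustration; only the two changed fields are modeled.
type globalConfig struct {
	Interval string `json:"interval"`
	Duration string `json:"duration"`
}

func main() {
	raw := []byte(`{"interval": "10s", "duration": "1s"}`)
	var cfg globalConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	iv, err := time.ParseDuration(cfg.Interval) // "10s" -> 10 * time.Second
	if err != nil {
		panic(err)
	}
	fmt.Println(iv) // 10s
}
```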

go.mod

@@ -1,45 +1,40 @@
module github.com/ClusterCockpit/cc-metric-collector
go 1.21.1
toolchain go1.22.1
go 1.18
require (
github.com/ClusterCockpit/cc-energy-manager v0.0.0-20240709142550-dd446f7ab900
github.com/ClusterCockpit/cc-units v0.4.0
github.com/ClusterCockpit/cc-units v0.3.0
github.com/ClusterCockpit/go-rocm-smi v0.3.0
github.com/NVIDIA/go-nvml v0.12.0-2
github.com/PaesslerAG/gval v1.2.2
github.com/expr-lang/expr v1.16.9
github.com/fsnotify/fsnotify v1.7.0
github.com/gorilla/mux v1.8.1
github.com/influxdata/influxdb-client-go/v2 v2.13.0
github.com/NVIDIA/go-nvml v0.11.6-0
github.com/PaesslerAG/gval v1.2.1
github.com/gorilla/mux v1.8.0
github.com/influxdata/influxdb-client-go/v2 v2.12.0
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf
github.com/influxdata/line-protocol/v2 v2.2.1
github.com/nats-io/nats.go v1.36.0
github.com/prometheus/client_golang v1.19.0
github.com/stmcginnis/gofish v0.15.0
github.com/tklauser/go-sysconf v0.3.13
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
golang.org/x/sys v0.18.0
github.com/nats-io/nats.go v1.20.0
github.com/prometheus/client_golang v1.14.0
github.com/stmcginnis/gofish v0.13.0
github.com/tklauser/go-sysconf v0.3.11
golang.org/x/sys v0.2.0
)
require (
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/deepmap/oapi-codegen v1.12.3 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/nats-io/nats-server/v2 v2.8.4 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/oapi-codegen/runtime v1.1.1 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.49.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/tklauser/numcpus v0.7.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.22.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
golang.org/x/crypto v0.3.0 // indirect
golang.org/x/net v0.2.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
)

go.sum

@@ -1,120 +1,624 @@
github.com/ClusterCockpit/cc-energy-manager v0.0.0-20240709142550-dd446f7ab900 h1:6+WNav16uWTEDC09hkZKEHfBhtc91p/ZcjgCtyntuIg=
github.com/ClusterCockpit/cc-energy-manager v0.0.0-20240709142550-dd446f7ab900/go.mod h1:EbYeC5t+Y0kW1Q1pP2n9zMqbeYEJITG8YGvAUihXVn4=
github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClusterCockpit/cc-units v0.3.0 h1:JEKgEyvN4GABheKIReW2siDXgpYf2zf4STXV2ip418Y=
github.com/ClusterCockpit/cc-units v0.3.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
github.com/ClusterCockpit/go-rocm-smi v0.3.0 h1:1qZnSpG7/NyLtc7AjqnUL9Jb8xtqG1nMVgp69rJfaR8=
github.com/ClusterCockpit/go-rocm-smi v0.3.0/go.mod h1:+I3UMeX3OlizXDf1WpGD43W4KGZZGVSGmny6rTeOnWA=
github.com/NVIDIA/go-nvml v0.11.6-0 h1:tugQzmaX84Y/6+03wZ/MAgcpfSKDkvkAWeuxFNLHmxY=
github.com/NVIDIA/go-nvml v0.11.6-0/go.mod h1:hy7HYeQy335x6nEss0Ne3PYqleRa6Ct+VKD9RQ4nyFs=
github.com/NVIDIA/go-nvml v0.12.0-2 h1:Sg239yy7jmopu/cuvYauoMj9fOpcGMngxVxxS1EBXeY=
github.com/NVIDIA/go-nvml v0.12.0-2/go.mod h1:7ruy85eOM73muOc/I37euONSwEyFqZsv5ED9AogD4G0=
github.com/PaesslerAG/gval v1.2.2 h1:Y7iBzhgE09IGTt5QgGQ2IdaYYYOU134YGHBThD+wm9E=
github.com/PaesslerAG/gval v1.2.2/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac=
github.com/PaesslerAG/gval v1.2.0 h1:DA7PsxmtzlUU4bYxV35MKp9KDDVWcrJJRhlaCohMhsM=
github.com/PaesslerAG/gval v1.2.0/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac=
github.com/PaesslerAG/jsonpath v0.1.0 h1:gADYeifvlqK3R3i2cR5B4DGgxLXIPb3TRTH1mGi0jPI=
github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/deepmap/oapi-codegen v1.11.0 h1:f/X2NdIkaBKsSdpeuwLnY/vDI0AtPUrmB5LMgc7YD+A=
github.com/deepmap/oapi-codegen v1.11.0/go.mod h1:k+ujhoQGxmQYBZBbxhOZNZf4j08qv5mC+OH+fFTnKxM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/getkin/kin-openapi v0.94.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac1BUPam4aw6Q=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM=
github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/influxdata/influxdb-client-go/v2 v2.9.1 h1:5kbH226fmmiV0MMTs7a8L7/ECCKdJWBi1QZNNv4/TkI=
github.com/influxdata/influxdb-client-go/v2 v2.9.1/go.mod h1:x7Jo5UHHl+w8wu8UnGiNobDDHygojXwJX4mx7rXGKMk=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98=
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig=
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo=
github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY=
github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY=
github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE=
github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM=
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
github.com/lestrrat-go/jwx v1.2.24/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I=
github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4=
github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4=
github.com/nats-io/nats.go v1.16.0 h1:zvLE7fGBQYW6MWaFaRdsgm9qT39PJDQoju+DS8KsO1g=
github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.49.0 h1:ToNTdK4zSnPVJmh698mGFkDor9wBI/iGaJy5dbH1EgI=
github.com/prometheus/common v0.49.0/go.mod h1:Kxm+EULxRbUkjGU6WFsQqo3ORzB4tyKvlWFOE9mB2sE=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stmcginnis/gofish v0.15.0 h1:8TG41+lvJk/0Nf8CIIYErxbMlQUy80W0JFRZP3Ld82A=
github.com/stmcginnis/gofish v0.15.0/go.mod h1:BLDSFTp8pDlf/xDbLZa+F7f7eW0E/CHCboggsu8CznI=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stmcginnis/gofish v0.13.0 h1:qq6q3yNt9vw7ZuJxiw87hq9+BdPLsuRQBwl+XoZSz60=
github.com/stmcginnis/gofish v0.13.0/go.mod h1:BLDSFTp8pDlf/xDbLZa+F7f7eW0E/CHCboggsu8CznI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1 h1:P7S/GeHBAFEZIYp0ePPs2kHXoazz8q2KsyxHyQVGCJg=
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1/go.mod h1:9CWpnTUmlQkfdpdutA1nNf4iE5lAVt3QZOu0Z6hahBE=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sys v0.0.0-20210122093101-04d7465088b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220513224357-95641704303c/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0=
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew=
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w=
golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@@ -11,7 +11,7 @@ import (
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
"github.com/PaesslerAG/gval"
@@ -31,14 +31,14 @@ type metricAggregator struct {
functions []*MetricAggregatorIntervalConfig
constants map[string]interface{}
language gval.Language
output chan lp.CCMessage
output chan lp.CCMetric
}
type MetricAggregator interface {
AddAggregation(name, function, condition string, tags, meta map[string]string) error
DeleteAggregation(name string) error
Init(output chan lp.CCMessage) error
Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage)
Init(output chan lp.CCMetric) error
Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric)
}
var metricCacheLanguage = gval.NewLanguage(
@@ -74,7 +74,7 @@ var evaluables = struct {
mapping: make(map[string]gval.Evaluable),
}
func (c *metricAggregator) Init(output chan lp.CCMessage) error {
func (c *metricAggregator) Init(output chan lp.CCMetric) error {
c.output = output
c.functions = make([]*MetricAggregatorIntervalConfig, 0)
c.constants = make(map[string]interface{})
@@ -112,7 +112,7 @@ func (c *metricAggregator) Init(output chan lp.CCMessage) error {
return nil
}
func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage) {
func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric) {
vars := make(map[string]interface{})
for k, v := range c.constants {
vars[k] = v
@@ -121,13 +121,8 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
vars["endtime"] = endtime
for _, f := range c.functions {
cclog.ComponentDebug("MetricCache", "COLLECT", f.Name, "COND", f.Condition)
var valuesFloat64 []float64
var valuesFloat32 []float32
var valuesInt []int
var valuesInt32 []int32
var valuesInt64 []int64
var valuesBool []bool
matches := make([]lp.CCMessage, 0)
values := make([]float64, 0)
matches := make([]lp.CCMetric, 0)
for _, m := range metrics {
vars["metric"] = m
//value, err := gval.Evaluate(f.Condition, vars, c.language)
@@ -141,17 +136,17 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
if valid {
switch x := v.(type) {
case float64:
valuesFloat64 = append(valuesFloat64, x)
values = append(values, x)
case float32:
valuesFloat32 = append(valuesFloat32, x)
case int:
valuesInt = append(valuesInt, x)
case int32:
valuesInt32 = append(valuesInt32, x)
case int64:
valuesInt64 = append(valuesInt64, x)
values = append(values, float64(x))
case bool:
valuesBool = append(valuesBool, x)
if x {
values = append(values, float64(1.0))
} else {
values = append(values, float64(0.0))
}
default:
cclog.ComponentError("MetricCache", "COLLECT ADD VALUE", v, "FAILED")
}
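The rewritten loop folds every collected value into a single `[]float64`: integer types are widened and booleans map to 1.0/0.0. A standalone sketch of that normalization (the helper name and sample values are illustrative, not from the repository):

```go
package main

import "fmt"

// toFloat64 mirrors the type switch above: numeric types are widened to
// float64, booleans become 1.0/0.0, anything else is rejected.
func toFloat64(v interface{}) (float64, bool) {
	switch x := v.(type) {
	case float64:
		return x, true
	case float32:
		return float64(x), true
	case int:
		return float64(x), true
	case int32:
		return float64(x), true
	case int64:
		return float64(x), true
	case bool:
		if x {
			return 1.0, true
		}
		return 0.0, true
	default:
		return 0, false
	}
}

func main() {
	values := make([]float64, 0)
	for _, v := range []interface{}{int64(3), float32(1.5), true, "skip"} {
		if f, ok := toFloat64(v); ok {
			values = append(values, f)
		}
	}
	fmt.Println(values) // [3 1.5 1]
}
```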
@@ -160,63 +155,17 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
}
}
delete(vars, "metric")
// Check that only values of one type were collected
countValueTypes := 0
if len(valuesFloat64) > 0 {
countValueTypes += 1
}
if len(valuesFloat32) > 0 {
countValueTypes += 1
}
if len(valuesInt) > 0 {
countValueTypes += 1
}
if len(valuesInt32) > 0 {
countValueTypes += 1
}
if len(valuesInt64) > 0 {
countValueTypes += 1
}
if len(valuesBool) > 0 {
countValueTypes += 1
}
if countValueTypes > 1 {
cclog.ComponentError("MetricCache", "Collected values of different types")
}
var len_values int
switch {
case len(valuesFloat64) > 0:
vars["values"] = valuesFloat64
len_values = len(valuesFloat64)
case len(valuesFloat32) > 0:
vars["values"] = valuesFloat32
len_values = len(valuesFloat32)
case len(valuesInt) > 0:
vars["values"] = valuesInt
len_values = len(valuesInt)
case len(valuesInt32) > 0:
vars["values"] = valuesInt32
len_values = len(valuesInt32)
case len(valuesInt64) > 0:
vars["values"] = valuesInt64
len_values = len(valuesInt64)
case len(valuesBool) > 0:
vars["values"] = valuesBool
len_values = len(valuesBool)
}
cclog.ComponentDebug("MetricCache", "EVALUATE", f.Name, "METRICS", len_values, "CALC", f.Function)
cclog.ComponentDebug("MetricCache", "EVALUATE", f.Name, "METRICS", len(values), "CALC", f.Function)
vars["values"] = values
vars["metrics"] = matches
if len_values > 0 {
if len(values) > 0 {
value, err := gval.Evaluate(f.Function, vars, c.language)
if err != nil {
cclog.ComponentError("MetricCache", "EVALUATE", f.Name, "METRICS", len_values, "CALC", f.Function, ":", err.Error())
cclog.ComponentError("MetricCache", "EVALUATE", f.Name, "METRICS", len(values), "CALC", f.Function, ":", err.Error())
break
}
copy_tags := func(tags map[string]string, metrics []lp.CCMessage) map[string]string {
copy_tags := func(tags map[string]string, metrics []lp.CCMetric) map[string]string {
out := make(map[string]string)
for key, value := range tags {
switch value {
@@ -233,7 +182,7 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
}
return out
}
copy_meta := func(meta map[string]string, metrics []lp.CCMessage) map[string]string {
copy_meta := func(meta map[string]string, metrics []lp.CCMetric) map[string]string {
out := make(map[string]string)
for key, value := range meta {
switch value {
@@ -253,18 +202,18 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
tags := copy_tags(f.Tags, matches)
meta := copy_meta(f.Meta, matches)
var m lp.CCMessage
var m lp.CCMetric
switch t := value.(type) {
case float64:
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
case float32:
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
case int:
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
case int64:
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
case string:
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
default:
cclog.ComponentError("MetricCache", "Gval returned invalid type", t, "skipping metric", f.Name)
}
@@ -367,7 +316,7 @@ func EvalBoolCondition(condition string, params map[string]interface{}) (bool, e
return value, err
}
func EvalFloat64Condition(condition string, params map[string]float64) (float64, error) {
func EvalFloat64Condition(condition string, params map[string]interface{}) (float64, error) {
evaluables.mutex.Lock()
evaluable, ok := evaluables.mapping[condition]
evaluables.mutex.Unlock()
@@ -389,7 +338,7 @@ func EvalFloat64Condition(condition string, params map[string]float64) (float64,
return value, err
}
func NewAggregator(output chan lp.CCMessage) (MetricAggregator, error) {
func NewAggregator(output chan lp.CCMetric) (MetricAggregator, error) {
a := new(metricAggregator)
err := a.Init(output)
if err != nil {
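The diff excerpt ends here; for orientation, a hedged usage sketch of the CCMetric-based interface above (metric name, gval expression, and channel size are illustrative assumptions, not repository code):

```go
// Sketch only, inside the metricAggregator package.
func exampleAggregation(start, end time.Time, cached []lp.CCMetric) (chan lp.CCMetric, error) {
	output := make(chan lp.CCMetric, 128) // buffer size is an arbitrary choice
	a, err := NewAggregator(output)
	if err != nil {
		return nil, err
	}
	// Derive a node-level average over all matching metrics of an interval.
	err = a.AddAggregation(
		"cpu_load_avg",       // derived metric name (hypothetical)
		"avg(values)",        // gval expression over the collected values
		"name == 'cpu_load'", // condition selecting the input metrics
		map[string]string{"type": "node"},
		map[string]string{"source": "MetricAggregator"},
	)
	if err != nil {
		return nil, err
	}
	a.Eval(start, end, cached) // derived metrics are sent to output
	return output, nil
}
```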

View File

@@ -3,11 +3,12 @@ package metricAggregator
import (
"errors"
"fmt"
"math"
"regexp"
"sort"
"strings"
"golang.org/x/exp/slices"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
)
@@ -15,155 +16,149 @@ import (
* Arithmetic functions on value arrays
*/
func sumAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
if len(values) == 0 {
return 0.0, errors.New("sum function requires at least one argument")
// Sum up values
func sumfunc(args ...interface{}) (interface{}, error) {
s := 0.0
values, ok := args[0].([]float64)
if ok {
cclog.ComponentDebug("MetricCache", "SUM FUNC START")
for _, x := range values {
s += x
}
cclog.ComponentDebug("MetricCache", "SUM FUNC END", s)
} else {
cclog.ComponentDebug("MetricCache", "SUM FUNC CAST FAILED")
}
var sum T
for _, value := range values {
sum += value
}
return sum, nil
return s, nil
}
// Sum up values
func sumfunc(args interface{}) (interface{}, error) {
var err error
switch values := args.(type) {
// Get the minimum value
func minfunc(args ...interface{}) (interface{}, error) {
var err error = nil
switch values := args[0].(type) {
case []float64:
return sumAnyType(values)
var s float64 = math.MaxFloat64
for _, x := range values {
if x < s {
s = x
}
}
return s, nil
case []float32:
return sumAnyType(values)
var s float32 = math.MaxFloat32
for _, x := range values {
if x < s {
s = x
}
}
return s, nil
case []int:
return sumAnyType(values)
var s int = math.MaxInt
for _, x := range values {
if x < s {
s = x
}
}
return s, nil
case []int64:
return sumAnyType(values)
var s int64 = math.MaxInt64
for _, x := range values {
if x < s {
s = x
}
}
return s, nil
case []int32:
return sumAnyType(values)
var s int32 = math.MaxInt32
for _, x := range values {
if x < s {
s = x
}
}
return s, nil
default:
err = errors.New("function 'sum' only on list of values (float64, float32, int, int32, int64)")
err = errors.New("function 'min' only on list of values (float64, float32, int, int32, int64)")
}
return 0.0, err
}
func minAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
if len(values) == 0 {
return 0.0, errors.New("min function requires at least one argument")
}
return slices.Min(values), nil
}
// Get the minimum value
func minfunc(args interface{}) (interface{}, error) {
switch values := args.(type) {
case []float64:
return minAnyType(values)
case []float32:
return minAnyType(values)
case []int:
return minAnyType(values)
case []int64:
return minAnyType(values)
case []int32:
return minAnyType(values)
default:
return 0.0, errors.New("function 'min' only on list of values (float64, float32, int, int32, int64)")
}
}
func avgAnyType[T float64 | float32 | int | int32 | int64](values []T) (float64, error) {
if len(values) == 0 {
return 0.0, errors.New("average function requires at least one argument")
}
sum, err := sumAnyType[T](values)
return float64(sum) / float64(len(values)), err
}
// Get the average or mean value
func avgfunc(args interface{}) (interface{}, error) {
switch values := args.(type) {
func avgfunc(args ...interface{}) (interface{}, error) {
switch values := args[0].(type) {
case []float64:
return avgAnyType(values)
var s float64 = 0
for _, x := range values {
s += x
}
return s / float64(len(values)), nil
case []float32:
return avgAnyType(values)
var s float32 = 0
for _, x := range values {
s += x
}
return s / float32(len(values)), nil
case []int:
return avgAnyType(values)
var s int = 0
for _, x := range values {
s += x
}
return s / len(values), nil
case []int64:
return avgAnyType(values)
case []int32:
return avgAnyType(values)
default:
return 0.0, errors.New("function 'average' only on list of values (float64, float32, int, int32, int64)")
var s int64 = 0
for _, x := range values {
s += x
}
return s / int64(len(values)), nil
}
}
func maxAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
if len(values) == 0 {
return 0.0, errors.New("max function requires at least one argument")
}
return slices.Max(values), nil
return 0.0, nil
}
// Get the maximum value
func maxfunc(args interface{}) (interface{}, error) {
switch values := args.(type) {
case []float64:
return maxAnyType(values)
case []float32:
return maxAnyType(values)
case []int:
return maxAnyType(values)
case []int64:
return maxAnyType(values)
case []int32:
return maxAnyType(values)
default:
return 0.0, errors.New("function 'max' only on list of values (float64, float32, int, int32, int64)")
func maxfunc(args ...interface{}) (interface{}, error) {
s := math.Inf(-1) // start below any real value so negative maxima work
values, ok := args[0].([]float64)
if ok {
for _, x := range values {
if x > s {
s = x
}
}
}
}
func medianAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
if len(values) == 0 {
return 0.0, errors.New("median function requires at least one argument")
}
slices.Sort(values)
var median T
if midPoint := len(values) / 2; len(values)%2 == 0 {
median = (values[midPoint-1] + values[midPoint]) / 2
} else {
median = values[midPoint]
}
return median, nil
return s, nil
}
// Get the median value
func medianfunc(args interface{}) (interface{}, error) {
switch values := args.(type) {
func medianfunc(args ...interface{}) (interface{}, error) {
switch values := args[0].(type) {
case []float64:
return medianAnyType(values)
case []float32:
return medianAnyType(values)
sort.Float64s(values)
return values[len(values)/2], nil
// case []float32:
// sort.Float64s(values)
// return values[len(values)/2], nil
case []int:
return medianAnyType(values)
case []int64:
return medianAnyType(values)
case []int32:
return medianAnyType(values)
default:
return 0.0, errors.New("function 'median' only on list of values (float64, float32, int, int32, int64)")
sort.Ints(values)
return values[len(values)/2], nil
// case []int64:
// sort.Ints(values)
// return values[len(values)/2], nil
// case []int32:
// sort.Ints(values)
// return values[len(values)/2], nil
}
return 0.0, errors.New("function 'median()' only on lists of type float64 and int")
}
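These variadic `func(args ...interface{}) (interface{}, error)` signatures are what `gval.Function` expects; a self-contained sketch of registering and evaluating such a function (the language built here is illustrative, not the repository's `metricCacheLanguage`):

```go
package main

import (
	"fmt"

	"github.com/PaesslerAG/gval"
)

// sum follows the variadic contract used above: args[0] carries the
// "values" slice from the evaluation parameters.
func sum(args ...interface{}) (interface{}, error) {
	s := 0.0
	if values, ok := args[0].([]float64); ok {
		for _, x := range values {
			s += x
		}
	}
	return s, nil
}

func main() {
	lang := gval.NewLanguage(gval.Full(), gval.Function("sum", sum))
	v, err := lang.Evaluate("sum(values) / 2",
		map[string]interface{}{"values": []float64{1, 2, 3}})
	fmt.Println(v, err) // 3 <nil>
}
```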
/*
* Get the number of values in a list. Always returns an int
*/
func lenfunc(args interface{}) (interface{}, error) {
func lenfunc(args ...interface{}) (interface{}, error) {
var err error = nil
var length int = 0
switch values := args.(type) {
switch values := args[0].(type) {
case []float64:
length = len(values)
case []float32:
@@ -248,8 +243,8 @@ func matchfunc(args ...interface{}) (interface{}, error) {
*/
// for a given cpuid, it returns the core id
func getCpuCoreFunc(args interface{}) (interface{}, error) {
switch cpuid := args.(type) {
func getCpuCoreFunc(args ...interface{}) (interface{}, error) {
switch cpuid := args[0].(type) {
case int:
return topo.GetHwthreadCore(cpuid), nil
}
@@ -257,8 +252,8 @@ func getCpuCoreFunc(args interface{}) (interface{}, error) {
}
// for a given cpuid, it returns the socket id
func getCpuSocketFunc(args interface{}) (interface{}, error) {
switch cpuid := args.(type) {
func getCpuSocketFunc(args ...interface{}) (interface{}, error) {
switch cpuid := args[0].(type) {
case int:
return topo.GetHwthreadSocket(cpuid), nil
}
@@ -266,8 +261,8 @@ func getCpuSocketFunc(args interface{}) (interface{}, error) {
}
// for a given cpuid, it returns the id of the NUMA node
func getCpuNumaDomainFunc(args interface{}) (interface{}, error) {
switch cpuid := args.(type) {
func getCpuNumaDomainFunc(args ...interface{}) (interface{}, error) {
switch cpuid := args[0].(type) {
case int:
return topo.GetHwthreadNumaDomain(cpuid), nil
}
@@ -275,8 +270,8 @@ func getCpuNumaDomainFunc(args interface{}) (interface{}, error) {
}
// for a given cpuid, it returns the id of the CPU die
func getCpuDieFunc(args interface{}) (interface{}, error) {
switch cpuid := args.(type) {
func getCpuDieFunc(args ...interface{}) (interface{}, error) {
switch cpuid := args[0].(type) {
case int:
return topo.GetHwthreadDie(cpuid), nil
}
@@ -284,13 +279,13 @@ func getCpuDieFunc(args interface{}) (interface{}, error) {
}
// for a given core id, it returns the list of cpuids
func getCpuListOfCoreFunc(args interface{}) (interface{}, error) {
func getCpuListOfCoreFunc(args ...interface{}) (interface{}, error) {
cpulist := make([]int, 0)
switch in := args.(type) {
switch in := args[0].(type) {
case int:
for _, c := range topo.CpuData() {
if c.Core == in {
cpulist = append(cpulist, c.CpuID)
cpulist = append(cpulist, c.Cpuid)
}
}
}
@@ -298,13 +293,13 @@ func getCpuListOfCoreFunc(args interface{}) (interface{}, error) {
}
// for a given socket id, it returns the list of cpuids
func getCpuListOfSocketFunc(args interface{}) (interface{}, error) {
func getCpuListOfSocketFunc(args ...interface{}) (interface{}, error) {
cpulist := make([]int, 0)
switch in := args.(type) {
switch in := args[0].(type) {
case int:
for _, c := range topo.CpuData() {
if c.Socket == in {
cpulist = append(cpulist, c.CpuID)
cpulist = append(cpulist, c.Cpuid)
}
}
}
@@ -312,13 +307,13 @@ func getCpuListOfSocketFunc(args interface{}) (interface{}, error) {
}
// for a given id of a NUMA domain, it returns the list of cpuids
func getCpuListOfNumaDomainFunc(args interface{}) (interface{}, error) {
func getCpuListOfNumaDomainFunc(args ...interface{}) (interface{}, error) {
cpulist := make([]int, 0)
switch in := args.(type) {
switch in := args[0].(type) {
case int:
for _, c := range topo.CpuData() {
if c.NumaDomain == in {
cpulist = append(cpulist, c.CpuID)
if c.Numadomain == in {
cpulist = append(cpulist, c.Cpuid)
}
}
}
@@ -326,13 +321,13 @@ func getCpuListOfNumaDomainFunc(args interface{}) (interface{}, error) {
}
// for a given CPU die id, it returns the list of cpuids
func getCpuListOfDieFunc(args interface{}) (interface{}, error) {
func getCpuListOfDieFunc(args ...interface{}) (interface{}, error) {
cpulist := make([]int, 0)
switch in := args.(type) {
switch in := args[0].(type) {
case int:
for _, c := range topo.CpuData() {
if c.Die == in {
cpulist = append(cpulist, c.CpuID)
cpulist = append(cpulist, c.Cpuid)
}
}
}
@@ -340,7 +335,7 @@ func getCpuListOfDieFunc(args interface{}) (interface{}, error) {
}
// wrapper function to get a list of all cpuids of the node
func getCpuListOfNode() (interface{}, error) {
func getCpuListOfNode(args ...interface{}) (interface{}, error) {
return topo.HwthreadList(), nil
}
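All five list functions repeat the same filter-and-collect loop over `topo.CpuData()`; a hypothetical refactoring sketch that factors it out (the entry type name `HwthreadEntry` and its fields are assumptions based on this diff, not repository code):

```go
// Hypothetical helper: one predicate-driven filter instead of five
// near-identical loops.
func cpuListWhere(pred func(topo.HwthreadEntry) bool) []int {
	cpulist := make([]int, 0)
	for _, c := range topo.CpuData() {
		if pred(c) {
			cpulist = append(cpulist, c.Cpuid) // field name as on the added side
		}
	}
	return cpulist
}

// Example: all cpuids of socket 0.
func cpuListOfSocket0() []int {
	return cpuListWhere(func(c topo.HwthreadEntry) bool { return c.Socket == 0 })
}
```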

View File

@@ -1,21 +1,15 @@
# CC Metric Router
The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMessages](https://pkg.go.dev/github.com/ClusterCockpit/cc-energy-manager@v0.0.0-20240919152819-92a17f2da4f7/pkg/cc-message).
The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMetrics](../ccMetric/README.md).
# Configuration
**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) with option `process_messages`.
```json
{
"num_cache_intervals" : 1,
"interval_timestamp" : true,
"hostname_tag" : "hostname",
"max_forward" : 50,
"process_messages": {
"see": "pkg/messageProcessor/README.md"
},
"add_tags" : [
{
"key" : "cluster",
@@ -69,8 +63,6 @@ The CCMetric router sits in between the collectors and the sinks and can be used
There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.
**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) (option `process_messages`) instead of `add_tags`, `delete_tags`, `drop_metrics`, `drop_metrics_if`, `rename_metrics`, `normalize_units` and `change_unit_prefix`. These options are deprecated and will be removed in future versions. Until then, they are added to the message processor.
# Processing order in the router
- Add the `hostname_tag` tag (if sent by collectors or cache)
@@ -104,8 +96,6 @@ Every time the router receives a metric through any of the channels, it tries to
# The `rename_metrics` option
__deprecated__
In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, executables and libraries, they might change from system to system (or installation to installation, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`:
```json
@@ -117,8 +107,6 @@ In the ClusterCockpit world we specified a set of standard metrics. Since some c
# Conditional manipulation of tags (`add_tags` and `del_tags`)
__deprecated__
Common config format:
```json
{
@@ -130,8 +118,6 @@ Common config format:
## The `del_tags` option
__deprecated__
The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.
If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case.
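A hedged example of such a rule, following the config format above (the tag key is chosen for illustration):

```json
{
  "delete_tags" : [
    {
      "key" : "unit",
      "value" : "*",
      "if" : "*"
    }
  ]
}
```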
@@ -143,8 +129,6 @@ Never delete tags:
## The `add_tags` option
__deprecated__
In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.
If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.
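The rule described in that sentence would look like this (a sketch in the config format above):

```json
{
  "add_tags" : [
    {
      "key" : "test",
      "value" : "testing",
      "if" : "name == 'temp_package_id_0'"
    }
  ]
}
```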
@@ -186,8 +170,6 @@ In some cases, you want to drop a metric and don't get it forwarded to the sinks
## The `drop_metrics` section
__deprecated__
The argument is a list of metric names. No further checks are performed, only a comparison of the metric name:
```json
@@ -203,8 +185,6 @@ The example drops all metrics with the name `drop_metric_1` and `drop_metric_2`.
## The `drop_metrics_if` section
__deprecated__
This option takes a list of evaluable conditions and performs them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).
```json
@@ -220,22 +200,15 @@ The first line is comparable with the example in `drop_metrics`, it drops all me
# Manipulating the metric units
## The `normalize_units` option
__deprecated__
The cc-metric-collector tries to read the data from the system as it is reported. If available, it tries to read the metric unit from the system as well (e.g. from `/proc/meminfo`). The problem is that, depending on the source, the metric units are named differently. Just think about `byte`, `Byte`, `B`, `bytes`, ...
The [cc-units](https://github.com/ClusterCockpit/cc-units) package provides a normalization option to use the same metric unit name for all metrics. If this option is set to true, all `unit` meta tags are normalized.
## The `change_unit_prefix` section
__deprecated__
It is often the case that metrics are reported by the system using a rather outdated unit prefix (like `/proc/meminfo` still uses kByte even though current memory sizes are in the GByte range). If you want to change the prefix of a unit, you can do that with the help of [cc-units](https://github.com/ClusterCockpit/cc-units). The setting works on the metric name and requires the new prefix for the metric. The cc-units package determines the scaling factor.
# Aggregate metric values of the current interval with the `interval_aggregates` option
**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0 and is **experimental**
**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0
In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent out during the next interval but carry the timestamp of the beginning of the previous interval.
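A sketch of an `interval_aggregates` entry (metric names and the condition are illustrative; `function` takes a gval expression over `values`, as in the aggregator code above):

```json
"interval_aggregates" : [
  {
    "name" : "mem_used_avg",
    "if" : "name == 'mem_used'",
    "function" : "avg(values)",
    "tags" : { "type" : "node" },
    "meta" : { "source" : "MetricAggregator" }
  }
]
```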

View File

@@ -7,7 +7,7 @@ import (
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
)
@@ -16,7 +16,7 @@ type metricCachePeriod struct {
stopstamp time.Time
numMetrics int
sizeMetrics int
metrics []lp.CCMessage
metrics []lp.CCMetric
}
// Metric cache data structure
@@ -29,21 +29,21 @@ type metricCache struct {
ticker mct.MultiChanTicker
tickchan chan time.Time
done chan bool
output chan lp.CCMessage
output chan lp.CCMetric
aggEngine agg.MetricAggregator
}
type MetricCache interface {
Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
Start()
Add(metric lp.CCMessage)
GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage)
Add(metric lp.CCMetric)
GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric)
AddAggregation(name, function, condition string, tags, meta map[string]string) error
DeleteAggregation(name string) error
Close()
}
func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
func (c *metricCache) Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
var err error = nil
c.done = make(chan bool)
c.wg = wg
@@ -55,7 +55,7 @@ func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker,
p := new(metricCachePeriod)
p.numMetrics = 0
p.sizeMetrics = 0
p.metrics = make([]lp.CCMessage, 0)
p.metrics = make([]lp.CCMetric, 0)
c.intervals = append(c.intervals, p)
}
@@ -124,7 +124,7 @@ func (c *metricCache) Start() {
// Add a metric to the cache. The interval is defined by the global timer (rotate() in Start())
// The intervals list is used as a round-robin buffer and the metric list grows dynamically
// to avoid reallocations
func (c *metricCache) Add(metric lp.CCMessage) {
func (c *metricCache) Add(metric lp.CCMetric) {
if c.curPeriod >= 0 && c.curPeriod < c.numPeriods {
c.lock.Lock()
p := c.intervals[c.curPeriod]
@@ -153,10 +153,10 @@ func (c *metricCache) DeleteAggregation(name string) error {
// Get all metrics of an interval. The index is the difference to the current interval, so index=0
// is the current one, index=1 the last interval and so on. Returns an empty array if an invalid index
// is given (negative index, index larger than the configured number of total intervals, ...)
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage) {
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric) {
var start time.Time = time.Now()
var stop time.Time = time.Now()
var metrics []lp.CCMessage
var metrics []lp.CCMetric
if index >= 0 && index < c.numPeriods {
pindex := c.curPeriod - index
if pindex < 0 {
@@ -168,10 +168,10 @@ func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage
metrics = c.intervals[pindex].metrics
//return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
} else {
metrics = make([]lp.CCMessage, 0)
metrics = make([]lp.CCMetric, 0)
}
} else {
metrics = make([]lp.CCMessage, 0)
metrics = make([]lp.CCMetric, 0)
}
return start, stop, metrics
}
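A brief, hypothetical usage sketch of the indexing convention documented above (assuming the surrounding package and its `fmt` import):

```golang
// printLastInterval is a hypothetical helper showing the GetPeriod indexing:
// index 0 is the current interval, index 1 the previous one, and so on.
// Out-of-range indices yield an empty metric slice.
func printLastInterval(c MetricCache) {
	start, stop, metrics := c.GetPeriod(1)
	fmt.Printf("interval %v - %v: %d metrics\n", start, stop, len(metrics))
}
```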
@@ -182,7 +182,7 @@ func (c *metricCache) Close() {
c.done <- true
}
func NewCache(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
func NewCache(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
c := new(metricCache)
err := c.Init(output, ticker, wg, numPeriods)
if err != nil {

View File

@@ -2,7 +2,6 @@ package metricRouter
import (
"encoding/json"
"fmt"
"os"
"strings"
"sync"
@@ -10,10 +9,10 @@ import (
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
units "github.com/ClusterCockpit/cc-units"
)
const ROUTER_MAX_FORWARD = 50
@@ -39,17 +38,16 @@ type metricRouterConfig struct {
MaxForward int `json:"max_forward"` // Number of maximal forwarded metrics at one select
NormalizeUnits bool `json:"normalize_units"` // Check unit meta flag and normalize it using cc-units
ChangeUnitPrefix map[string]string `json:"change_unit_prefix"` // Add prefix that should be applied to the metrics
// dropMetrics map[string]bool // Internal map for O(1) lookup
MessageProcessor json.RawMessage `json:"process_message,omitempty"`
dropMetrics map[string]bool // Internal map for O(1) lookup
}
// Metric router data structure
type metricRouter struct {
hostname string // Hostname used in tags
coll_input chan lp.CCMessage // Input channel from CollectorManager
recv_input chan lp.CCMessage // Input channel from ReceiveManager
cache_input chan lp.CCMessage // Input channel from MetricCache
outputs []chan lp.CCMessage // List of all output channels
coll_input chan lp.CCMetric // Input channel from CollectorManager
recv_input chan lp.CCMetric // Input channel from ReceiveManager
cache_input chan lp.CCMetric // Input channel from MetricCache
outputs []chan lp.CCMetric // List of all output channels
done chan bool // channel to finish / stop metric router
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
timestamp time.Time // timestamp periodically updated by ticker each interval
@@ -58,15 +56,14 @@ type metricRouter struct {
cache MetricCache // pointer to MetricCache
cachewg sync.WaitGroup // wait group for MetricCache
maxForward int // number of metrics to forward maximally in one iteration
mp mp.MessageProcessor
}
// MetricRouter access functions
type MetricRouter interface {
Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error
AddCollectorInput(input chan lp.CCMessage)
AddReceiverInput(input chan lp.CCMessage)
AddOutput(output chan lp.CCMessage)
AddCollectorInput(input chan lp.CCMetric)
AddReceiverInput(input chan lp.CCMetric)
AddOutput(output chan lp.CCMetric)
Start()
Close()
}
@@ -78,9 +75,9 @@ type MetricRouter interface {
// * ticker (from variable ticker)
// * configuration (read from config file in variable routerConfigFile)
func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error {
r.outputs = make([]chan lp.CCMessage, 0)
r.outputs = make([]chan lp.CCMetric, 0)
r.done = make(chan bool)
r.cache_input = make(chan lp.CCMessage)
r.cache_input = make(chan lp.CCMetric)
r.wg = wg
r.ticker = ticker
r.config.MaxForward = ROUTER_MAX_FORWARD
@@ -122,56 +119,14 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
}
}
p, err := mp.NewMessageProcessor()
if err != nil {
return fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
r.config.dropMetrics = make(map[string]bool)
for _, mname := range r.config.DropMetrics {
r.mp.AddDropMessagesByName(mname)
r.config.dropMetrics[mname] = true
}
for _, cond := range r.config.DropMetricsIf {
r.mp.AddDropMessagesByCondition(cond)
}
for _, data := range r.config.AddTags {
cond := data.Condition
if cond == "*" {
cond = "true"
}
r.mp.AddAddTagsByCondition(cond, data.Key, data.Value)
}
for _, data := range r.config.DelTags {
cond := data.Condition
if cond == "*" {
cond = "true"
}
r.mp.AddDeleteTagsByCondition(cond, data.Key, data.Value)
}
for oldname, newname := range r.config.RenameMetrics {
r.mp.AddRenameMetricByName(oldname, newname)
}
for metricName, prefix := range r.config.ChangeUnitPrefix {
r.mp.AddChangeUnitPrefix(fmt.Sprintf("name == '%s'", metricName), prefix)
}
r.mp.SetNormalizeUnits(r.config.NormalizeUnits)
r.mp.AddAddTagsByCondition("true", r.config.HostnameTagName, r.hostname)
// r.config.dropMetrics = make(map[string]bool)
// for _, mname := range r.config.DropMetrics {
// r.config.dropMetrics[mname] = true
// }
return nil
}
func getParamMap(point lp.CCMessage) map[string]interface{} {
func getParamMap(point lp.CCMetric) map[string]interface{} {
params := make(map[string]interface{})
params["metric"] = point
params["name"] = point.Name()
@@ -189,7 +144,7 @@ func getParamMap(point lp.CCMessage) map[string]interface{} {
}
// DoAddTags adds a tag when the condition is fulfilled
func (r *metricRouter) DoAddTags(point lp.CCMessage) {
func (r *metricRouter) DoAddTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.AddTags {
if m.Condition == "*" {
@@ -211,81 +166,81 @@ func (r *metricRouter) DoAddTags(point lp.CCMessage) {
}
// DoDelTags removes a tag when the condition is fulfilled
// func (r *metricRouter) DoDelTags(point lp.CCMessage) {
// var conditionMatches bool
// for _, m := range r.config.DelTags {
// if m.Condition == "*" {
// // Condition is always matched
// conditionMatches = true
// } else {
// // Evaluate condition
// var err error
// conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
// if err != nil {
// cclog.ComponentError("MetricRouter", err.Error())
// conditionMatches = false
// }
// }
// if conditionMatches {
// point.RemoveTag(m.Key)
// }
// }
// }
func (r *metricRouter) DoDelTags(point lp.CCMetric) {
var conditionMatches bool
for _, m := range r.config.DelTags {
if m.Condition == "*" {
// Condition is always matched
conditionMatches = true
} else {
// Evaluate condition
var err error
conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
}
}
if conditionMatches {
point.RemoveTag(m.Key)
}
}
}
// Conditional test whether a metric should be dropped
// func (r *metricRouter) dropMetric(point lp.CCMessage) bool {
// // Simple drop check
// if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
// return conditionMatches
// }
func (r *metricRouter) dropMetric(point lp.CCMetric) bool {
// Simple drop check
if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
return conditionMatches
}
// // Checking the dropping conditions
// for _, m := range r.config.DropMetricsIf {
// conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
// if err != nil {
// cclog.ComponentError("MetricRouter", err.Error())
// conditionMatches = false
// }
// if conditionMatches {
// return conditionMatches
// }
// }
// Checking the dropping conditions
for _, m := range r.config.DropMetricsIf {
conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
if err != nil {
cclog.ComponentError("MetricRouter", err.Error())
conditionMatches = false
}
if conditionMatches {
return conditionMatches
}
}
// // No dropping condition met
// return false
// }
// No dropping condition met
return false
}
// func (r *metricRouter) prepareUnit(point lp.CCMessage) bool {
// if r.config.NormalizeUnits {
// if in_unit, ok := point.GetMeta("unit"); ok {
// u := units.NewUnit(in_unit)
// if u.Valid() {
// point.AddMeta("unit", u.Short())
// }
// }
// }
// if newP, ok := r.config.ChangeUnitPrefix[point.Name()]; ok {
func (r *metricRouter) prepareUnit(point lp.CCMetric) bool {
if r.config.NormalizeUnits {
if in_unit, ok := point.GetMeta("unit"); ok {
u := units.NewUnit(in_unit)
if u.Valid() {
point.AddMeta("unit", u.Short())
}
}
}
if newP, ok := r.config.ChangeUnitPrefix[point.Name()]; ok {
// newPrefix := units.NewPrefix(newP)
newPrefix := units.NewPrefix(newP)
// if in_unit, ok := point.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
// u := units.NewUnit(in_unit)
// if u.Valid() {
// cclog.ComponentDebug("MetricRouter", "Change prefix to", newP, "for metric", point.Name())
// conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
// if conv != nil && out_unit.Valid() {
// if val, ok := point.GetField("value"); ok {
// point.AddField("value", conv(val))
// point.AddMeta("unit", out_unit.Short())
// }
// }
// }
if in_unit, ok := point.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
u := units.NewUnit(in_unit)
if u.Valid() {
cclog.ComponentDebug("MetricRouter", "Change prefix to", newP, "for metric", point.Name())
conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
if conv != nil && out_unit.Valid() {
if val, ok := point.GetField("value"); ok {
point.AddField("value", conv(val))
point.AddMeta("unit", out_unit.Short())
}
}
}
// }
// }
}
}
// return true
// }
return true
}
// Start starts the metric router
func (r *metricRouter) Start() {
@@ -304,75 +259,59 @@ func (r *metricRouter) Start() {
// Forward takes a received metric, adds or deletes tags
// and forwards it to the output channels
// forward := func(point lp.CCMessage) {
// cclog.ComponentDebug("MetricRouter", "FORWARD", point)
// r.DoAddTags(point)
// r.DoDelTags(point)
// name := point.Name()
// if new, ok := r.config.RenameMetrics[name]; ok {
// point.SetName(new)
// point.AddMeta("oldname", name)
// r.DoAddTags(point)
// r.DoDelTags(point)
// }
forward := func(point lp.CCMetric) {
cclog.ComponentDebug("MetricRouter", "FORWARD", point)
r.DoAddTags(point)
r.DoDelTags(point)
name := point.Name()
if new, ok := r.config.RenameMetrics[name]; ok {
point.SetName(new)
point.AddMeta("oldname", name)
r.DoAddTags(point)
r.DoDelTags(point)
}
// r.prepareUnit(point)
r.prepareUnit(point)
// for _, o := range r.outputs {
// o <- point
// }
// }
for _, o := range r.outputs {
o <- point
}
}
// Forward message received from collector channel
coll_forward := func(p lp.CCMessage) {
coll_forward := func(p lp.CCMetric) {
// receive from metric collector
//p.AddTag(r.config.HostnameTagName, r.hostname)
p.AddTag(r.config.HostnameTagName, r.hostname)
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
m, err := r.mp.ProcessMessage(p)
if err == nil && m != nil {
for _, o := range r.outputs {
o <- m
}
if !r.dropMetric(p) {
forward(p)
}
// if !r.dropMetric(p) {
// for _, o := range r.outputs {
// o <- point
// }
// }
// even if the metric is dropped, it is stored in the cache for
// aggregations
if r.config.NumCacheIntervals > 0 {
r.cache.Add(m)
r.cache.Add(p)
}
}
// Forward message received from receivers channel
recv_forward := func(p lp.CCMessage) {
recv_forward := func(p lp.CCMetric) {
// receive from receive manager
if r.config.IntervalStamp {
p.SetTime(r.timestamp)
}
m, err := r.mp.ProcessMessage(p)
if err == nil && m != nil {
for _, o := range r.outputs {
o <- m
}
if !r.dropMetric(p) {
forward(p)
}
// if !r.dropMetric(p) {
// forward(p)
// }
}
// Forward message received from cache channel
cache_forward := func(p lp.CCMessage) {
cache_forward := func(p lp.CCMetric) {
// receive from metric collector
m, err := r.mp.ProcessMessage(p)
if err == nil && m != nil {
for _, o := range r.outputs {
o <- m
}
if !r.dropMetric(p) {
p.AddTag(r.config.HostnameTagName, r.hostname)
forward(p)
}
}
@@ -419,17 +358,17 @@ func (r *metricRouter) Start() {
}
// AddCollectorInput adds a channel between metric collector and metric router
func (r *metricRouter) AddCollectorInput(input chan lp.CCMessage) {
func (r *metricRouter) AddCollectorInput(input chan lp.CCMetric) {
r.coll_input = input
}
// AddReceiverInput adds a channel between metric receiver and metric router
func (r *metricRouter) AddReceiverInput(input chan lp.CCMessage) {
func (r *metricRouter) AddReceiverInput(input chan lp.CCMetric) {
r.recv_input = input
}
// AddOutput adds a output channel to the metric router
func (r *metricRouter) AddOutput(output chan lp.CCMessage) {
func (r *metricRouter) AddOutput(output chan lp.CCMetric) {
r.outputs = append(r.outputs, output)
}

View File

@@ -7,7 +7,6 @@ import (
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
write "github.com/influxdata/influxdb-client-go/v2/api/write"
lp "github.com/influxdata/line-protocol" // MIT license
"golang.org/x/exp/maps"
)
// Most functions are derived from github.com/influxdata/line-protocol/metric.go
@@ -65,11 +64,9 @@ func (m *ccMetric) String() string {
// ToLineProtocol generates influxDB line protocol for data type ccMetric
func (m *ccMetric) ToPoint(metaAsTags map[string]bool) (p *write.Point) {
p = influxdb2.NewPoint(m.name, m.tags, m.fields, m.tm)
for key, use_as_tag := range metaAsTags {
if use_as_tag {
if value, ok := m.GetMeta(key); ok {
p.AddTag(key, value)
}
for key, ok1 := range metaAsTags {
if val, ok2 := m.GetMeta(key); ok1 && ok2 {
p.AddTag(key, val)
}
}
return p
@@ -196,13 +193,19 @@ func New(
) (CCMetric, error) {
m := &ccMetric{
name: name,
tags: maps.Clone(tags),
meta: maps.Clone(meta),
tags: make(map[string]string, len(tags)),
meta: make(map[string]string, len(meta)),
fields: make(map[string]interface{}, len(fields)),
tm: tm,
}
// deep copy fields
// deep copy tags, meta data tags and fields
for k, v := range tags {
m.tags[k] = v
}
for k, v := range meta {
m.meta[k] = v
}
for k, v := range fields {
v := convertField(v)
if v == nil {
@@ -216,14 +219,28 @@ func New(
// FromMetric copies the metric <other>
func FromMetric(other CCMetric) CCMetric {
return &ccMetric{
otags := other.Tags()
ometa := other.Meta()
ofields := other.Fields()
m := &ccMetric{
name: other.Name(),
tags: maps.Clone(other.Tags()),
meta: maps.Clone(other.Meta()),
fields: maps.Clone(other.Fields()),
tags: make(map[string]string, len(otags)),
meta: make(map[string]string, len(ometa)),
fields: make(map[string]interface{}, len(ofields)),
tm: other.Time(),
}
// deep copy tags, meta data tags and fields
for key, value := range otags {
m.tags[key] = value
}
for key, value := range ometa {
m.meta[key] = value
}
for key, value := range ofields {
m.fields[key] = value
}
return m
}
// FromInfluxMetric copies the influxDB line protocol metric <other>
@@ -247,10 +264,8 @@ func FromInfluxMetric(other lp.Metric) CCMetric {
}
// convertField converts data types of fields by the following schemata:
//
// *float32, *float64, float32, float64 -> float64
// *int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
//
// *float32, *float64, float32, float64 -> float64
// *int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
// *uint, *uint8, *uint16, *uint32, *uint64, uint, uint8, uint16, uint32, uint64 -> uint64
// *[]byte, *string, []byte, string -> string
// *bool, bool -> bool

View File

@@ -2,6 +2,7 @@ package ccTopology
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
@@ -10,247 +11,93 @@ import (
"strings"
cclogger "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
"golang.org/x/exp/slices"
)
const SYSFS_NUMABASE = `/sys/devices/system/node`
const SYSFS_CPUBASE = `/sys/devices/system/cpu`
const PROCFS_CPUINFO = `/proc/cpuinfo`
// Structure holding all information about a hardware thread
// See https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-devices-system-cpu
type HwthreadEntry struct {
// for each CPUx:
CpuID int // CPU / hardware thread ID
SMT int // Simultaneous Multithreading ID
CoreCPUsList []int // CPUs within the same core
Core int // Socket local core ID
Socket int // Sockets (physical) ID
Die int // Die ID
NumaDomain int // NUMA Domain
// intArrayContains scans an array of ints to check whether the value str is present in the array.
// If the specified value is found, the corresponding array index is returned.
// The bool value signals success or failure.
func intArrayContains(array []int, str int) (int, bool) {
for i, a := range array {
if a == str {
return i, true
}
}
return -1, false
}
var cache struct {
HwthreadList []int // List of CPU hardware threads
SMTList []int // List of symmetric hyper threading IDs
CoreList []int // List of CPU core IDs
SocketList []int // List of CPU sockets (physical) IDs
DieList []int // List of CPU Die IDs
NumaDomainList []int // List of NUMA Domains
CpuData []HwthreadEntry
}
// fileToInt reads an integer value from a sysfs file
// In case of an error -1 is returned
// Used internally for sysfs file reads
func fileToInt(path string) int {
buffer, err := os.ReadFile(path)
buffer, err := ioutil.ReadFile(path)
if err != nil {
log.Print(err)
cclogger.ComponentError("ccTopology", "fileToInt", "Reading", path, ":", err.Error())
cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
return -1
}
stringBuffer := strings.TrimSpace(string(buffer))
id, err := strconv.Atoi(stringBuffer)
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
var id int64
//_, err = fmt.Scanf("%d", sbuffer, &id)
id, err = strconv.ParseInt(sbuffer, 10, 32)
if err != nil {
cclogger.ComponentError("ccTopology", "fileToInt", "Parsing", path, ":", stringBuffer, err.Error())
cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
return -1
}
return id
return int(id)
}
// fileToList reads a list from a sysfs file
// A list consists of value ranges separated by commas
// A range can be a single value or a range of values given by a startValue-endValue
// In case of an error nil is returned
func fileToList(path string) []int {
// Read list
buffer, err := os.ReadFile(path)
// Get list of CPU socket IDs
func SocketList() []int {
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
cclogger.ComponentError("ccTopology", "fileToList", "Reading", path, ":", err.Error())
return nil
}
// Create list
list := make([]int, 0)
stringBuffer := strings.TrimSpace(string(buffer))
for _, valueRangeString := range strings.Split(stringBuffer, ",") {
valueRange := strings.Split(valueRangeString, "-")
switch len(valueRange) {
case 1:
singleValue, err := strconv.Atoi(valueRange[0])
ll := strings.Split(string(buffer), "\n")
packs := make([]int, 0)
for _, line := range ll {
if strings.HasPrefix(line, "physical id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[0], ":", err.Error())
return nil
log.Print(err)
return packs
}
list = append(list, singleValue)
case 2:
startValue, err := strconv.Atoi(valueRange[0])
if err != nil {
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[0], ":", err.Error())
return nil
}
endValue, err := strconv.Atoi(valueRange[1])
if err != nil {
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[1], ":", err.Error())
return nil
}
for value := startValue; value <= endValue; value++ {
list = append(list, value)
_, found := intArrayContains(packs, int(id))
if !found {
packs = append(packs, int(id))
}
}
}
return list
return packs
}
// init initializes the cache structure
func init() {
getHWThreads :=
func() []int {
globPath := filepath.Join(SYSFS_CPUBASE, "cpu[0-9]*")
regexPath := filepath.Join(SYSFS_CPUBASE, "cpu([[:digit:]]+)")
regex := regexp.MustCompile(regexPath)
// File globbing for hardware threads
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "init:getHWThreads", err.Error())
return nil
}
hwThreadIDs := make([]int, len(files))
for i, file := range files {
// Extract hardware thread ID
matches := regex.FindStringSubmatch(file)
if len(matches) != 2 {
cclogger.ComponentError("CCTopology", "init:getHWThreads: Failed to extract hardware thread ID from ", file)
return nil
}
// Convert hardware thread ID to int
id, err := strconv.Atoi(matches[1])
if err != nil {
cclogger.ComponentError("CCTopology", "init:getHWThreads: Failed to convert to int hardware thread ID ", matches[1])
return nil
}
hwThreadIDs[i] = id
}
// Sort hardware thread IDs
slices.Sort(hwThreadIDs)
return hwThreadIDs
}
getNumaDomain :=
func(basePath string) int {
globPath := filepath.Join(basePath, "node*")
regexPath := filepath.Join(basePath, "node([[:digit:]]+)")
regex := regexp.MustCompile(regexPath)
// File globbing for NUMA node
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", err.Error())
return -1
}
// Check that exactly one NUMA domain was found
if len(files) != 1 {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Number of NUMA domains != 1: ", len(files))
return -1
}
// Extract NUMA node ID
matches := regex.FindStringSubmatch(files[0])
if len(matches) != 2 {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Failed to extract NUMA node ID from: ", files[0])
return -1
}
id, err := strconv.Atoi(matches[1])
if err != nil {
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Failed to parse NUMA node ID from: ", matches[1])
return -1
}
return id
}
cache.HwthreadList = getHWThreads()
cache.CoreList = make([]int, len(cache.HwthreadList))
cache.SocketList = make([]int, len(cache.HwthreadList))
cache.DieList = make([]int, len(cache.HwthreadList))
cache.SMTList = make([]int, len(cache.HwthreadList))
cache.NumaDomainList = make([]int, len(cache.HwthreadList))
cache.CpuData = make([]HwthreadEntry, len(cache.HwthreadList))
for i, c := range cache.HwthreadList {
// Set cpuBase directory for topology lookup
cpuBase := filepath.Join(SYSFS_CPUBASE, fmt.Sprintf("cpu%d", c))
topoBase := filepath.Join(cpuBase, "topology")
// Lookup Core ID
cache.CoreList[i] = fileToInt(filepath.Join(topoBase, "core_id"))
// Lookup socket / physical package ID
cache.SocketList[i] = fileToInt(filepath.Join(topoBase, "physical_package_id"))
// Lookup CPU die id
cache.DieList[i] = fileToInt(filepath.Join(topoBase, "die_id"))
if cache.DieList[i] < 0 {
cache.DieList[i] = cache.SocketList[i]
}
// Lookup List of CPUs within the same core
coreCPUsList := fileToList(filepath.Join(topoBase, "core_cpus_list"))
// Find index of CPU ID in List of CPUs within the same core
// if not found return -1
cache.SMTList[i] = slices.Index(coreCPUsList, c)
// Lookup NUMA domain id
cache.NumaDomainList[i] = getNumaDomain(cpuBase)
cache.CpuData[i] =
HwthreadEntry{
CpuID: cache.HwthreadList[i],
SMT: cache.SMTList[i],
CoreCPUsList: coreCPUsList,
Socket: cache.SocketList[i],
NumaDomain: cache.NumaDomainList[i],
Die: cache.DieList[i],
Core: cache.CoreList[i],
}
}
slices.Sort(cache.HwthreadList)
cache.HwthreadList = slices.Compact(cache.HwthreadList)
slices.Sort(cache.SMTList)
cache.SMTList = slices.Compact(cache.SMTList)
slices.Sort(cache.CoreList)
cache.CoreList = slices.Compact(cache.CoreList)
slices.Sort(cache.SocketList)
cache.SocketList = slices.Compact(cache.SocketList)
slices.Sort(cache.DieList)
cache.DieList = slices.Compact(cache.DieList)
slices.Sort(cache.NumaDomainList)
cache.NumaDomainList = slices.Compact(cache.NumaDomainList)
}
// SocketList gets the list of CPU socket IDs
func SocketList() []int {
return slices.Clone(cache.SocketList)
}
// HwthreadList gets the list of hardware thread IDs in the order of listing in /proc/cpuinfo
// Get list of hardware thread IDs in the order of listing in /proc/cpuinfo
func HwthreadList() []int {
return slices.Clone(cache.HwthreadList)
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
cpulist := make([]int, 0)
for _, line := range ll {
if strings.HasPrefix(line, "processor") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[2], 10, 32)
if err != nil {
log.Print(err)
return cpulist
}
_, found := intArrayContains(cpulist, int(id))
if !found {
cpulist = append(cpulist, int(id))
}
}
}
return cpulist
}
// Get list of hardware thread IDs in the order of listing in /proc/cpuinfo
@@ -259,25 +106,88 @@ func CpuList() []int {
return HwthreadList()
}
// CoreList gets the list of CPU core IDs in the order of listing in /proc/cpuinfo
// Get list of CPU core IDs in the order of listing in /proc/cpuinfo
func CoreList() []int {
return slices.Clone(cache.CoreList)
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
corelist := make([]int, 0)
for _, line := range ll {
if strings.HasPrefix(line, "core id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return corelist
}
_, found := intArrayContains(corelist, int(id))
if !found {
corelist = append(corelist, int(id))
}
}
}
return corelist
}
// Get list of NUMA node IDs
func NumaNodeList() []int {
return slices.Clone(cache.NumaDomainList)
numaList := make([]int, 0)
globPath := filepath.Join(string(SYSFS_NUMABASE), "node*")
regexPath := filepath.Join(string(SYSFS_NUMABASE), "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "NumaNodeList", err.Error())
}
for _, f := range files {
if !regex.MatchString(f) {
continue
}
finfo, err := os.Lstat(f)
if err != nil {
continue
}
if !finfo.IsDir() {
continue
}
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil {
if _, found := intArrayContains(numaList, id); !found {
numaList = append(numaList, id)
}
}
}
}
return numaList
}
// DieList gets the list of CPU die IDs
// Get list of CPU die IDs
func DieList() []int {
if len(cache.DieList) > 0 {
return slices.Clone(cache.DieList)
cpulist := HwthreadList()
dielist := make([]int, 0)
for _, c := range cpulist {
diepath := filepath.Join(string(SYSFS_CPUBASE), fmt.Sprintf("cpu%d", c), "topology/die_id")
dieid := fileToInt(diepath)
if dieid > 0 {
_, found := intArrayContains(dielist, int(dieid))
if !found {
dielist = append(dielist, int(dieid))
}
}
}
if len(dielist) > 0 {
return dielist
}
return SocketList()
}
// GetTypeList gets the list of specified type using the naming format inside ClusterCockpit
// Get list of specified type using the naming format inside ClusterCockpit
func GetTypeList(topology_type string) []int {
switch topology_type {
case "node":
@@ -296,33 +206,128 @@ func GetTypeList(topology_type string) []int {
return []int{}
}
func GetTypeId(hwt HwthreadEntry, topology_type string) (int, error) {
var err error = nil
switch topology_type {
case "node":
return 0, err
case "socket":
return hwt.Socket, err
case "die":
return hwt.Die, err
case "memoryDomain":
return hwt.NumaDomain, err
case "core":
return hwt.Core, err
case "hwthread":
return hwt.CpuID, err
}
return -1, fmt.Errorf("unknown topology type '%s'", topology_type)
// Structure holding all information about a hardware thread
type HwthreadEntry struct {
Cpuid int
SMT int
Core int
Socket int
Numadomain int
Die int
}
// CpuData returns CPU data for each hardware thread
func CpuData() []HwthreadEntry {
// return a deep copy to protect cache data
c := slices.Clone(cache.CpuData)
for i := range c {
c[i].CoreCPUsList = slices.Clone(cache.CpuData[i].CoreCPUsList)
// fileToInt := func(path string) int {
// buffer, err := ioutil.ReadFile(path)
// if err != nil {
// log.Print(err)
// //cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
// return -1
// }
// sbuffer := strings.Replace(string(buffer), "\n", "", -1)
// var id int64
// //_, err = fmt.Scanf("%d", sbuffer, &id)
// id, err = strconv.ParseInt(sbuffer, 10, 32)
// if err != nil {
// cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
// return -1
// }
// return int(id)
// }
getCore := func(basepath string) int {
return fileToInt(fmt.Sprintf("%s/core_id", basepath))
}
return c
getSocket := func(basepath string) int {
return fileToInt(fmt.Sprintf("%s/physical_package_id", basepath))
}
getDie := func(basepath string) int {
return fileToInt(fmt.Sprintf("%s/die_id", basepath))
}
getSMT := func(cpuid int, basepath string) int {
buffer, err := ioutil.ReadFile(fmt.Sprintf("%s/thread_siblings_list", basepath))
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
}
threadlist := make([]int, 0)
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
for _, x := range strings.Split(sbuffer, ",") {
id, err := strconv.ParseInt(x, 10, 32)
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
}
threadlist = append(threadlist, int(id))
}
for i, x := range threadlist {
if x == cpuid {
return i
}
}
return 1
}
getNumaDomain := func(basepath string) int {
globPath := filepath.Join(basepath, "node*")
regexPath := filepath.Join(basepath, "node(\\d+)")
regex := regexp.MustCompile(regexPath)
files, err := filepath.Glob(globPath)
if err != nil {
cclogger.ComponentError("CCTopology", "CpuData:getNumaDomain", err.Error())
}
for _, f := range files {
finfo, err := os.Lstat(f)
if err == nil && finfo.IsDir() {
matches := regex.FindStringSubmatch(f)
if len(matches) == 2 {
id, err := strconv.Atoi(matches[1])
if err == nil {
return id
}
}
}
}
return 0
}
clist := make([]HwthreadEntry, 0)
for _, c := range HwthreadList() {
clist = append(clist, HwthreadEntry{Cpuid: c})
}
for i, centry := range clist {
centry.Socket = -1
centry.Numadomain = -1
centry.Die = -1
centry.Core = -1
// Set base directory for topology lookup
cpustr := fmt.Sprintf("cpu%d", centry.Cpuid)
base := filepath.Join("/sys/devices/system/cpu", cpustr)
topoBase := filepath.Join(base, "topology")
// Lookup CPU core id
centry.Core = getCore(topoBase)
// Lookup CPU socket id
centry.Socket = getSocket(topoBase)
// Lookup CPU die id
centry.Die = getDie(topoBase)
if centry.Die < 0 {
centry.Die = centry.Socket
}
// Lookup SMT thread id
centry.SMT = getSMT(centry.Cpuid, topoBase)
// Lookup NUMA domain id
centry.Numadomain = getNumaDomain(base)
// Update values in output list
clist[i] = centry
}
return clist
}
// Structure holding basic information about a CPU
@@ -335,129 +340,130 @@ type CpuInformation struct {
NumNumaDomains int
}
// CpuInformation reports basic information about the CPU
// Get basic information about the CPU
func CpuInfo() CpuInformation {
return CpuInformation{
NumNumaDomains: len(cache.NumaDomainList),
SMTWidth: len(cache.SMTList),
NumDies: len(cache.DieList),
NumCores: len(cache.CoreList),
NumSockets: len(cache.SocketList),
NumHWthreads: len(cache.HwthreadList),
var c CpuInformation
smtList := make([]int, 0)
numaList := make([]int, 0)
dieList := make([]int, 0)
socketList := make([]int, 0)
coreList := make([]int, 0)
cdata := CpuData()
for _, d := range cdata {
if _, ok := intArrayContains(smtList, d.SMT); !ok {
smtList = append(smtList, d.SMT)
}
if _, ok := intArrayContains(numaList, d.Numadomain); !ok {
numaList = append(numaList, d.Numadomain)
}
if _, ok := intArrayContains(dieList, d.Die); !ok {
dieList = append(dieList, d.Die)
}
if _, ok := intArrayContains(socketList, d.Socket); !ok {
socketList = append(socketList, d.Socket)
}
if _, ok := intArrayContains(coreList, d.Core); !ok {
coreList = append(coreList, d.Core)
}
}
c.NumNumaDomains = len(numaList)
c.SMTWidth = len(smtList)
c.NumDies = len(dieList)
c.NumCores = len(coreList)
c.NumSockets = len(socketList)
c.NumHWthreads = len(cdata)
return c
}
// GetHwthreadSocket gets the CPU socket ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadSocket(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
// Get the CPU socket ID for a given hardware thread ID
func GetHwthreadSocket(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Socket
}
}
return -1
}
// GetHwthreadNumaDomain gets the NUMA domain ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadNumaDomain(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
return d.NumaDomain
// Get the NUMA node ID for a given hardware thread ID
func GetHwthreadNumaDomain(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Numadomain
}
}
return -1
}
// GetHwthreadDie gets the CPU die ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadDie(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
// Get the CPU die ID for a given hardware thread ID
func GetHwthreadDie(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Die
}
}
return -1
}
// GetHwthreadCore gets the CPU core ID for a given hardware thread ID
// In case hardware thread ID is not found -1 is returned
func GetHwthreadCore(cpuID int) int {
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.CpuID == cpuID {
// Get the CPU core ID for a given hardware thread ID
func GetHwthreadCore(cpuid int) int {
cdata := CpuData()
for _, d := range cdata {
if d.Cpuid == cpuid {
return d.Core
}
}
return -1
}
// GetSocketHwthreads gets all hardware thread IDs associated with a CPU socket
// Get the all hardware thread ID associated with a CPU socket
func GetSocketHwthreads(socket int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Socket == socket {
cpuList = append(cpuList, d.CpuID)
cpulist = append(cpulist, d.Cpuid)
}
}
return cpuList
return cpulist
}
// GetNumaDomainHwthreads gets all hardware thread IDs associated with a NUMA domain
func GetNumaDomainHwthreads(numaDomain int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
if d.NumaDomain == numaDomain {
cpuList = append(cpuList, d.CpuID)
// Get the all hardware thread ID associated with a NUMA node
func GetNumaDomainHwthreads(domain int) []int {
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Numadomain == domain {
cpulist = append(cpulist, d.Cpuid)
}
}
return cpuList
return cpulist
}
// GetDieHwthreads gets all hardware thread IDs associated with a CPU die
// Get the all hardware thread ID associated with a CPU die
func GetDieHwthreads(die int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Die == die {
cpuList = append(cpuList, d.CpuID)
cpulist = append(cpulist, d.Cpuid)
}
}
return cpuList
return cpulist
}
// GetCoreHwthreads gets all hardware thread IDs associated with a CPU core
// Get the all hardware thread ID associated with a CPU core
func GetCoreHwthreads(core int) []int {
cpuList := make([]int, 0)
for i := range cache.CpuData {
d := &cache.CpuData[i]
all := CpuData()
cpulist := make([]int, 0)
for _, d := range all {
if d.Core == core {
cpuList = append(cpuList, d.CpuID)
cpulist = append(cpulist, d.Cpuid)
}
}
return cpuList
}
// GetTypeHwthreads gets all hardware thread IDs of the specified type using the naming format inside ClusterCockpit
func GetTypeHwthreads(topology_type string, id int) []int {
switch topology_type {
case "node":
return HwthreadList()
case "socket":
return GetSocketHwthreads(id)
case "die":
return GetDieHwthreads(id)
case "memoryDomain":
return GetNumaDomainHwthreads(id)
case "core":
return GetCoreHwthreads(id)
case "hwthread":
return []int{id}
}
return []int{}
return cpulist
}

View File

@@ -1,125 +0,0 @@
package hostlist
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
)
func Expand(in string) (result []string, err error) {
// Create ranges regular expression
reStNumber := "[[:digit:]]+"
reStRange := reStNumber + "-" + reStNumber
reStOptionalNumberOrRange := "(" + reStNumber + ",|" + reStRange + ",)*"
reStNumberOrRange := "(" + reStNumber + "|" + reStRange + ")"
reStBraceLeft := "[[]"
reStBraceRight := "[]]"
reStRanges := reStBraceLeft +
reStOptionalNumberOrRange +
reStNumberOrRange +
reStBraceRight
reRanges := regexp.MustCompile(reStRanges)
// Create host list regular expression
reStDNSChars := "[a-zA-Z0-9-]+"
reStPrefix := "^(" + reStDNSChars + ")"
reStOptionalSuffix := "(" + reStDNSChars + ")?"
re := regexp.MustCompile(reStPrefix + "([[][0-9,-]+[]])?" + reStOptionalSuffix)
// Remove all delimiters from the input
in = strings.TrimLeft(in, ", ")
for len(in) > 0 {
if v := re.FindStringSubmatch(in); v != nil {
// Remove matched part from the input
lenPrefix := len(v[0])
in = in[lenPrefix:]
// Remove all delimiters from the input
in = strings.TrimLeft(in, ", ")
// matched prefix, range and suffix
hlPrefix := v[1]
hlRanges := v[2]
hlSuffix := v[3]
// Single node without ranges
if hlRanges == "" {
result = append(result, hlPrefix)
continue
}
// Node with ranges
if v := reRanges.FindStringSubmatch(hlRanges); v != nil {
// Remove braces
hlRanges = hlRanges[1 : len(hlRanges)-1]
// Split host ranges at ,
for _, hlRange := range strings.Split(hlRanges, ",") {
// Split host range at -
RangeStartEnd := strings.Split(hlRange, "-")
// Range is only a single number
if len(RangeStartEnd) == 1 {
result = append(result, hlPrefix+RangeStartEnd[0]+hlSuffix)
continue
}
// Range has a start and an end
widthRangeStart := len(RangeStartEnd[0])
widthRangeEnd := len(RangeStartEnd[1])
iStart, _ := strconv.ParseUint(RangeStartEnd[0], 10, 64)
iEnd, _ := strconv.ParseUint(RangeStartEnd[1], 10, 64)
if iStart > iEnd {
return nil, fmt.Errorf("single range start is greater than end: %s", hlRange)
}
// Create print format string for range numbers
doPadding := widthRangeStart == widthRangeEnd
widthPadding := widthRangeStart
var formatString string
if doPadding {
formatString = "%0" + fmt.Sprint(widthPadding) + "d"
} else {
formatString = "%d"
}
formatString = hlPrefix + formatString + hlSuffix
// Add nodes from this range
for i := iStart; i <= iEnd; i++ {
result = append(result, fmt.Sprintf(formatString, i))
}
}
} else {
return nil, fmt.Errorf("not at hostlist range: %s", hlRanges)
}
} else {
return nil, fmt.Errorf("not a hostlist: %s", in)
}
}
if result != nil {
// sort
sort.Strings(result)
// uniq
previous := 1
for current := 1; current < len(result); current++ {
if result[current-1] != result[current] {
if previous != current {
result[previous] = result[current]
}
previous++
}
}
result = result[:previous]
}
return
}
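A short usage sketch (assuming the package is imported as `hostlist`); the expected result matches the test cases in the following file:

```golang
hosts, err := hostlist.Expand("n[1-2],m[1,2]")
if err != nil {
	// handle malformed hostlist
}
// hosts == []string{"m1", "m2", "n1", "n2"} (sorted, duplicates removed)
```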

View File

@@ -1,126 +0,0 @@
package hostlist
import (
"testing"
)
func TestExpand(t *testing.T) {
// Compare two slices of strings
equal := func(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
type testDefinition struct {
input string
resultExpected []string
errorExpected bool
}
expandTests := []testDefinition{
{
// Single node
input: "n1",
resultExpected: []string{"n1"},
errorExpected: false,
},
{
// Single node, duplicated
input: "n1,n1",
resultExpected: []string{"n1"},
errorExpected: false,
},
{
// Single node with padding
input: "n[01]",
resultExpected: []string{"n01"},
errorExpected: false,
},
{
// Single node with suffix
input: "n[01]-p",
resultExpected: []string{"n01-p"},
errorExpected: false,
},
{
// Multiple nodes with a single range
input: "n[1-2]",
resultExpected: []string{"n1", "n2"},
errorExpected: false,
},
{
// Multiple nodes with a single range and a single index
input: "n[1-2,3]",
resultExpected: []string{"n1", "n2", "n3"},
errorExpected: false,
},
{
// Multiple nodes with different prefixes
input: "n[1-2],m[1,2]",
resultExpected: []string{"m1", "m2", "n1", "n2"},
errorExpected: false,
},
{
// Multiple nodes with different suffixes
input: "n[1-2]-p,n[1,2]-q",
resultExpected: []string{"n1-p", "n1-q", "n2-p", "n2-q"},
errorExpected: false,
},
{
// Multiple nodes with and without node ranges
input: " n09, n[01-04,06-07,09] , , n10,n04",
resultExpected: []string{"n01", "n02", "n03", "n04", "n06", "n07", "n09", "n10"},
errorExpected: false,
},
{
// Forbidden DNS character
input: "n@",
resultExpected: []string{},
errorExpected: true,
},
{
// Forbidden range
input: "n[1-2-2,3]",
resultExpected: []string{},
errorExpected: true,
},
{
// Forbidden range limits
input: "n[2-1]",
resultExpected: []string{},
errorExpected: true,
},
}
for _, expandTest := range expandTests {
result, err := Expand(expandTest.input)
hasError := err != nil
if hasError != expandTest.errorExpected && hasError {
t.Errorf("Expand('%s') failed: unexpected error '%v'",
expandTest.input, err)
continue
}
if hasError != expandTest.errorExpected && !hasError {
t.Errorf("Expand('%s') did not fail as expected: got result '%+v'",
expandTest.input, result)
continue
}
if !hasError && !equal(result, expandTest.resultExpected) {
t.Errorf("Expand('%s') failed: got result '%+v', expected result '%v'",
expandTest.input, result, expandTest.resultExpected)
continue
}
t.Logf("Checked hostlist.Expand('%s'): result = '%+v', err = '%v'",
expandTest.input, result, err)
}
}

View File

@@ -1,266 +0,0 @@
# Message Processor Component
Multiple parts of the ClusterCockpit ecosystem require the processing of CCMessages.
The main CC application using it is `cc-metric-collector`. The processing part there was originally in the metric router, the central
hub connecting collectors (reading local data), receivers (receiving remote data) and sinks (sending data). Already in the early stages, the
lack of flexibility caused some trouble:
> The sysadmins wanted to keep operating their Ganglia based monitoring infrastructure while we developed the CC stack. Ganglia wants the core metrics with
> a specific name and resolution (right unit prefix) but there was no conversion of the data in the CC stack, so CC frontend developers wanted a different
> resolution for some metrics. The issue was basically the `mem_used` metric showing the currently used memory of the node. Ganglia wants it in `kByte` as provided
> by the Linux operating system but CC wanted it in `GByte`.
With the message processor, the Ganglia sinks can apply the unit prefix changes individually and name the metrics as required by Ganglia.
## For developers
Whenever you receive a message or are about to send one out, you should apply some processing.
### Configuration of component
New operations can be added to the message processor at runtime. Of course, they can also be removed again. For the initial setup, the processing can be defined in a configuration file or in some fields of a larger configuration file.
The message processor uses the following configuration:
```json
{
"drop_messages": [
"name_of_message_to_drop"
],
"drop_messages_if": [
"condition_when_to_drop_message",
"name == 'drop_this'",
"tag.hostname == 'this_host'",
"meta.unit != 'MB'"
],
"rename_messages" : {
"old_message_name" : "new_message_name"
},
"rename_messages_if": {
"condition_when_to_rename_message" : "new_name"
},
"add_tags_if": [
{
"if" : "condition_when_to_add_tag",
"key": "name_for_new_tag",
"value": "new_tag_value"
}
],
"delete_tags_if": [
{
"if" : "condition_when_to_delete_tag",
"key": "name_of_tag"
}
],
"add_meta_if": [
{
"if" : "condition_when_to_add_meta_info",
"key": "name_for_new_meta_info",
"value": "new_meta_info_value"
}
],
"delete_meta_if": [
{
"if" : "condition_when_to_delete_meta_info",
"key": "name_of_meta_info"
}
],
"add_field_if": [
{
"if" : "condition_when_to_add_field",
"key": "name_for_new_field",
"value": "new_field_value_but_only_string_at_the_moment"
}
],
"delete_field_if": [
{
"if" : "condition_when_to_delete_field",
"key": "name_of_field"
}
],
"move_tag_to_meta_if": [
{
"if" : "condition_when_to_move_tag_to_meta_info_including_its_value",
"key": "name_of_tag",
"value": "name_of_meta_info"
}
],
"move_tag_to_field_if": [
{
"if" : "condition_when_to_move_tag_to_fields_including_its_value",
"key": "name_of_tag",
"value": "name_of_field"
}
],
"move_meta_to_tag_if": [
{
"if" : "condition_when_to_move_meta_info_to_tags_including_its_value",
"key": "name_of_meta_info",
"value": "name_of_tag"
}
],
"move_meta_to_field_if": [
{
"if" : "condition_when_to_move_meta_info_to_fields_including_its_value",
"key": "name_of_tag",
"value": "name_of_meta_info"
}
],
"move_field_to_tag_if": [
{
"if" : "condition_when_to_move_field_to_tags_including_its_stringified_value",
"key": "name_of_field",
"value": "name_of_tag"
}
],
"move_field_to_meta_if": [
{
"if" : "condition_when_to_move_field_to_meta_info_including_its_stringified_value",
"key": "name_of_field",
"value": "name_of_meta_info"
}
],
"drop_by_message_type": [
"metric",
"event",
"log",
"control"
],
"change_unit_prefix": {
"name == 'metric_with_wrong_unit_prefix'" : "G",
"only_if_messagetype == 'metric'": "T"
},
"normalize_units": true,
"add_base_env": {
"MY_CONSTANT_FOR_CUSTOM_CONDITIONS": 1.0,
"output_value_for_test_metrics": 42.0,
},
"stage_order": [
"rename_messages_if",
"drop_messages"
]
}
```
The options `change_unit_prefix` and `normalize_units` are only applied to CCMetrics. It is not possible to delete the field related to each message type as defined in [cc-specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/interfaces/lineprotocol). In short:
- CCMetrics always have to have a field named `value`
- CCEvents always have to have a field named `event`
- CCLogs always have to have a field named `log`
- CCControl messages always have to have a field named `control`
With `add_base_env`, one can specify mykey=myvalue pairs that can be used in conditions like `tag.type == mykey`.
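For example, a constant defined there can be referenced in any condition; a hypothetical drop rule reusing the constant from the configuration above:

```json
"drop_messages_if": [
    "value < MY_CONSTANT_FOR_CUSTOM_CONDITIONS"
]
```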
The order in which each message is processed can be specified with the `stage_order` option. The stage names are the keys in the JSON configuration, thus `change_unit_prefix`, `move_field_to_meta_if`, etc. Stages can be listed multiple times.
### Using the component
In order to load the configuration from a `json.RawMessage`:
```golang
mp, err := NewMessageProcessor()
if err != nil {
log.Error("failed to create new message processor")
}
mp.FromConfigJSON(configJson)
```
After initialization and adding the different operations, the `ProcessMessage()` function applies all operations and returns the processed message, or `nil` if the message should be dropped.
```golang
m := lp.CCMetric{}
x, err := mp.ProcessMessage(m)
if err != nil {
// handle error
}
if x != nil {
// process x further
} else {
// this message got dropped
}
```
Single operations can be added and removed at runtime
```golang
type MessageProcessor interface {
// Functions to set the execution order of the processing stages
SetStages([]string) error
DefaultStages() []string
// Function to add variables to the base evaluation environment
AddBaseEnv(env map[string]interface{}) error
// Functions to add and remove rules
AddDropMessagesByName(name string) error
RemoveDropMessagesByName(name string)
AddDropMessagesByCondition(condition string) error
RemoveDropMessagesByCondition(condition string)
AddRenameMetricByCondition(condition string, name string) error
RemoveRenameMetricByCondition(condition string)
AddRenameMetricByName(from, to string) error
RemoveRenameMetricByName(from string)
SetNormalizeUnits(settings bool)
AddChangeUnitPrefix(condition string, prefix string) error
RemoveChangeUnitPrefix(condition string)
AddAddTagsByCondition(condition, key, value string) error
RemoveAddTagsByCondition(condition string)
AddDeleteTagsByCondition(condition, key, value string) error
RemoveDeleteTagsByCondition(condition string)
AddAddMetaByCondition(condition, key, value string) error
RemoveAddMetaByCondition(condition string)
AddDeleteMetaByCondition(condition, key, value string) error
RemoveDeleteMetaByCondition(condition string)
AddMoveTagToMeta(condition, key, value string) error
RemoveMoveTagToMeta(condition string)
AddMoveTagToFields(condition, key, value string) error
RemoveMoveTagToFields(condition string)
AddMoveMetaToTags(condition, key, value string) error
RemoveMoveMetaToTags(condition string)
AddMoveMetaToFields(condition, key, value string) error
RemoveMoveMetaToFields(condition string)
AddMoveFieldToTags(condition, key, value string) error
RemoveMoveFieldToTags(condition string)
AddMoveFieldToMeta(condition, key, value string) error
RemoveMoveFieldToMeta(condition string)
// Read in a JSON configuration
FromConfigJSON(config json.RawMessage) error
ProcessMessage(m lp2.CCMessage) (lp2.CCMessage, error)
// Processing functions for legacy CCMetric and current CCMessage
ProcessMetric(m lp.CCMetric) (lp2.CCMessage, error)
}
```
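A short sketch of adding and removing a rule at runtime using the interface above (the condition, tag key and tag value are illustrative):

```golang
mp, err := NewMessageProcessor()
if err != nil {
	log.Error("failed to create new message processor")
}
// Add a tag to all messages coming from a specific host
cond := "tags.hostname == 'this_host'"
if err := mp.AddAddTagsByCondition(cond, "group", "testgroup"); err != nil {
	// handle invalid condition
}
// ... and remove the rule again using the same condition string
mp.RemoveAddTagsByCondition(cond)
```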
### Syntax for evaluatable terms
The message processor uses `expr` for evaluating the terms. It provides a basic set of operators like string comparison and arithmetic operations.
Accessible for operations are
- `name` of the message
- `timestamp` or `time` of the message
- `type`, `type-id` of the message (also `tag_type`, `tag_type-id` and `tag_typeid`)
- `stype`, `stype-id` of the message (if the message has these tags, also `tag_stype`, `tag_stype-id` and `tag_stypeid`)
- `value` for a CCMetric message (also `field_value`)
- `event` for a CCEvent message (also `field_event`)
- `control` for a CCControl message (also `field_control`)
- `log` for a CCLog message (also `field_log`)
- `messagetype` or `msgtype`. Possible values `event`, `metric`, `log` and `control`.
Generally, all tags are accessible with `tag_<tagkey>`, `tags_<tagkey>` or `tags.<tagkey>`. Similarly for all fields with `field[s]?[_.]<fieldkey>`. For meta information `meta[_.]<metakey>` (there is no `metas[_.]<metakey>`).
The [syntax of `expr`](https://expr-lang.org/docs/language-definition) is accepted with some additions:
- Comparing strings: `==`, `!=`, `str matches regex` (use `%` instead of `\`!)
- Combining conditions: `&&`, `||`
- Comparing numbers: `==`, `!=`, `<`, `>`, `<=`, `>=`
- Test lists: `<value> in <list>`
- Topological tests: `tag_type-id in getCpuListOfType("socket", "1")` (test if the metric belongs to socket 1 in local node topology)
Often the operations are written in JSON files for loading them at startup. In JSON, some characters are not allowed. Therefore, the term syntax reflects that:
- use `''` instead of `""` for strings
- for the regexes, use `%` instead of `\`
For operations that should be applied on all messages, use the condition `true`.
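A few illustrative condition strings following these rules (metric names and thresholds are hypothetical):

```golang
// Drop all event messages
mp.AddDropMessagesByCondition("messagetype == 'event'")
// Drop hot core temperatures; '%d+' is the JSON-safe spelling of '\d+'
mp.AddDropMessagesByCondition("name matches 'temp_core_%d+' && value > 90.0")
// Tag metrics belonging to socket 1 in the local node topology
mp.AddAddTagsByCondition("tag_type-id in getCpuListOfType('socket', '1')", "socket", "1")
```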
### Overhead
The operations taking conditions are pre-processed, which is commonly the time-consuming part, but of course each added operation increases the time to process a message. Moreover, the processing creates a copy of the message.

View File

@@ -1,988 +0,0 @@
package messageprocessor
import (
"encoding/json"
"fmt"
"strings"
"sync"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lplegacy "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/expr-lang/expr"
"github.com/expr-lang/expr/vm"
)
// Message processor add/delete tag/meta configuration
type messageProcessorTagConfig struct {
Key string `json:"key"` // Tag name
Value string `json:"value,omitempty"` // Tag value
Condition string `json:"if"` // Condition for adding or removing corresponding tag
}
type messageProcessorConfig struct {
StageOrder []string `json:"stage_order,omitempty"` // List of stages to execute them in the specified order and to skip unrequired ones
DropMessages []string `json:"drop_messages,omitempty"` // List of metric names to drop. For fine-grained dropping use drop_messages_if
DropMessagesIf []string `json:"drop_messages_if,omitempty"` // List of evaluatable terms to drop messages
RenameMessages map[string]string `json:"rename_messages,omitempty"` // Map of metric names to rename
RenameMessagesIf map[string]string `json:"rename_messages_if,omitempty"` // Map to rename metric name based on a condition
NormalizeUnits bool `json:"normalize_units,omitempty"` // Check unit meta flag and normalize it using cc-units
ChangeUnitPrefix map[string]string `json:"change_unit_prefix,omitempty"` // Add prefix that should be applied to the messages
AddTagsIf []messageProcessorTagConfig `json:"add_tags_if"` // List of tags that are added when the condition is met
DelTagsIf []messageProcessorTagConfig `json:"delete_tags_if"` // List of tags that are removed when the condition is met
AddMetaIf []messageProcessorTagConfig `json:"add_meta_if"` // List of meta infos that are added when the condition is met
DelMetaIf []messageProcessorTagConfig `json:"delete_meta_if"` // List of meta infos that are removed when the condition is met
AddFieldIf []messageProcessorTagConfig `json:"add_field_if"` // List of fields that are added when the condition is met
DelFieldIf []messageProcessorTagConfig `json:"delete_field_if"` // List of fields that are removed when the condition is met
DropByType []string `json:"drop_by_message_type"` // List of message types that should be dropped
MoveTagToMeta []messageProcessorTagConfig `json:"move_tag_to_meta_if"`
MoveTagToField []messageProcessorTagConfig `json:"move_tag_to_field_if"`
MoveMetaToTag []messageProcessorTagConfig `json:"move_meta_to_tag_if"`
MoveMetaToField []messageProcessorTagConfig `json:"move_meta_to_field_if"`
MoveFieldToTag []messageProcessorTagConfig `json:"move_field_to_tag_if"`
MoveFieldToMeta []messageProcessorTagConfig `json:"move_field_to_meta_if"`
AddBaseEnv map[string]interface{} `json:"add_base_env"`
}
type messageProcessor struct {
// For thread-safety
mutex sync.RWMutex
// mapping contains all evaluatable conditions as strings mapped to their
// compiled programs, because it is not possible to recover the original
// string from a compiled program
mapping map[string]*vm.Program
stages []string // order of stage execution
dropMessages map[string]struct{} // internal lookup map
dropTypes map[string]struct{} // internal lookup map
dropMessagesIf map[*vm.Program]struct{} // pre-processed dropMessagesIf
renameMessages map[string]string // internal lookup map
renameMessagesIf map[*vm.Program]string // pre-processed RenameMessagesIf
changeUnitPrefix map[*vm.Program]string // pre-processed ChangeUnitPrefix
normalizeUnits bool
addTagsIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddTagsIf
deleteTagsIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelTagsIf
addMetaIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddMetaIf
deleteMetaIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelMetaIf
addFieldIf map[*vm.Program]messageProcessorTagConfig // pre-processed AddFieldIf
deleteFieldIf map[*vm.Program]messageProcessorTagConfig // pre-processed DelFieldIf
moveTagToMeta map[*vm.Program]messageProcessorTagConfig // pre-processed MoveTagToMeta
moveTagToField map[*vm.Program]messageProcessorTagConfig // pre-processed MoveTagToField
moveMetaToTag map[*vm.Program]messageProcessorTagConfig // pre-processed MoveMetaToTag
moveMetaToField map[*vm.Program]messageProcessorTagConfig // pre-processed MoveMetaToField
moveFieldToTag map[*vm.Program]messageProcessorTagConfig // pre-processed MoveFieldToTag
moveFieldToMeta map[*vm.Program]messageProcessorTagConfig // pre-processed MoveFieldToMeta
}
type MessageProcessor interface {
// Functions to set the execution order of the processing stages
SetStages([]string) error
DefaultStages() []string
// Function to add variables to the base evaluation environment
AddBaseEnv(env map[string]interface{}) error
// Functions to add and remove rules
AddDropMessagesByName(name string) error
RemoveDropMessagesByName(name string)
AddDropMessagesByCondition(condition string) error
RemoveDropMessagesByCondition(condition string)
AddRenameMetricByCondition(condition string, name string) error
RemoveRenameMetricByCondition(condition string)
AddRenameMetricByName(from, to string) error
RemoveRenameMetricByName(from string)
SetNormalizeUnits(settings bool)
AddChangeUnitPrefix(condition string, prefix string) error
RemoveChangeUnitPrefix(condition string)
AddAddTagsByCondition(condition, key, value string) error
RemoveAddTagsByCondition(condition string)
AddDeleteTagsByCondition(condition, key, value string) error
RemoveDeleteTagsByCondition(condition string)
AddAddMetaByCondition(condition, key, value string) error
RemoveAddMetaByCondition(condition string)
AddDeleteMetaByCondition(condition, key, value string) error
RemoveDeleteMetaByCondition(condition string)
AddMoveTagToMeta(condition, key, value string) error
RemoveMoveTagToMeta(condition string)
AddMoveTagToFields(condition, key, value string) error
RemoveMoveTagToFields(condition string)
AddMoveMetaToTags(condition, key, value string) error
RemoveMoveMetaToTags(condition string)
AddMoveMetaToFields(condition, key, value string) error
RemoveMoveMetaToFields(condition string)
AddMoveFieldToTags(condition, key, value string) error
RemoveMoveFieldToTags(condition string)
AddMoveFieldToMeta(condition, key, value string) error
RemoveMoveFieldToMeta(condition string)
// Read in a JSON configuration
FromConfigJSON(config json.RawMessage) error
// Processing functions for legacy CCMetric and current CCMessage
ProcessMetric(m lplegacy.CCMetric) (lp.CCMessage, error)
ProcessMessage(m lp.CCMessage) (lp.CCMessage, error)
//EvalToBool(condition string, parameters map[string]interface{}) (bool, error)
//EvalToFloat64(condition string, parameters map[string]interface{}) (float64, error)
//EvalToString(condition string, parameters map[string]interface{}) (string, error)
}
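
For orientation, a hedged usage sketch of this interface; it assumes this package and cc-message (imported as `lp`) are available, and the rule string and metric values are made up:

```go
func exampleUsage() error {
	mp, err := NewMessageProcessor()
	if err != nil {
		return err
	}
	if err := mp.AddDropMessagesByCondition("name == 'net_bytes_in' && tags.type == 'node'"); err != nil {
		return err
	}
	msg, err := lp.NewMessage("net_bytes_in",
		map[string]string{"type": "node", "type-id": "0"},
		map[string]string{"unit": "Byte"},
		map[string]interface{}{"value": 1024.0},
		time.Now())
	if err != nil {
		return err
	}
	out, err := mp.ProcessMessage(msg)
	if err != nil {
		return err
	}
	// out == nil with err == nil signals that the message was dropped.
	_ = out
	return nil
}
```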
const (
STAGENAME_DROP_BY_NAME string = "drop_by_name"
STAGENAME_DROP_BY_TYPE string = "drop_by_type"
STAGENAME_DROP_IF string = "drop_if"
STAGENAME_ADD_TAG string = "add_tag"
STAGENAME_DELETE_TAG string = "delete_tag"
STAGENAME_MOVE_TAG_META string = "move_tag_to_meta"
STAGENAME_MOVE_TAG_FIELD string = "move_tag_to_fields"
STAGENAME_ADD_META string = "add_meta"
STAGENAME_DELETE_META string = "delete_meta"
STAGENAME_MOVE_META_TAG string = "move_meta_to_tags"
STAGENAME_MOVE_META_FIELD string = "move_meta_to_fields"
STAGENAME_ADD_FIELD string = "add_field"
STAGENAME_DELETE_FIELD string = "delete_field"
STAGENAME_MOVE_FIELD_TAG string = "move_field_to_tags"
STAGENAME_MOVE_FIELD_META string = "move_field_to_meta"
STAGENAME_RENAME_BY_NAME string = "rename"
STAGENAME_RENAME_IF string = "rename_if"
STAGENAME_CHANGE_UNIT_PREFIX string = "change_unit_prefix"
STAGENAME_NORMALIZE_UNIT string = "normalize_unit"
)
var StageNames = []string{
STAGENAME_DROP_BY_NAME,
STAGENAME_DROP_BY_TYPE,
STAGENAME_DROP_IF,
STAGENAME_ADD_TAG,
STAGENAME_DELETE_TAG,
STAGENAME_MOVE_TAG_META,
STAGENAME_MOVE_TAG_FIELD,
STAGENAME_ADD_META,
STAGENAME_DELETE_META,
STAGENAME_MOVE_META_TAG,
STAGENAME_MOVE_META_FIELD,
STAGENAME_ADD_FIELD,
STAGENAME_DELETE_FIELD,
STAGENAME_MOVE_FIELD_TAG,
STAGENAME_MOVE_FIELD_META,
STAGENAME_RENAME_BY_NAME,
STAGENAME_RENAME_IF,
STAGENAME_CHANGE_UNIT_PREFIX,
STAGENAME_NORMALIZE_UNIT,
}
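
Callers can restrict or reorder processing via SetStages; a small hedged fragment (mp is a processor as returned by NewMessageProcessor):

```go
// Execute only these two stages, in this order; all other stages are skipped.
if err := mp.SetStages([]string{STAGENAME_DROP_BY_NAME, STAGENAME_RENAME_BY_NAME}); err != nil {
	return err
}
```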
var paramMapPool = sync.Pool{
New: func() any {
return make(map[string]interface{})
},
}
func sanitizeExprString(key string) string {
return strings.ReplaceAll(key, "type-id", "typeid")
}
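
The rewrite exists because a hyphenated key like `type-id` would otherwise be parsed as an expression with an operator rather than a single identifier. An illustrative input/output pair:

```go
fmt.Println(sanitizeExprString("name == 'x' && tags.type-id == '1'"))
// prints: name == 'x' && tags.typeid == '1'
```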
func getParamMap(point lp.CCMetric) map[string]interface{} {
params := paramMapPool.Get().(map[string]interface{})
params["message"] = point
params["msg"] = point
params["name"] = point.Name()
params["timestamp"] = point.Time().Unix()
params["time"] = params["timestamp"]
fields := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Fields() {
fields[key] = value
switch key {
case "value":
params["messagetype"] = "metric"
params["value"] = value
params["metric"] = value
case "event":
params["messagetype"] = "event"
params["event"] = value
case "control":
params["messagetype"] = "control"
params["control"] = value
case "log":
params["messagetype"] = "log"
params["log"] = value
default:
params["messagetype"] = "unknown"
}
}
params["msgtype"] = params["messagetype"]
params["fields"] = fields
params["field"] = fields
tags := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Tags() {
tags[sanitizeExprString(key)] = value
}
params["tags"] = tags
params["tag"] = tags
meta := paramMapPool.Get().(map[string]interface{})
for key, value := range point.Meta() {
meta[sanitizeExprString(key)] = value
}
params["meta"] = meta
return params
}
var baseenv = map[string]interface{}{
"name": "",
"messagetype": "unknown",
"msgtype": "unknown",
"tag": map[string]interface{}{
"type": "unknown",
"typeid": "0",
"stype": "unknown",
"stypeid": "0",
"hostname": "localhost",
"cluster": "nocluster",
},
"tags": map[string]interface{}{
"type": "unknown",
"typeid": "0",
"stype": "unknown",
"stypeid": "0",
"hostname": "localhost",
"cluster": "nocluster",
},
"meta": map[string]interface{}{
"unit": "invalid",
"source": "unknown",
},
"fields": map[string]interface{}{
"value": 0,
"event": "",
"control": "",
"log": "",
},
"field": map[string]interface{}{
"value": 0,
"event": "",
"control": "",
"log": "",
},
"timestamp": 1234567890,
"msg": lp.EmptyMessage(),
"message": lp.EmptyMessage(),
}
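
Conditions are compiled against this environment (see addTagConfig below), so any key present here can appear in a rule. A few illustrative conditions that compile against it; the metric names and values are made up:

```go
conds := []string{
	"name == 'cpu_load' && tags.type == 'node'",
	"messagetype == 'event'",
	"meta.unit == 'Byte' && tags.typeid == '0'",
}
for _, c := range conds {
	if _, err := expr.Compile(sanitizeExprString(c), expr.Env(baseenv), expr.AsBool()); err != nil {
		fmt.Println("invalid condition:", c, err)
	}
}
```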
func addBaseEnvWalker(values map[string]interface{}) map[string]interface{} {
out := make(map[string]interface{})
for k, v := range values {
switch value := v.(type) {
case int, int32, int64, uint, uint32, uint64, string, float32, float64:
out[k] = value
case map[string]interface{}:
if _, ok := baseenv[k]; !ok {
out[k] = addBaseEnvWalker(value)
}
}
}
return out
}
func (mp *messageProcessor) AddBaseEnv(env map[string]interface{}) error {
for k, v := range env {
switch value := v.(type) {
case int, int32, int64, uint, uint32, uint64, string, float32, float64:
baseenv[k] = value
case map[string]interface{}:
if _, ok := baseenv[k]; !ok {
baseenv[k] = addBaseEnvWalker(value)
}
}
}
return nil
}
func (mp *messageProcessor) init() error {
mp.stages = make([]string, 0)
mp.mapping = make(map[string]*vm.Program)
mp.dropMessages = make(map[string]struct{})
mp.dropTypes = make(map[string]struct{})
mp.dropMessagesIf = make(map[*vm.Program]struct{})
mp.renameMessages = make(map[string]string)
mp.renameMessagesIf = make(map[*vm.Program]string)
mp.changeUnitPrefix = make(map[*vm.Program]string)
mp.addTagsIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.addMetaIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.addFieldIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteTagsIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteMetaIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.deleteFieldIf = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveFieldToMeta = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveFieldToTag = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveMetaToField = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveMetaToTag = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveTagToField = make(map[*vm.Program]messageProcessorTagConfig)
mp.moveTagToMeta = make(map[*vm.Program]messageProcessorTagConfig)
mp.normalizeUnits = false
return nil
}
func (mp *messageProcessor) AddDropMessagesByName(name string) error {
mp.mutex.Lock()
if _, ok := mp.dropMessages[name]; !ok {
mp.dropMessages[name] = struct{}{}
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveDropMessagesByName(name string) {
mp.mutex.Lock()
delete(mp.dropMessages, name)
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddDropMessagesByType(typestring string) error {
valid := []string{"metric", "event", "control", "log"}
isValid := false
for _, t := range valid {
if t == typestring {
isValid = true
break
}
}
if isValid {
mp.mutex.Lock()
if _, ok := mp.dropTypes[typestring]; !ok {
cclog.ComponentDebug("MessageProcessor", "Adding type", typestring, "for dropping")
mp.dropTypes[typestring] = struct{}{}
}
mp.mutex.Unlock()
} else {
return fmt.Errorf("invalid message type %s", typestring)
}
return nil
}
func (mp *messageProcessor) RemoveDropMessagesByType(typestring string) {
mp.mutex.Lock()
delete(mp.dropTypes, typestring)
mp.mutex.Unlock()
}
func (mp *messageProcessor) addTagConfig(condition, key, value string, config *map[*vm.Program]messageProcessorTagConfig) error {
var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := (*config)[evaluable]; !ok {
mp.mapping[condition] = evaluable
(*config)[evaluable] = messageProcessorTagConfig{
Condition: condition,
Key: key,
Value: value,
}
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) removeTagConfig(condition string, config *map[*vm.Program]messageProcessorTagConfig) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(*config, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddAddTagsByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addTagsIf)
}
func (mp *messageProcessor) RemoveAddTagsByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addTagsIf)
}
func (mp *messageProcessor) AddDeleteTagsByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteTagsIf)
}
func (mp *messageProcessor) RemoveDeleteTagsByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteTagsIf)
}
func (mp *messageProcessor) AddAddMetaByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addMetaIf)
}
func (mp *messageProcessor) RemoveAddMetaByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addMetaIf)
}
func (mp *messageProcessor) AddDeleteMetaByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteMetaIf)
}
func (mp *messageProcessor) RemoveDeleteMetaByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteMetaIf)
}
func (mp *messageProcessor) AddAddFieldByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.addFieldIf)
}
func (mp *messageProcessor) RemoveAddFieldByCondition(condition string) {
mp.removeTagConfig(condition, &mp.addFieldIf)
}
func (mp *messageProcessor) AddDeleteFieldByCondition(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.deleteFieldIf)
}
func (mp *messageProcessor) RemoveDeleteFieldByCondition(condition string) {
mp.removeTagConfig(condition, &mp.deleteFieldIf)
}
func (mp *messageProcessor) AddDropMessagesByCondition(condition string) error {
var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := mp.dropMessagesIf[evaluable]; !ok {
mp.mapping[condition] = evaluable
mp.dropMessagesIf[evaluable] = struct{}{}
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveDropMessagesByCondition(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.dropMessagesIf, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddRenameMetricByCondition(condition string, name string) error {
var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := mp.renameMessagesIf[evaluable]; !ok {
mp.mapping[condition] = evaluable
}
mp.renameMessagesIf[evaluable] = name
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveRenameMetricByCondition(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.renameMessagesIf, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) SetNormalizeUnits(setting bool) {
mp.normalizeUnits = setting
}
func (mp *messageProcessor) AddChangeUnitPrefix(condition string, prefix string) error {
var err error
evaluable, err := expr.Compile(sanitizeExprString(condition), expr.Env(baseenv), expr.AsBool())
if err != nil {
return fmt.Errorf("failed to create condition evaluable of '%s': %v", condition, err.Error())
}
mp.mutex.Lock()
if _, ok := mp.changeUnitPrefix[evaluable]; !ok {
mp.mapping[condition] = evaluable
}
mp.changeUnitPrefix[evaluable] = prefix
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveChangeUnitPrefix(condition string) {
mp.mutex.Lock()
if e, ok := mp.mapping[condition]; ok {
delete(mp.mapping, condition)
delete(mp.changeUnitPrefix, e)
}
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddRenameMetricByName(from, to string) error {
mp.mutex.Lock()
if _, ok := mp.renameMessages[from]; !ok {
mp.renameMessages[from] = to
}
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) RemoveRenameMetricByName(from string) {
mp.mutex.Lock()
delete(mp.renameMessages, from)
mp.mutex.Unlock()
}
func (mp *messageProcessor) AddMoveTagToMeta(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveTagToMeta)
}
func (mp *messageProcessor) RemoveMoveTagToMeta(condition string) {
mp.removeTagConfig(condition, &mp.moveTagToMeta)
}
func (mp *messageProcessor) AddMoveTagToFields(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveTagToField)
}
func (mp *messageProcessor) RemoveMoveTagToFields(condition string) {
mp.removeTagConfig(condition, &mp.moveTagToField)
}
func (mp *messageProcessor) AddMoveMetaToTags(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveMetaToTag)
}
func (mp *messageProcessor) RemoveMoveMetaToTags(condition string) {
mp.removeTagConfig(condition, &mp.moveMetaToTag)
}
func (mp *messageProcessor) AddMoveMetaToFields(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveMetaToField)
}
func (mp *messageProcessor) RemoveMoveMetaToFields(condition string) {
mp.removeTagConfig(condition, &mp.moveMetaToField)
}
func (mp *messageProcessor) AddMoveFieldToTags(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveFieldToTag)
}
func (mp *messageProcessor) RemoveMoveFieldToTags(condition string) {
mp.removeTagConfig(condition, &mp.moveFieldToTag)
}
func (mp *messageProcessor) AddMoveFieldToMeta(condition, key, value string) error {
return mp.addTagConfig(condition, key, value, &mp.moveFieldToMeta)
}
func (mp *messageProcessor) RemoveMoveFieldToMeta(condition string) {
mp.removeTagConfig(condition, &mp.moveFieldToMeta)
}
func (mp *messageProcessor) SetStages(stages []string) error {
newstages := make([]string, 0)
if len(stages) == 0 {
mp.mutex.Lock()
mp.stages = newstages
mp.mutex.Unlock()
return nil
}
for i, s := range stages {
valid := false
for _, v := range StageNames {
if s == v {
valid = true
}
}
if valid {
newstages = append(newstages, s)
} else {
return fmt.Errorf("invalid stage %s at index %d", s, i)
}
}
mp.mutex.Lock()
mp.stages = newstages
mp.mutex.Unlock()
return nil
}
func (mp *messageProcessor) DefaultStages() []string {
return StageNames
}
func (mp *messageProcessor) FromConfigJSON(config json.RawMessage) error {
var c messageProcessorConfig
err := json.Unmarshal(config, &c)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
if len(c.StageOrder) > 0 {
err = mp.SetStages(c.StageOrder)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
} else {
err = mp.SetStages(mp.DefaultStages())
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropMessages {
err = mp.AddDropMessagesByName(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropByType {
err = mp.AddDropMessagesByType(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropMessagesIf {
err = mp.AddDropMessagesByCondition(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.RenameMessagesIf {
err = mp.AddRenameMetricByCondition(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.RenameMessages {
err = mp.AddRenameMetricByName(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for k, v := range c.ChangeUnitPrefix {
err = mp.AddChangeUnitPrefix(k, v)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddTagsIf {
err = mp.AddAddTagsByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddMetaIf {
err = mp.AddAddMetaByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.AddFieldIf {
err = mp.AddAddFieldByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelTagsIf {
err = mp.AddDeleteTagsByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelMetaIf {
err = mp.AddDeleteMetaByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.DelFieldIf {
err = mp.AddDeleteFieldByCondition(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveTagToMeta {
err = mp.AddMoveTagToMeta(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveTagToField {
err = mp.AddMoveTagToFields(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveMetaToTag {
err = mp.AddMoveMetaToTags(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveMetaToField {
err = mp.AddMoveMetaToFields(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveFieldToTag {
err = mp.AddMoveFieldToTags(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, c := range c.MoveFieldToMeta {
err = mp.AddMoveFieldToMeta(c.Condition, c.Key, c.Value)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
for _, m := range c.DropByType {
err = mp.AddDropMessagesByType(m)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
if len(c.AddBaseEnv) > 0 {
err = mp.AddBaseEnv(c.AddBaseEnv)
if err != nil {
return fmt.Errorf("failed to process config JSON: %v", err.Error())
}
}
mp.SetNormalizeUnits(c.NormalizeUnits)
return nil
}
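
A hedged configuration example that exercises several of the sections above, written as a raw JSON message the way the tests below do; all names and values are illustrative:

```go
cfg := json.RawMessage(`{
	"stage_order": ["drop_if", "rename", "add_tag"],
	"drop_messages_if": ["name == 'mymetric' && tags.typeid % 2 == 1"],
	"rename_messages": {"net_bytes_in": "net_bytes_recv"},
	"add_tags_if": [{"if": "true", "key": "cluster", "value": "mycluster"}]
}`)
mp, _ := NewMessageProcessor()
if err := mp.FromConfigJSON(cfg); err != nil {
	return err
}
```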
func (mp *messageProcessor) ProcessMetric(metric lplegacy.CCMetric) (lp.CCMessage, error) {
m, err := lp.NewMessage(
metric.Name(),
metric.Tags(),
metric.Meta(),
metric.Fields(),
metric.Time(),
)
if err != nil {
return m, fmt.Errorf("failed to parse metric to message: %v", err.Error())
}
return mp.ProcessMessage(m)
}
func (mp *messageProcessor) ProcessMessage(m lp.CCMessage) (lp.CCMessage, error) {
var err error = nil
var out lp.CCMessage = lp.FromMessage(m)
name := out.Name()
if len(mp.stages) == 0 {
mp.SetStages(mp.DefaultStages())
}
mp.mutex.RLock()
defer mp.mutex.RUnlock()
params := getParamMap(out)
defer func() {
params["field"] = nil
params["tag"] = nil
paramMapPool.Put(params["fields"])
paramMapPool.Put(params["tags"])
paramMapPool.Put(params["meta"])
paramMapPool.Put(params)
}()
for _, s := range mp.stages {
switch s {
case STAGENAME_DROP_BY_NAME:
if len(mp.dropMessages) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by message name ", name)
if _, ok := mp.dropMessages[name]; ok {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
}
}
case STAGENAME_DROP_BY_TYPE:
if len(mp.dropTypes) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by message type")
if _, ok := mp.dropTypes[params["messagetype"].(string)]; ok {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
}
}
case STAGENAME_DROP_IF:
if len(mp.dropMessagesIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Dropping by condition")
drop, err := dropMessagesIf(&params, &mp.dropMessagesIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if drop {
//cclog.ComponentDebug("MessageProcessor", "Drop")
return nil, nil
}
}
case STAGENAME_RENAME_BY_NAME:
if len(mp.renameMessages) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Renaming by name match")
if newname, ok := mp.renameMessages[name]; ok {
//cclog.ComponentDebug("MessageProcessor", "Rename to", newname)
out.SetName(newname)
//cclog.ComponentDebug("MessageProcessor", "Add old name as 'oldname' to meta", name)
out.AddMeta("oldname", name)
}
}
case STAGENAME_RENAME_IF:
if len(mp.renameMessagesIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Renaming by condition")
_, err := renameMessagesIf(out, &params, &mp.renameMessagesIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_ADD_TAG:
if len(mp.addTagsIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Adding tags")
_, err = addTagIf(out, &params, &mp.addTagsIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_DELETE_TAG:
if len(mp.deleteTagsIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Delete tags")
_, err = deleteTagIf(out, &params, &mp.deleteTagsIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_ADD_META:
if len(mp.addMetaIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Adding meta information")
_, err = addMetaIf(out, &params, &mp.addMetaIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_DELETE_META:
if len(mp.deleteMetaIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Delete meta information")
_, err = deleteMetaIf(out, &params, &mp.deleteMetaIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_ADD_FIELD:
if len(mp.addFieldIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Adding fields")
_, err = addFieldIf(out, &params, &mp.addFieldIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_DELETE_FIELD:
if len(mp.deleteFieldIf) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Delete fields")
_, err = deleteFieldIf(out, &params, &mp.deleteFieldIf)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_TAG_META:
if len(mp.moveTagToMeta) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move tag to meta")
_, err := moveTagToMeta(out, &params, &mp.moveTagToMeta)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_TAG_FIELD:
if len(mp.moveTagToField) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move tag to fields")
_, err := moveTagToField(out, &params, &mp.moveTagToField)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_META_TAG:
if len(mp.moveMetaToTag) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move meta to tags")
_, err := moveMetaToTag(out, &params, &mp.moveMetaToTag)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_META_FIELD:
if len(mp.moveMetaToField) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move meta to fields")
_, err := moveMetaToField(out, &params, &mp.moveMetaToField)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_FIELD_META:
if len(mp.moveFieldToMeta) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move field to meta")
_, err := moveFieldToMeta(out, &params, &mp.moveFieldToMeta)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_MOVE_FIELD_TAG:
if len(mp.moveFieldToTag) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Move field to tags")
_, err := moveFieldToTag(out, &params, &mp.moveFieldToTag)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
}
case STAGENAME_NORMALIZE_UNIT:
if mp.normalizeUnits {
//cclog.ComponentDebug("MessageProcessor", "Normalize units")
if lp.IsMetric(out) {
_, err := normalizeUnits(out)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
} else {
cclog.ComponentDebug("MessageProcessor", "skipped, no metric")
}
}
case STAGENAME_CHANGE_UNIT_PREFIX:
if len(mp.changeUnitPrefix) > 0 {
//cclog.ComponentDebug("MessageProcessor", "Change unit prefix")
if lp.IsMetric(out) {
_, err := changeUnitPrefix(out, &params, &mp.changeUnitPrefix)
if err != nil {
return out, fmt.Errorf("failed to evaluate: %v", err.Error())
}
} else {
cclog.ComponentDebug("MessageProcessor", "skipped, no metric")
}
}
}
}
return out, nil
}
// Get a new instance of a message processor.
func NewMessageProcessor() (MessageProcessor, error) {
mp := new(messageProcessor)
err := mp.init()
if err != nil {
err := fmt.Errorf("failed to create MessageProcessor: %v", err.Error())
cclog.ComponentError("MessageProcessor", err.Error())
return nil, err
}
return mp, nil
}

View File

@@ -1,262 +0,0 @@
package messageprocessor
import (
"errors"
"fmt"
lp2 "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
units "github.com/ClusterCockpit/cc-units"
"github.com/expr-lang/expr"
"github.com/expr-lang/expr/vm"
)
type MessageLocation int
const (
MESSAGE_LOCATION_TAGS MessageLocation = iota
MESSAGE_LOCATION_META
MESSAGE_LOCATION_FIELDS
)
// Abstract function to move entries from one location to another
func moveInMessage(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, from, to MessageLocation) (bool, error) {
for d, data := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return false, fmt.Errorf("failed to evaluate: %v", err.Error())
}
//cclog.ComponentDebug("MessageProcessor", "Move from", from, "to", to)
if value.(bool) {
var v string
var ok bool = false
switch from {
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Getting tag key", data.Key)
v, ok = message.GetTag(data.Key)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Getting meta key", data.Key)
//cclog.ComponentDebug("MessageProcessor", message.Meta())
v, ok = message.GetMeta(data.Key)
case MESSAGE_LOCATION_FIELDS:
var x interface{}
//cclog.ComponentDebug("MessageProcessor", "Getting field key", data.Key)
x, ok = message.GetField(data.Key)
v = fmt.Sprintf("%v", x)
}
if ok {
switch from {
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Removing tag key", data.Key)
message.RemoveTag(data.Key)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Removing meta key", data.Key)
message.RemoveMeta(data.Key)
case MESSAGE_LOCATION_FIELDS:
//cclog.ComponentDebug("MessageProcessor", "Removing field key", data.Key)
message.RemoveField(data.Key)
}
switch to {
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Adding tag", data.Value, "->", v)
message.AddTag(data.Value, v)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Adding meta", data.Value, "->", v)
message.AddMeta(data.Value, v)
case MESSAGE_LOCATION_FIELDS:
//cclog.ComponentDebug("MessageProcessor", "Adding field", data.Value, "->", v)
message.AddField(data.Value, v)
}
}
}
}
return false, nil
}
func deleteIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, location MessageLocation) (bool, error) {
for d, data := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return true, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
switch location {
case MESSAGE_LOCATION_FIELDS:
switch data.Key {
case "value", "event", "log", "control":
return false, errors.New("cannot delete protected fields")
default:
//cclog.ComponentDebug("MessageProcessor", "Removing field for", data.Key)
message.RemoveField(data.Key)
}
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Removing tag for", data.Key)
message.RemoveTag(data.Key)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Removing meta for", data.Key)
message.RemoveMeta(data.Key)
}
}
}
return false, nil
}
func addIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig, location MessageLocation) (bool, error) {
for d, data := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return true, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
switch location {
case MESSAGE_LOCATION_FIELDS:
//cclog.ComponentDebug("MessageProcessor", "Adding field", data.Value, "->", data.Value)
message.AddField(data.Key, data.Value)
case MESSAGE_LOCATION_TAGS:
//cclog.ComponentDebug("MessageProcessor", "Adding tag", data.Value, "->", data.Value)
message.AddTag(data.Key, data.Value)
case MESSAGE_LOCATION_META:
//cclog.ComponentDebug("MessageProcessor", "Adding meta", data.Value, "->", data.Value)
message.AddMeta(data.Key, data.Value)
}
}
}
return false, nil
}
func deleteTagIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return deleteIf(message, params, checks, MESSAGE_LOCATION_TAGS)
}
func addTagIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return addIf(message, params, checks, MESSAGE_LOCATION_TAGS)
}
func moveTagToMeta(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_TAGS, MESSAGE_LOCATION_META)
}
func moveTagToField(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_TAGS, MESSAGE_LOCATION_FIELDS)
}
func deleteMetaIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return deleteIf(message, params, checks, MESSAGE_LOCATION_META)
}
func addMetaIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return addIf(message, params, checks, MESSAGE_LOCATION_META)
}
func moveMetaToTag(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_META, MESSAGE_LOCATION_TAGS)
}
func moveMetaToField(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_META, MESSAGE_LOCATION_FIELDS)
}
func deleteFieldIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return deleteIf(message, params, checks, MESSAGE_LOCATION_FIELDS)
}
func addFieldIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return addIf(message, params, checks, MESSAGE_LOCATION_FIELDS)
}
func moveFieldToTag(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_FIELDS, MESSAGE_LOCATION_TAGS)
}
func moveFieldToMeta(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]messageProcessorTagConfig) (bool, error) {
return moveInMessage(message, params, checks, MESSAGE_LOCATION_FIELDS, MESSAGE_LOCATION_META)
}
func dropMessagesIf(params *map[string]interface{}, checks *map[*vm.Program]struct{}) (bool, error) {
for d := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return false, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
return true, nil
}
}
return false, nil
}
func normalizeUnits(message lp2.CCMessage) (bool, error) {
if in_unit, ok := message.GetMeta("unit"); ok {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", u.Short())
message.AddMeta("unit", u.Short())
}
} else if in_unit, ok := message.GetTag("unit"); ok {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", u.Short())
message.AddTag("unit", u.Short())
}
}
return false, nil
}
func changeUnitPrefix(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]string) (bool, error) {
for r, n := range *checks {
value, err := expr.Run(r, *params)
if err != nil {
return false, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
newPrefix := units.NewPrefix(n)
//cclog.ComponentDebug("MessageProcessor", "Condition matches, change to prefix", newPrefix.String())
if in_unit, ok := message.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Input unit", u.Short())
conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
if conv != nil && out_unit.Valid() {
if val, ok := message.GetField("value"); ok {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", out_unit.Short())
message.AddField("value", conv(val))
message.AddMeta("unit", out_unit.Short())
}
}
}
} else if in_unit, ok := message.GetTag("unit"); ok && newPrefix != units.InvalidPrefix {
u := units.NewUnit(in_unit)
if u.Valid() {
//cclog.ComponentDebug("MessageProcessor", "Input unit", u.Short())
conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
if conv != nil && out_unit.Valid() {
if val, ok := message.GetField("value"); ok {
//cclog.ComponentDebug("MessageProcessor", "Update unit with", out_unit.Short())
message.AddField("value", conv(val))
message.AddTag("unit", out_unit.Short())
}
}
}
}
}
}
return false, nil
}
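
For reference, a standalone sketch of the cc-units calls used above; the unit and prefix strings are examples and the printed value depends on cc-units' conversion factors:

```go
u := units.NewUnit("kByte")
p := units.NewPrefix("M")
if u.Valid() && p != units.InvalidPrefix {
	// conv converts a value from the old prefix to the new one,
	// out_unit carries the rewritten unit string.
	conv, out_unit := units.GetUnitPrefixFactor(u, p)
	if conv != nil && out_unit.Valid() {
		fmt.Println(conv(1024.0), out_unit.Short())
	}
}
```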
func renameMessagesIf(message lp2.CCMessage, params *map[string]interface{}, checks *map[*vm.Program]string) (bool, error) {
for d, n := range *checks {
value, err := expr.Run(d, *params)
if err != nil {
return true, fmt.Errorf("failed to evaluate: %v", err.Error())
}
if value.(bool) {
old := message.Name()
//cclog.ComponentDebug("MessageProcessor", "Rename to", n)
message.SetName(n)
//cclog.ComponentDebug("MessageProcessor", "Add old name as 'oldname' to meta", old)
message.AddMeta("oldname", old)
}
}
return false, nil
}

View File

@@ -1,396 +0,0 @@
package messageprocessor
import (
"encoding/json"
"errors"
"fmt"
"testing"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
)
func generate_message_lists(num_lists, num_entries int) ([][]lp.CCMessage, error) {
mlist := make([][]lp.CCMessage, 0)
for j := 0; j < num_lists; j++ {
out := make([]lp.CCMessage, 0)
for i := 0; i < num_entries; i++ {
var x lp.CCMessage
var err error = nil
switch {
case i%4 == 0:
x, err = lp.NewEvent("myevent", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, "nothing happened", time.Now())
case i%4 == 1:
x, err = lp.NewMetric("mymetric", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{"unit": "kByte"}, 12.145, time.Now())
case i%4 == 2:
x, err = lp.NewLog("mylog", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, "disk status: OK", time.Now())
case i%4 == 3:
x, err = lp.NewGetControl("mycontrol", map[string]string{"type": "socket", "type-id": "0"}, map[string]string{}, time.Now())
}
if err == nil {
x.AddTag("hostname", "myhost")
out = append(out, x)
} else {
return nil, errors.New("failed to create message")
}
}
mlist = append(mlist, out)
}
return mlist, nil
}
func TestNewMessageProcessor(t *testing.T) {
_, err := NewMessageProcessor()
if err != nil {
t.Error(err.Error())
}
}
type Configs struct {
name string
config json.RawMessage
drop bool
errors bool
pre func(msg lp.CCMessage) error
check func(msg lp.CCMessage) error
}
var test_configs = []Configs{
{
name: "single_dropif_nomatch",
config: json.RawMessage(`{"drop_messages_if": [ "name == 'testname' && tags.type == 'socket' && tags.typeid % 2 == 1"]}`),
},
{
name: "drop_by_name",
config: json.RawMessage(`{"drop_messages": [ "net_bytes_in"]}`),
drop: true,
},
{
name: "drop_by_type_match",
config: json.RawMessage(`{"drop_by_message_type": [ "metric"]}`),
drop: true,
},
{
name: "drop_by_type_nomatch",
config: json.RawMessage(`{"drop_by_message_type": [ "event"]}`),
},
{
name: "single_dropif_match",
config: json.RawMessage(`{"drop_messages_if": [ "name == 'net_bytes_in' && tags.type == 'node'"]}`),
drop: true,
},
{
name: "double_dropif_match_nomatch",
config: json.RawMessage(`{"drop_messages_if": [ "name == 'net_bytes_in' && tags.type == 'node'", "name == 'testname' && tags.type == 'socket' && tags.typeid % 2 == 1"]}`),
drop: true,
},
{
name: "rename_simple",
config: json.RawMessage(`{"rename_messages": { "net_bytes_in" : "net_bytes_out", "rapl_power": "cpu_power"}}`),
check: func(msg lp.CCMessage) error {
if msg.Name() != "net_bytes_out" {
return errors.New("expected name net_bytes_out but still have net_bytes_in")
}
return nil
},
},
{
name: "rename_match",
config: json.RawMessage(`{"rename_messages_if": { "name == 'net_bytes_in'" : "net_bytes_out", "name == 'rapl_power'": "cpu_power"}}`),
check: func(msg lp.CCMessage) error {
if msg.Name() != "net_bytes_out" {
return errors.New("expected name net_bytes_out but still have net_bytes_in")
}
return nil
},
},
{
name: "rename_nomatch",
config: json.RawMessage(`{"rename_messages_if": { "name == 'net_bytes_out'" : "net_bytes_in", "name == 'rapl_power'": "cpu_power"}}`),
check: func(msg lp.CCMessage) error {
if msg.Name() != "net_bytes_in" {
return errors.New("expected name net_bytes_in but still have net_bytes_out")
}
return nil
},
},
{
name: "add_tag",
config: json.RawMessage(`{"add_tags_if": [{"if": "name == 'net_bytes_in'", "key" : "cluster", "value" : "mycluster"}]}`),
check: func(msg lp.CCMessage) error {
if !msg.HasTag("cluster") {
return errors.New("expected new tag 'cluster' but not present")
}
return nil
},
},
{
name: "del_tag",
config: json.RawMessage(`{"delete_tags_if": [{"if": "name == 'net_bytes_in'", "key" : "type"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasTag("type") {
return errors.New("expected to have no 'type' but still present")
}
return nil
},
},
{
name: "add_meta",
config: json.RawMessage(`{"add_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "source", "value" : "example"}]}`),
check: func(msg lp.CCMessage) error {
if !msg.HasMeta("source") {
return errors.New("expected new tag 'source' but not present")
}
return nil
},
},
{
name: "del_meta",
config: json.RawMessage(`{"delete_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "unit"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasMeta("unit") {
return errors.New("expected to have no 'unit' but still present")
}
return nil
},
},
{
name: "add_field",
config: json.RawMessage(`{"add_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value" : "example"}]}`),
check: func(msg lp.CCMessage) error {
if !msg.HasField("myfield") {
return errors.New("expected new tag 'source' but not present")
}
return nil
},
},
{
name: "delete_fields_if_protected",
config: json.RawMessage(`{"delete_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "value"}]}`),
errors: true,
check: func(msg lp.CCMessage) error {
if !msg.HasField("value") {
return errors.New("expected to still have 'value' field because it is a protected field key")
}
return nil
},
},
{
name: "delete_fields_if_unprotected",
config: json.RawMessage(`{"delete_fields_if": [{"if": "name == 'net_bytes_in'", "key" : "testfield"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasField("testfield") {
return errors.New("expected to still have 'testfield' field but should be deleted")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddField("testfield", 4.123)
return nil
},
},
{
name: "single_change_prefix_match",
config: json.RawMessage(`{"change_unit_prefix": {"name == 'net_bytes_in' && tags.type == 'node'": "M"}}`),
check: func(msg lp.CCMessage) error {
if u, ok := msg.GetMeta("unit"); ok {
if u != "MB" {
return fmt.Errorf("expected unit MB but have %s", u)
}
} else if u, ok := msg.GetTag("unit"); ok {
if u != "MB" {
return fmt.Errorf("expected unit MB but have %s", u)
}
}
return nil
},
},
{
name: "normalize_units",
config: json.RawMessage(`{"normalize_units": true}`),
check: func(msg lp.CCMessage) error {
if u, ok := msg.GetMeta("unit"); ok {
if u != "B" {
return fmt.Errorf("expected unit B but have %s", u)
}
} else if u, ok := msg.GetTag("unit"); ok {
if u != "B" {
return fmt.Errorf("expected unit B but have %s", u)
}
}
return nil
},
},
{
name: "move_tag_to_meta",
config: json.RawMessage(`{"move_tag_to_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "type-id", "value": "typeid"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasTag("type-id") || !msg.HasMeta("typeid") {
return errors.New("moving tag 'type-id' to meta 'typeid' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddTag("type-id", "0")
return nil
},
},
{
name: "move_tag_to_field",
config: json.RawMessage(`{"move_tag_to_field_if": [{"if": "name == 'net_bytes_in'", "key" : "type-id", "value": "typeid"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasTag("type-id") || !msg.HasField("typeid") {
return errors.New("moving tag 'type-id' to field 'typeid' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddTag("type-id", "0")
return nil
},
},
{
name: "move_meta_to_tag",
config: json.RawMessage(`{"move_meta_to_tag_if": [{"if": "name == 'net_bytes_in'", "key" : "unit", "value": "unit"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasMeta("unit") || !msg.HasTag("unit") {
return errors.New("moving meta 'unit' to tag 'unit' failed")
}
return nil
},
},
{
name: "move_meta_to_field",
config: json.RawMessage(`{"move_meta_to_field_if": [{"if": "name == 'net_bytes_in'", "key" : "unit", "value": "unit"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasMeta("unit") || !msg.HasField("unit") {
return errors.New("moving meta 'unit' to field 'unit' failed")
}
return nil
},
},
{
name: "move_field_to_tag",
config: json.RawMessage(`{"move_field_to_tag_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value": "field"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasField("myfield") || !msg.HasTag("field") {
return errors.New("moving meta 'myfield' to tag 'field' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddField("myfield", 12)
return nil
},
},
{
name: "move_field_to_meta",
config: json.RawMessage(`{"move_field_to_meta_if": [{"if": "name == 'net_bytes_in'", "key" : "myfield", "value": "field"}]}`),
check: func(msg lp.CCMessage) error {
if msg.HasField("myfield") || !msg.HasMeta("field") {
return errors.New("moving meta 'myfield' to meta 'field' failed")
}
return nil
},
pre: func(msg lp.CCMessage) error {
msg.AddField("myfield", 12)
return nil
},
},
}
func TestConfigList(t *testing.T) {
for _, c := range test_configs {
t.Run(c.name, func(t *testing.T) {
m, err := lp.NewMetric("net_bytes_in", map[string]string{"type": "node", "type-id": "0"}, map[string]string{"unit": "Byte"}, float64(1024.0), time.Now())
if err != nil {
t.Error(err.Error())
return
}
if c.pre != nil {
if err = c.pre(m); err != nil {
t.Errorf("error running pre-test function: %v", err.Error())
return
}
}
mp, err := NewMessageProcessor()
if err != nil {
t.Error(err.Error())
return
}
err = mp.FromConfigJSON(c.config)
if err != nil {
t.Error(err.Error())
return
}
//t.Log(m.ToLineProtocol(nil))
out, err := mp.ProcessMessage(m)
if err != nil && !c.errors {
cclog.SetDebug()
mp.ProcessMessage(m)
t.Error(err.Error())
return
}
if out == nil && !c.drop {
t.Error("fail, message should NOT be dropped but processor signalled dropping")
return
} else if out != nil && c.drop {
t.Error("fail, message should be dropped but processor signalled NO dropping")
return
}
// {
// if c.drop {
// t.Error("fail, message should be dropped but processor signalled NO dropping")
// } else {
// t.Error("fail, message should NOT be dropped but processor signalled dropping")
// }
// cclog.SetDebug()
// mp.ProcessMessage(m)
// return
// }
if c.check != nil {
if err := c.check(out); err != nil {
t.Errorf("check failed with %v", err.Error())
t.Log("Rerun with debugging")
cclog.SetDebug()
mp.ProcessMessage(m)
return
}
}
})
}
}
func BenchmarkProcessing(b *testing.B) {
mlist, err := generate_message_lists(b.N, 1000)
if err != nil {
b.Error(err.Error())
return
}
mp, err := NewMessageProcessor()
if err != nil {
b.Error(err.Error())
return
}
err = mp.FromConfigJSON(json.RawMessage(`{"move_meta_to_tag_if": [{"if" : "name == 'mymetric'", "key":"unit", "value":"unit"}]}`))
if err != nil {
b.Error(err.Error())
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, m := range mlist[i] {
if _, err := mp.ProcessMessage(m); err != nil {
b.Errorf("failed processing message '%s': %v", m.ToLineProtocol(nil), err.Error())
return
}
}
}
b.StopTimer()
b.ReportMetric(float64(b.Elapsed())/float64(b.N*1000), "ns/message")
}

View File

@@ -7,23 +7,23 @@
},
"redfish_recv": {
"type": "redfish",
"endpoint": "https://%h-bmc",
"client_config": [
{
"host_list": "my-host-1-[1-2]",
"hostname": "my-host-1",
"username": "username-1",
"password": "password-1"
"password": "password-1",
"endpoint": "https://my-endpoint-1"
},
{
"host_list": "my-host-2-[1,2]",
"hostname": "my-host-2",
"username": "username-2",
"password": "password-2"
"password": "password-2",
"endpoint": "https://my-endpoint-2"
}
]
},
"ipmi_recv": {
"type": "ipmi",
"endpoint": "ipmi-sensors://%h-ipmi",
"exclude_metrics": [
"fan_speed",
"voltage"
@@ -32,12 +32,18 @@
{
"username": "username-1",
"password": "password-1",
"host_list": "my-host-1-[1-2]"
"endpoint": "ipmi-sensors://my-endpoint-1",
"host_list": [
"my-host-1"
]
},
{
"username": "username-2",
"password": "password-2",
"host_list": "my-host-2-[1,2]"
"endpoint": "ipmi-sensors://my-endpoint-2",
"host_list": [
"my-host-2"
]
}
]
}

View File

@@ -2,7 +2,7 @@
This folder contains the ReceiveManager and receiver implementations for the cc-metric-collector.
## Configuration
# Configuration
The configuration file for the receivers is a list of configurations. The `type` field in each specifies which receiver to initialize.
@@ -22,11 +22,8 @@ This allows to specify
- [`nats`](./natsReceiver.md): Receive metrics from the NATS network
- [`prometheus`](./prometheusReceiver.md): Scrape data from a Prometheus client
- [`http`](./httpReceiver.md): Listen for HTTP Post requests transporting metrics in InfluxDB line protocol
- [`ipmi`](./ipmiReceiver.md): Read IPMI sensor readings
- [`redfish`](./redfishReceiver.md): Use the Redfish specification to query thermal and power metrics
## Contributing own receivers
# Contributing own receivers
A receiver contains a few functions and is derived from the type `Receiver` (in `metricReceiver.go`):
For an example, check the [sample receiver](./sampleReceiver.go)

View File

@@ -0,0 +1,164 @@
package receivers
import (
"bufio"
"encoding/json"
"fmt"
"net"
"os"
"sync"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influx "github.com/influxdata/line-protocol"
)
// AppMetricReceiver configuration: receiver type and UNIX socket file
type AppMetricReceiverConfig struct {
Type string `json:"type"`
SocketFile string `json:"socket_file"`
}
type AppMetricReceiver struct {
receiver
config AppMetricReceiverConfig
// Storage for static information
meta map[string]string
// Use in case of own go routine
done chan bool
wg sync.WaitGroup
// Influx stuff
handler *influx.MetricHandler
parser *influx.Parser
// WaitGroup for individual connections
connWg sync.WaitGroup
listener net.Listener
}
func (r *AppMetricReceiver) newConnection(conn net.Conn) {
defer conn.Close()
// Reuse a single bufio.Reader for the whole connection so that bytes
// already buffered are not lost between reads, and loop instead of
// recursing to avoid unbounded stack growth on long-lived connections.
reader := bufio.NewReader(conn)
for {
buffer, err := reader.ReadBytes('\n')
if err != nil {
return
}
metrics, err := r.parser.Parse(buffer)
if err != nil {
cclog.ComponentError(r.name, "failed to parse received metrics")
return
}
for _, m := range metrics {
y := lp.FromInfluxMetric(m)
for k, v := range r.meta {
y.AddMeta(k, v)
}
if r.sink != nil {
r.sink <- y
}
}
}
}
func (r *AppMetricReceiver) newAccepter(listenSocket net.Listener) {
accept_loop:
for {
select {
case <-r.done:
break accept_loop
default:
conn, err := listenSocket.Accept()
if err == nil {
r.connWg.Add(1)
go func() {
r.newConnection(conn)
r.connWg.Done()
}()
}
}
}
r.wg.Done()
}
// Implement functions required for Receiver interface
// Start(), Close()
// See: metricReceiver.go
func (r *AppMetricReceiver) Start() {
var err error = nil
cclog.ComponentDebug(r.name, "START")
r.listener, err = net.Listen("unix", r.config.SocketFile)
if err != nil {
cclog.ComponentError(r.name, "failed to listen at socket", r.config.SocketFile)
}
if _, err := os.Stat(r.config.SocketFile); err != nil {
cclog.ComponentError(r.name, "failed to create socket", r.config.SocketFile)
}
r.done = make(chan bool)
r.wg.Add(1)
go r.newAccepter(r.listener)
}
// Close receiver: close network connection, close files, close libraries, ...
func (r *AppMetricReceiver) Close() {
cclog.ComponentDebug(r.name, "CLOSE")
if _, err := os.Stat(r.config.SocketFile); err == nil {
if err := os.RemoveAll(r.config.SocketFile); err != nil {
cclog.ComponentError(r.name, "Failed to remove UNIX socket", r.config.SocketFile)
}
}
// in case of own go routine, send the signal and wait
r.listener.Close()
r.done <- true
close(r.done)
r.connWg.Wait()
r.wg.Wait()
}
// New function to create a new instance of the receiver
// Initialize the receiver by giving it a name and reading in the config JSON
func NewAppMetricReceiver(name string, config json.RawMessage) (Receiver, error) {
r := new(AppMetricReceiver)
// Set name of AppMetricReceiver
// The name should be chosen in such a way that different instances of AppMetricReceiver can be distinguished
r.name = fmt.Sprintf("AppMetricReceiver(%s)", name)
// Set static information
r.meta = map[string]string{"source": r.name}
// Set defaults in r.config
// Allow overwriting these defaults by reading config JSON
r.config.SocketFile = "/tmp/cc.sock"
// Read the AppMetricReceiver specific JSON config
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return nil, err
}
}
if len(r.config.SocketFile) == 0 {
cclog.ComponentError(r.name, "Invalid socket_file setting:", r.config.SocketFile)
return nil, fmt.Errorf("invalid socket_file setting: %s", r.config.SocketFile)
}
// Check that all required fields in the configuration are set
// Use 'if len(r.config.Option) > 0' for strings
r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
r.parser.SetTimeFunc(DefaultTime)
return r, nil
}

View File

@@ -0,0 +1,23 @@
## `appmetrics` receiver
The `appmetrics` receiver can be used to submit metrics from an application into the monitoring system. It listens for incoming connections on a UNIX socket.
### Configuration structure
```json
{
"<name>": {
"type": "appmetrics",
"socket_file" : "/tmp/cc.sock",
}
}
```
- `type`: makes the receiver an `appmetrics` receiver
- `socket_file`: path of the UNIX socket to listen on
### Inputs from applications
Applications can connect to the `appmetrics` socket and submit metrics in the [InfluxDB line protocol](https://github.com/influxdata/line-protocol). It is currently not possible to submit meta information, as the InfluxDB line protocol does not support it.
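
A minimal client sketch (not part of the repository) that pushes one metric to the socket; the socket path matches the default above, the metric name is made up:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.Dial("unix", "/tmp/cc.sock")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// One metric in InfluxDB line protocol, terminated by a newline.
	fmt.Fprintf(conn, "myapp_flops,type=node value=42.0 %d\n", time.Now().UnixNano())
}
```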

View File

@@ -5,57 +5,40 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/gorilla/mux"
influx "github.com/influxdata/line-protocol"
)
const HTTP_RECEIVER_PORT = "8080"
type HttpReceiverConfig struct {
defaultReceiverConfig
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
Path string `json:"path"`
// Maximum amount of time to wait for the next request when keep-alives are enabled
// should be larger than the measurement interval to keep the connection open
IdleTimeout string `json:"idle_timeout"`
idleTimeout time.Duration
// Controls whether HTTP keep-alives are enabled. By default, keep-alives are enabled
KeepAlivesEnabled bool `json:"keep_alives_enabled"`
// Basic authentication
Username string `json:"username"`
Password string `json:"password"`
useBasicAuth bool
}
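
A hedged configuration sketch matching the struct tags above; since the diff mixes old and new fields, treat the keys and values as illustrative only:

```json
{
  "myhttp": {
    "type": "http",
    "address": "127.0.0.1",
    "port": "8080",
    "path": "write",
    "idle_timeout": "120s",
    "keep_alives_enabled": true,
    "username": "user",
    "password": "secret"
  }
}
```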
type HttpReceiver struct {
receiver
//meta map[string]string
config HttpReceiverConfig
server *http.Server
wg sync.WaitGroup
handler *influx.MetricHandler
parser *influx.Parser
meta map[string]string
config HttpReceiverConfig
router *mux.Router
server *http.Server
wg sync.WaitGroup
}
func (r *HttpReceiver) Init(name string, config json.RawMessage) error {
r.name = fmt.Sprintf("HttpReceiver(%s)", name)
// Set default values
r.config.Port = HTTP_RECEIVER_PORT
r.config.KeepAlivesEnabled = true
// should be larger than the measurement interval to keep the connection open
r.config.IdleTimeout = "120s"
// Read config
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
if err != nil {
@@ -66,59 +49,20 @@ func (r *HttpReceiver) Init(name string, config json.RawMessage) error {
if len(r.config.Port) == 0 {
return errors.New("not all configuration variables set required by HttpReceiver")
}
// Check idle timeout config
if len(r.config.IdleTimeout) > 0 {
t, err := time.ParseDuration(r.config.IdleTimeout)
if err == nil {
cclog.ComponentDebug(r.name, "idleTimeout", t)
r.config.idleTimeout = t
}
}
// Check basic authentication config
if len(r.config.Username) > 0 || len(r.config.Password) > 0 {
r.config.useBasicAuth = true
}
if r.config.useBasicAuth && len(r.config.Username) == 0 {
return errors.New("basic authentication requires username")
}
if r.config.useBasicAuth && len(r.config.Password) == 0 {
return errors.New("basic authentication requires password")
}
msgp, err := mp.NewMessageProcessor()
if err != nil {
return fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = msgp
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
r.mp.AddAddMetaByCondition("true", "source", r.name)
//r.meta = map[string]string{"source": r.name}
r.meta = map[string]string{"source": r.name}
p := r.config.Path
if !strings.HasPrefix(p, "/") {
p = "/" + p
}
addr := fmt.Sprintf("%s:%s", r.config.Addr, r.config.Port)
uri := addr + p
cclog.ComponentDebug(r.name, "INIT", "listen on:", uri)
// Register handler function r.ServerHttp for path p in the DefaultServeMux
http.HandleFunc(p, r.ServerHttp)
// Create http server
r.server = &http.Server{
Addr: addr,
Handler: nil, // handler to invoke, http.DefaultServeMux if nil
IdleTimeout: r.config.idleTimeout,
}
r.server.SetKeepAlivesEnabled(r.config.KeepAlivesEnabled)
uri := fmt.Sprintf("%s:%s%s", r.config.Addr, r.config.Port, p)
cclog.ComponentDebug(r.name, "INIT", uri)
r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
r.parser.SetTimeFunc(DefaultTime)
r.router = mux.NewRouter()
r.router.Path(p).HandlerFunc(r.ServerHttp)
r.server = &http.Server{Addr: uri, Handler: r.router}
return nil
}
@@ -135,96 +79,28 @@ func (r *HttpReceiver) Start() {
}
func (r *HttpReceiver) ServerHttp(w http.ResponseWriter, req *http.Request) {
// Check request method, only the POST method is handled
if req.Method != http.MethodPost {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return
}
// Check basic authentication
if r.config.useBasicAuth {
username, password, ok := req.BasicAuth()
if !ok || username != r.config.Username || password != r.config.Password {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
body, err := io.ReadAll(req.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
metrics, err := r.parser.Parse(body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
}
if r.sink != nil {
d := influx.NewDecoder(req.Body)
for d.Next() {
// Decode measurement name
measurement, err := d.Measurement()
if err != nil {
msg := "ServerHttp: Failed to decode measurement: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
// Decode tags
tags := make(map[string]string)
for {
key, value, err := d.NextTag()
if err != nil {
msg := "ServerHttp: Failed to decode tag: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
if key == nil {
break
}
tags[string(key)] = string(value)
}
// Decode fields
fields := make(map[string]interface{})
for {
key, value, err := d.NextField()
if err != nil {
msg := "ServerHttp: Failed to decode field: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
if key == nil {
break
}
fields[string(key)] = value.Interface()
}
// Decode time stamp
t, err := d.Time(influx.Nanosecond, time.Time{})
if err != nil {
msg := "ServerHttp: Failed to decode time stamp: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
y, _ := lp.NewMessage(
string(measurement),
tags,
nil,
fields,
t,
)
m, err := r.mp.ProcessMessage(y)
if err == nil && m != nil {
r.sink <- m
}
for _, m := range metrics {
y := lp.FromInfluxMetric(m)
for k, v := range r.meta {
y.AddMeta(k, v)
}
// Check for IO errors
err := d.Err()
if err != nil {
msg := "ServerHttp: Failed to decode: " + err.Error()
cclog.ComponentError(r.name, msg)
http.Error(w, msg, http.StatusInternalServerError)
return
if r.sink != nil {
r.sink <- y
}
}

View File

@@ -10,10 +10,7 @@ The `http` receiver can be used to receive metrics through HTTP POST requests.
"type": "http",
"address" : "",
"port" : "8080",
"path" : "/write",
"idle_timeout": "120s",
"username": "myUser",
"password": "myPW"
"path" : "/write"
}
}
```
@@ -22,22 +19,5 @@ The `http` receiver can be used to receive metrics through HTTP POST requests.
- `address`: Listen address
- `port`: Listen port
- `path`: URL path for the write endpoint
- `idle_timeout`: Maximum amount of time to wait for the next request when keep-alives are enabled; should be larger than the measurement interval to keep the connection open
- `keep_alives_enabled`: Controls whether HTTP keep-alives are enabled. By default, keep-alives are enabled.
- `username`: username for basic authentication
- `password`: password for basic authentication
The HTTP endpoint listens on `http://<address>:<port>/<path>`
### Debugging
- Install [curl](https://curl.se/)
- Use curl to send a message to the `http` receiver
```bash
curl http://localhost:8080/write \
--user "myUser:myPW" \
--data \
"myMetric,hostname=myHost,type=hwthread,type-id=0,unit=Hz value=400000i 1694777161164284635
myMetric,hostname=myHost,type=hwthread,type-id=1,unit=Hz value=400001i 1694777161164284635"
```
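- Alternatively, send the same request from Go. A minimal standard-library sketch, with endpoint, credentials, and metric lines taken from the curl example above:
```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Same metric lines as in the curl example above
	body := "myMetric,hostname=myHost,type=hwthread,type-id=0,unit=Hz value=400000i 1694777161164284635\n" +
		"myMetric,hostname=myHost,type=hwthread,type-id=1,unit=Hz value=400001i 1694777161164284635\n"
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/write", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	// Only needed when username/password are configured for the receiver
	req.SetBasicAuth("myUser", "myPW")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```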

View File

@@ -13,10 +13,8 @@ import (
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
"github.com/ClusterCockpit/cc-metric-collector/pkg/hostlist"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type IPMIReceiverClientConfig struct {
@@ -32,13 +30,11 @@ type IPMIReceiverClientConfig struct {
Password string // Password to use for authentication
CLIOptions []string // Additional command line options for ipmi-sensors
isExcluded map[string]bool // is metric excluded
mp mp.MessageProcessor
}
type IPMIReceiver struct {
receiver
config struct {
defaultReceiverConfig
Interval time.Duration
// Client config for each IPMI hosts
@@ -46,11 +42,10 @@ type IPMIReceiver struct {
}
// Storage for static information
//meta map[string]string
meta map[string]string
done chan bool // channel to finish / stop IPMI receiver
wg sync.WaitGroup // wait group for IPMI receiver
mp mp.MessageProcessor
}
// doReadMetric reads metrics from all configured IPMI hosts.
@@ -217,7 +212,7 @@ func (r *IPMIReceiver) doReadMetric() {
continue
}
y, err := lp.NewMessage(
y, err := lp.New(
metric,
map[string]string{
"hostname": host,
@@ -234,14 +229,7 @@ func (r *IPMIReceiver) doReadMetric() {
},
time.Now())
if err == nil {
mc, err := clientConfig.mp.ProcessMessage(y)
if err == nil && mc != nil {
m, err := r.mp.ProcessMessage(mc)
if err == nil && m != nil {
r.sink <- m
}
}
r.sink <- y
}
}
@@ -307,12 +295,11 @@ func (r *IPMIReceiver) Close() {
// NewIPMIReceiver creates a new instance of the IPMI receiver
// Initialize the receiver by giving it a name and reading in the config JSON
func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
var err error
r := new(IPMIReceiver)
// Config options from config file
configJSON := struct {
defaultReceiverConfig
Type string `json:"type"`
// How often the IPMI sensor metrics should be read and sent to the sink (default: 30 s)
IntervalString string `json:"interval,omitempty"`
@@ -332,19 +319,18 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ClientConfigs []struct {
Fanout int `json:"fanout,omitempty"` // Maximum number of simultaneous IPMI connections (default: 64)
DriverType string `json:"driver_type,omitempty"` // Out of band IPMI driver (default: LAN_2_0)
HostList string `json:"host_list"` // List of hosts with the same client configuration
Username *string `json:"username"` // User name to authenticate with
Password *string `json:"password"` // Password to use for authentication
Endpoint *string `json:"endpoint"` // URL of the IPMI service
Fanout int `json:"fanout,omitempty"` // Maximum number of simultaneous IPMI connections (default: 64)
DriverType string `json:"driver_type,omitempty"` // Out of band IPMI driver (default: LAN_2_0)
HostList []string `json:"host_list"` // List of hosts with the same client configuration
Username *string `json:"username"` // User name to authenticate with
Password *string `json:"password"` // Password to use for authentication
Endpoint *string `json:"endpoint"` // URL of the IPMI service
// Per client excluded metrics
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
// Additional command line options for ipmi-sensors
CLIOptions []string `json:"cli_options,omitempty"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
CLIOptions []string `json:"cli_options,omitempty"`
} `json:"client_config"`
}{
// Set defaults values
@@ -360,38 +346,24 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
// Create done channel
r.done = make(chan bool)
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
// Set static information
//r.meta = map[string]string{"source": r.name}
r.mp.AddAddMetaByCondition("true", "source", r.name)
r.meta = map[string]string{"source": r.name}
// Read the IPMI receiver specific JSON config
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&configJSON); err != nil {
err := json.Unmarshal(config, &configJSON)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return nil, err
}
}
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Convert interval string representation to duration
var err error
r.config.Interval, err = time.ParseDuration(configJSON.IntervalString)
if err != nil {
err := fmt.Errorf(
"failed to parse duration string interval='%s': %w",
"Failed to parse duration string interval='%s': %w",
configJSON.IntervalString,
err,
)
@@ -463,17 +435,10 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
return nil, err
}
// Create mapping between IPMI host name and node host name
// This also guarantees that all IPMI host names are unique
// Create mapping between ipmi hostname and node hostname
// This also guarantees that all ipmi hostnames are unique
ipmi2HostMapping := make(map[string]string)
hostList, err := hostlist.Expand(clientConfigJSON.HostList)
if err != nil {
err := fmt.Errorf("client config number %d failed to parse host list %s: %v",
i, clientConfigJSON.HostList, err)
cclog.ComponentError(r.name, err)
return nil, err
}
for _, host := range hostList {
for _, host := range clientConfigJSON.HostList {
ipmiHost := strings.Replace(host_pattern, "%h", host, -1)
ipmi2HostMapping[ipmiHost] = host
}
@@ -532,16 +497,6 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
for _, key := range configJSON.ExcludeMetrics {
isExcluded[key] = true
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
if len(clientConfigJSON.MessageProcessor) > 0 {
err = p.FromConfigJSON(clientConfigJSON.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
r.config.ClientConfigs = append(
r.config.ClientConfigs,
@@ -556,7 +511,6 @@ func NewIPMIReceiver(name string, config json.RawMessage) (Receiver, error) {
Password: password,
CLIOptions: cliOptions,
isExcluded: isExcluded,
mp: p,
})
}

View File

@@ -12,14 +12,14 @@ The IPMI Receiver uses `ipmi-sensors` from the [FreeIPMI](https://www.gnu.org/so
"fanout": 256,
"username": "<Username>",
"password": "<Password>",
"endpoint": "ipmi-sensors://%h-bmc",
"endpoint": "ipmi-sensors://%h-p",
"exclude_metrics": [ "fan_speed", "voltage" ],
"client_config": [
{
"host_list": "n[1,2-4]"
"host_list": ["n1", "n2", "n3", "n4" ]
},
{
"host_list": "n[5-6]",
"host_list": [ "n5", "n6" ],
"driver_type": "LAN",
"cli_options": [ "--workaround-flags=..." ],
"password": "<Password 2>"

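The `endpoint` setting above uses `%h` as a placeholder that the receiver substitutes with each host name from `host_list`. A small illustrative Go sketch of that substitution, mirroring the `strings.Replace` call in the receiver code (host names are examples only):
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Endpoint pattern from the configuration; "%h" stands for the host name
	pattern := "ipmi-sensors://%h-bmc"
	for _, host := range []string{"n1", "n2", "n3", "n4"} {
		// The receiver performs the same substitution for every host in host_list
		fmt.Println(strings.Replace(pattern, "%h", host, -1))
	}
}
```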
View File

@@ -1,15 +1,11 @@
package receivers
import (
"encoding/json"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type defaultReceiverConfig struct {
Type string `json:"type"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
Type string `json:"type"`
}
// Receiver configuration: Listen address, port
@@ -23,15 +19,14 @@ type ReceiverConfig struct {
type receiver struct {
name string
sink chan lp.CCMessage
mp mp.MessageProcessor
sink chan lp.CCMetric
}
type Receiver interface {
Start()
Close() // Close / finish metric receiver
Name() string // Name of the metric receiver
SetSink(sink chan lp.CCMessage) // Set sink channel
Close() // Close / finish metric receiver
Name() string // Name of the metric receiver
SetSink(sink chan lp.CCMetric) // Set sink channel
}
// Name returns the name of the metric receiver
@@ -40,6 +35,6 @@ func (r *receiver) Name() string {
}
// SetSink set the sink channel
func (r *receiver) SetSink(sink chan lp.CCMessage) {
func (r *receiver) SetSink(sink chan lp.CCMetric) {
r.sink = sink
}

View File

@@ -4,110 +4,54 @@ import (
"encoding/json"
"errors"
"fmt"
"os"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influx "github.com/influxdata/line-protocol"
nats "github.com/nats-io/nats.go"
)
type NatsReceiverConfig struct {
defaultReceiverConfig
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
Subject string `json:"subject"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
NkeyFile string `json:"nkey_file,omitempty"`
}
type NatsReceiver struct {
receiver
nc *nats.Conn
//meta map[string]string
config NatsReceiverConfig
nc *nats.Conn
handler *influx.MetricHandler
parser *influx.Parser
meta map[string]string
config NatsReceiverConfig
}
var DefaultTime = func() time.Time {
return time.Unix(42, 0)
}
// Start subscribes to the configured NATS subject
// Messages will be handled by r._NatsReceive
func (r *NatsReceiver) Start() {
cclog.ComponentDebug(r.name, "START")
r.nc.Subscribe(r.config.Subject, r._NatsReceive)
}
// _NatsReceive receives subscribed messages from the NATS server
func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
if r.sink != nil {
d := influx.NewDecoderWithBytes(m.Data)
for d.Next() {
// Decode measurement name
measurement, err := d.Measurement()
if err != nil {
msg := "_NatsReceive: Failed to decode measurement: " + err.Error()
cclog.ComponentError(r.name, msg)
return
metrics, err := r.parser.Parse(m.Data)
if err == nil {
for _, m := range metrics {
y := lp.FromInfluxMetric(m)
for k, v := range r.meta {
y.AddMeta(k, v)
}
// Decode tags
tags := make(map[string]string)
for {
key, value, err := d.NextTag()
if err != nil {
msg := "_NatsReceive: Failed to decode tag: " + err.Error()
cclog.ComponentError(r.name, msg)
return
}
if key == nil {
break
}
tags[string(key)] = string(value)
}
// Decode fields
fields := make(map[string]interface{})
for {
key, value, err := d.NextField()
if err != nil {
msg := "_NatsReceive: Failed to decode field: " + err.Error()
cclog.ComponentError(r.name, msg)
return
}
if key == nil {
break
}
fields[string(key)] = value.Interface()
}
// Decode time stamp
t, err := d.Time(influx.Nanosecond, time.Time{})
if err != nil {
msg := "_NatsReceive: Failed to decode time: " + err.Error()
cclog.ComponentError(r.name, msg)
return
}
y, _ := lp.NewMessage(
string(measurement),
tags,
nil,
fields,
t,
)
m, err := r.mp.ProcessMessage(y)
if err == nil && m != nil {
r.sink <- m
if r.sink != nil {
r.sink <- y
}
}
}
}
// Close closes the connection to the NATS server
func (r *NatsReceiver) Close() {
if r.nc != nil {
cclog.ComponentDebug(r.name, "CLOSE")
@@ -115,14 +59,10 @@ func (r *NatsReceiver) Close() {
}
}
// NewNatsReceiver creates a new Receiver which subscribes to messages from a NATS server
func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
var uinfo nats.Option = nil
r := new(NatsReceiver)
r.name = fmt.Sprintf("NatsReceiver(%s)", name)
// Read configuration file, allow overwriting default config
r.config.Addr = "localhost"
r.config.Addr = nats.DefaultURL
r.config.Port = "4222"
if len(config) > 0 {
err := json.Unmarshal(config, &r.config)
@@ -136,45 +76,17 @@ func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
len(r.config.Subject) == 0 {
return nil, errors.New("not all configuration variables set required by NatsReceiver")
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Set metadata
// r.meta = map[string]string{
// "source": r.name,
// }
r.mp.AddAddMetaByCondition("true", "source", r.name)
if len(r.config.User) > 0 && len(r.config.Password) > 0 {
uinfo = nats.UserInfo(r.config.User, r.config.Password)
} else if len(r.config.NkeyFile) > 0 {
_, err := os.Stat(r.config.NkeyFile)
if err == nil {
uinfo = nats.UserCredentials(r.config.NkeyFile)
} else {
cclog.ComponentError(r.name, "NKEY file", r.config.NkeyFile, "does not exist: %v", err.Error())
return nil, err
}
}
// Connect to NATS server
url := fmt.Sprintf("nats://%s:%s", r.config.Addr, r.config.Port)
cclog.ComponentDebug(r.name, "NewNatsReceiver", url, "Subject", r.config.Subject)
if nc, err := nats.Connect(url, uinfo); err == nil {
r.meta = map[string]string{"source": r.name}
uri := fmt.Sprintf("%s:%s", r.config.Addr, r.config.Port)
cclog.ComponentDebug(r.name, "NewNatsReceiver", uri, "Subject", r.config.Subject)
if nc, err := nats.Connect(uri); err == nil {
r.nc = nc
} else {
r.nc = nil
return nil, err
}
r.handler = influx.NewMetricHandler()
r.parser = influx.NewParser(r.handler)
r.parser.SetTimeFunc(DefaultTime)
return r, nil
}

View File

@@ -10,10 +10,7 @@ The `nats` receiver can be used to receive metrics from the NATS network. The `nats`
"type": "nats",
"address" : "nats-server.example.org",
"port" : "4222",
"subject" : "subject",
"user": "natsuser",
"password": "natssecret",
"nkey_file": "/path/to/nkey_file"
"subject" : "subject"
}
}
```
@@ -22,35 +19,3 @@ The `nats` receiver can be used to receive metrics from the NATS network. The `nats`
- `address`: Address of the NATS control server
- `port`: Port of the NATS control server
- `subject`: Subscribes to this subject and receives metrics
- `user`: Connect to NATS using this user name
- `password`: Connect to NATS using this password
- `nkey_file`: Path to the credentials file with the NKEY
### Debugging
- Install NATS server and command line client
- Start NATS server
```bash
nats-server --net nats-server.example.org --port 4222
```
- Check that the NATS server works as expected
```bash
nats --server=nats-server-db.example.org:4222 server check
```
- Use the NATS command line client to subscribe to all messages
```bash
nats --server=nats-server-db.example.org:4222 sub ">"
```
- Use the NATS command line client to send a message to the NATS receiver
```bash
nats --server=nats-server-db.example.org:4222 pub subject \
"myMetric,hostname=myHost,type=hwthread,type-id=0,unit=Hz value=400000i 1694777161164284635
myMetric,hostname=myHost,type=hwthread,type-id=1,unit=Hz value=400001i 1694777161164284635"
```
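- Alternatively, publish from Go with the `nats.go` client that the receiver itself uses. A minimal sketch, reusing the server address and subject from the examples above (the metric line is illustrative):
```go
package main

import (
	"fmt"
	"time"

	nats "github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect("nats://nats-server-db.example.org:4222")
	if err != nil {
		panic(err)
	}
	defer nc.Close()

	// One metric in InfluxDB line protocol with a nanosecond timestamp
	line := fmt.Sprintf("myMetric,hostname=myHost,type=hwthread,type-id=0,unit=Hz value=400000i %d",
		time.Now().UnixNano())
	if err := nc.Publish("subject", []byte(line)); err != nil {
		panic(err)
	}
	// Ensure the message is flushed to the server before exiting
	nc.Flush()
}
```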

View File

@@ -13,7 +13,7 @@ import (
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type PrometheusReceiverConfig struct {
@@ -74,7 +74,7 @@ func (r *PrometheusReceiver) Start() {
}
value, err := strconv.ParseFloat(lineSplit[1], 64)
if err == nil {
y, err := lp.NewMessage(name, tags, r.meta, map[string]interface{}{"value": value}, t)
y, err := lp.New(name, tags, r.meta, map[string]interface{}{"value": value}, t)
if err == nil {
r.sink <- y
}

View File

@@ -7,26 +7,26 @@ import (
"sync"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
var AvailableReceivers = map[string]func(name string, config json.RawMessage) (Receiver, error){
"http": NewHttpReceiver,
"ipmi": NewIPMIReceiver,
"nats": NewNatsReceiver,
"redfish": NewRedfishReceiver,
"ipmi": NewIPMIReceiver,
"nats": NewNatsReceiver,
"redfish": NewRedfishReceiver,
"appmetrics": NewAppMetricReceiver,
}
type receiveManager struct {
inputs []Receiver
output chan lp.CCMessage
output chan lp.CCMetric
config []json.RawMessage
}
type ReceiveManager interface {
Init(wg *sync.WaitGroup, receiverConfigFile string) error
AddInput(name string, rawConfig json.RawMessage) error
AddOutput(output chan lp.CCMessage)
AddOutput(output chan lp.CCMetric)
Start()
Close()
}
@@ -93,7 +93,7 @@ func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error
return nil
}
func (rm *receiveManager) AddOutput(output chan lp.CCMessage) {
func (rm *receiveManager) AddOutput(output chan lp.CCMetric) {
rm.output = output
for _, r := range rm.inputs {
r.SetSink(rm.output)

View File

@@ -1,22 +1,17 @@
package receivers
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"maps"
"net/http"
"strconv"
"strings"
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
"github.com/ClusterCockpit/cc-metric-collector/pkg/hostlist"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
// See: https://pkg.go.dev/github.com/stmcginnis/gofish
"github.com/stmcginnis/gofish"
@@ -34,17 +29,11 @@ type RedfishReceiverClientConfig struct {
doPowerMetric bool
doProcessorMetrics bool
doSensors bool
doThermalMetrics bool
skipProcessorMetricsURL map[string]bool
// readSensorURLs stores for each chassis ID a list of sensor URLs to read
readSensorURLs map[string][]string
gofish gofish.ClientConfig
mp mp.MessageProcessor
}
// RedfishReceiver configuration:
@@ -52,7 +41,6 @@ type RedfishReceiver struct {
receiver
config struct {
defaultReceiverConfig
fanout int
Interval time.Duration
HttpTimeout time.Duration
@@ -65,232 +53,7 @@ type RedfishReceiver struct {
wg sync.WaitGroup // wait group for redfish receiver
}
// deleteEmptyTags removes tags or meta data tags with an empty value
func deleteEmptyTags(tags map[string]string) {
maps.DeleteFunc(
tags,
func(key string, value string) bool {
return value == ""
},
)
}
// setMetricValue sets the value entry in the fields map
func setMetricValue(value any) map[string]interface{} {
return map[string]interface{}{
"value": value,
}
}
// sendMetric sends the metric through the sink channel
func (r *RedfishReceiver) sendMetric(mp mp.MessageProcessor, name string, tags map[string]string, meta map[string]string, value any, timestamp time.Time) {
deleteEmptyTags(tags)
deleteEmptyTags(meta)
y, err := lp.NewMessage(name, tags, meta, setMetricValue(value), timestamp)
if err == nil {
mc, err := mp.ProcessMessage(y)
if err == nil && mc != nil {
m, err := r.mp.ProcessMessage(mc)
if err == nil && m != nil {
r.sink <- m
}
}
}
}
// readSensors reads sensors from a redfish device
// See: https://redfish.dmtf.org/schemas/v1/Sensor.json
// Redfish URI: /redfish/v1/Chassis/{ChassisId}/Sensors/{SensorId}
func (r *RedfishReceiver) readSensors(
clientConfig *RedfishReceiverClientConfig,
chassis *redfish.Chassis) error {
writeTemperatureSensor := func(sensor *redfish.Sensor) {
tags := map[string]string{
"hostname": clientConfig.Hostname,
"type": "node",
// ChassisType shall indicate the physical form factor for the type of chassis
"chassis_typ": string(chassis.ChassisType),
// Chassis name
"chassis_name": chassis.Name,
// ID uniquely identifies the resource
"sensor_id": sensor.ID,
// The area or device to which this sensor measurement applies
"temperature_physical_context": string(sensor.PhysicalContext),
// Name
"temperature_name": sensor.Name,
}
// Set meta data tags
meta := map[string]string{
"source": r.name,
"group": "Temperature",
"unit": "degC",
}
r.sendMetric(clientConfig.mp, "temperature", tags, meta, sensor.Reading, time.Now())
}
writeFanSpeedSensor := func(sensor *redfish.Sensor) {
tags := map[string]string{
"hostname": clientConfig.Hostname,
"type": "node",
// ChassisType shall indicate the physical form factor for the type of chassis
"chassis_typ": string(chassis.ChassisType),
// Chassis name
"chassis_name": chassis.Name,
// ID uniquely identifies the resource
"sensor_id": sensor.ID,
// The area or device to which this sensor measurement applies
"fan_physical_context": string(sensor.PhysicalContext),
// Name
"fan_name": sensor.Name,
}
// Set meta data tags
meta := map[string]string{
"source": r.name,
"group": "FanSpeed",
"unit": string(sensor.ReadingUnits),
}
r.sendMetric(clientConfig.mp, "fan_speed", tags, meta, sensor.Reading, time.Now())
}
writePowerSensor := func(sensor *redfish.Sensor) {
// Set tags
tags := map[string]string{
"hostname": clientConfig.Hostname,
"type": "node",
// ChassisType shall indicate the physical form factor for the type of chassis
"chassis_typ": string(chassis.ChassisType),
// Chassis name
"chassis_name": chassis.Name,
// ID uniquely identifies the resource
"sensor_id": sensor.ID,
// The area or device to which this sensor measurement applies
"power_physical_context": string(sensor.PhysicalContext),
// Name
"power_name": sensor.Name,
}
// Set meta data tags
meta := map[string]string{
"source": r.name,
"group": "Energy",
"unit": "watts",
}
r.sendMetric(clientConfig.mp, "power", tags, meta, sensor.Reading, time.Now())
}
if _, ok := clientConfig.readSensorURLs[chassis.ID]; !ok {
// First time run of read sensors for this chassis
clientConfig.readSensorURLs[chassis.ID] = make([]string, 0)
// Get sensor information for this chassis
sensors, err := chassis.Sensors()
if err != nil {
return fmt.Errorf("readSensors: chassis.Sensors() failed: %v", err)
}
// Skip empty sensors information
if sensors == nil {
return nil
}
for _, sensor := range sensors {
// Skip all sensors which are not in enabled state or which are unhealthy
if sensor.Status.State != common.EnabledState || sensor.Status.Health != common.OKHealth {
continue
}
// Skip sensors with missing readings units or type
if sensor.ReadingUnits == "" || sensor.ReadingType == "" {
continue
}
// Power readings
if (sensor.ReadingType == redfish.PowerReadingType && sensor.ReadingUnits == "Watts") ||
(sensor.ReadingType == redfish.CurrentReadingType && sensor.ReadingUnits == "Watts") {
if clientConfig.isExcluded["power"] {
continue
}
clientConfig.readSensorURLs[chassis.ID] = append(clientConfig.readSensorURLs[chassis.ID], sensor.ODataID)
writePowerSensor(sensor)
continue
}
// Fan speed readings
if (sensor.ReadingType == redfish.AirFlowReadingType && sensor.ReadingUnits == "RPM") ||
(sensor.ReadingType == redfish.AirFlowReadingType && sensor.ReadingUnits == "Percent") {
// Skip, when fan_speed metric is excluded
if clientConfig.isExcluded["fan_speed"] {
continue
}
clientConfig.readSensorURLs[chassis.ID] = append(clientConfig.readSensorURLs[chassis.ID], sensor.ODataID)
writeFanSpeedSensor(sensor)
}
// Temperature readings
if sensor.ReadingType == redfish.TemperatureReadingType && sensor.ReadingUnits == "C" {
if clientConfig.isExcluded["temperature"] {
continue
}
clientConfig.readSensorURLs[chassis.ID] = append(clientConfig.readSensorURLs[chassis.ID], sensor.ODataID)
writeTemperatureSensor(sensor)
continue
}
}
} else {
common.CollectCollection(
func(uri string) {
sensor, err := redfish.GetSensor(chassis.GetClient(), uri)
if err != nil {
cclog.ComponentError(r.name, "redfish.GetSensor() for uri '", uri, "' failed")
}
// Power readings
if (sensor.ReadingType == redfish.PowerReadingType && sensor.ReadingUnits == "Watts") ||
(sensor.ReadingType == redfish.CurrentReadingType && sensor.ReadingUnits == "Watts") {
writePowerSensor(sensor)
return
}
// Fan speed readings
if (sensor.ReadingType == redfish.AirFlowReadingType && sensor.ReadingUnits == "RPM") ||
(sensor.ReadingType == redfish.AirFlowReadingType && sensor.ReadingUnits == "Percent") {
writeFanSpeedSensor(sensor)
return
}
// Temperature readings
if sensor.ReadingType == redfish.TemperatureReadingType && sensor.ReadingUnits == "C" {
writeTemperatureSensor(sensor)
return
}
},
clientConfig.readSensorURLs[chassis.ID])
}
return nil
}
// readThermalMetrics reads thermal metrics from a redfish device
// See: https://redfish.dmtf.org/schemas/v1/Thermal.json
// Redfish URI: /redfish/v1/Chassis/{ChassisId}/Thermal
// -> deprecated in favor of the ThermalSubsystem schema
// -> on Lenovo servers /redfish/v1/Chassis/{ChassisId}/ThermalSubsystem/ThermalMetrics links to /redfish/v1/Chassis/{ChassisId}/Sensors/{SensorId}
func (r *RedfishReceiver) readThermalMetrics(
clientConfig *RedfishReceiverClientConfig,
chassis *redfish.Chassis) error {
@@ -340,6 +103,13 @@ func (r *RedfishReceiver) readThermalMetrics(
"temperature_name": temperature.Name,
}
// Delete empty tags
for key, value := range tags {
if value == "" {
delete(tags, key)
}
}
// Set meta data tags
meta := map[string]string{
"source": r.name,
@@ -350,7 +120,14 @@ func (r *RedfishReceiver) readThermalMetrics(
// ReadingCelsius shall be the current value of the temperature sensor's reading.
value := temperature.ReadingCelsius
r.sendMetric(clientConfig.mp, "temperature", tags, meta, value, timestamp)
y, err := lp.New("temperature", tags, meta,
map[string]interface{}{
"value": value,
},
timestamp)
if err == nil {
r.sink <- y
}
}
for _, fan := range thermal.Fans {
@@ -384,6 +161,13 @@ func (r *RedfishReceiver) readThermalMetrics(
"fan_name": fan.Name,
}
// Delete empty tags
for key, value := range tags {
if value == "" {
delete(tags, key)
}
}
// Set meta data tags
meta := map[string]string{
"source": r.name,
@@ -391,16 +175,23 @@ func (r *RedfishReceiver) readThermalMetrics(
"unit": string(fan.ReadingUnits),
}
r.sendMetric(clientConfig.mp, "fan_speed", tags, meta, fan.Reading, timestamp)
// Reading shall be the current value of the fan sensor's reading
value := fan.Reading
y, err := lp.New("fan_speed", tags, meta,
map[string]interface{}{
"value": value,
},
timestamp)
if err == nil {
r.sink <- y
}
}
return nil
}
// readPowerMetrics reads power metrics from a redfish device
// See: https://redfish.dmtf.org/schemas/v1/Power.json
// Redfish URI: /redfish/v1/Chassis/{ChassisId}/Power
// -> deprecated in favor of the PowerSubsystem schema
func (r *RedfishReceiver) readPowerMetrics(
clientConfig *RedfishReceiverClientConfig,
chassis *redfish.Chassis) error {
@@ -480,6 +271,13 @@ func (r *RedfishReceiver) readPowerMetrics(
"power_control_name": pc.Name,
}
// Delete empty tags
for key, value := range tags {
if value == "" {
delete(tags, key)
}
}
// Set meta data tags
meta := map[string]string{
"source": r.name,
@@ -488,8 +286,23 @@ func (r *RedfishReceiver) readPowerMetrics(
"unit": "watts",
}
// Delete empty meta data tags
for key, value := range meta {
if value == "" {
delete(meta, key)
}
}
for name, value := range metrics {
r.sendMetric(clientConfig.mp, name, tags, meta, value, timestamp)
y, err := lp.New(name, tags, meta,
map[string]interface{}{
"value": value,
},
timestamp)
if err == nil {
r.sink <- y
}
}
}
@@ -498,7 +311,6 @@ func (r *RedfishReceiver) readPowerMetrics(
// readProcessorMetrics reads processor metrics from a redfish device
// See: https://redfish.dmtf.org/schemas/v1/ProcessorMetrics.json
// Redfish URI: /redfish/v1/Systems/{ComputerSystemId}/Processors/{ProcessorId}/ProcessorMetrics
func (r *RedfishReceiver) readProcessorMetrics(
clientConfig *RedfishReceiverClientConfig,
processor *redfish.Processor) error {
@@ -513,7 +325,7 @@ func (r *RedfishReceiver) readProcessorMetrics(
return nil
}
resp, err := processor.GetClient().Get(URL)
resp, err := processor.Client.Get(URL)
if err != nil {
// Skip non existing URLs
if statusCode := err.(*common.Error).HTTPReturnedStatusCode; statusCode == http.StatusNotFound {
@@ -521,7 +333,7 @@ func (r *RedfishReceiver) readProcessorMetrics(
return nil
}
return fmt.Errorf("processor.GetClient().Get(%v) failed: %+w", URL, err)
return fmt.Errorf("processor.Client.Get(%v) failed: %+w", URL, err)
}
var processorMetrics struct {
@@ -534,18 +346,11 @@ func (r *RedfishReceiver) readProcessorMetrics(
// This property shall contain the temperature, in Celsius, of the processor.
TemperatureCelsius float32 `json:"TemperatureCelsius"`
}
body, err := io.ReadAll(resp.Body)
err = json.NewDecoder(resp.Body).Decode(&processorMetrics)
if err != nil {
return fmt.Errorf("unable to read response body for processor metrics: %+w", err)
}
err = json.Unmarshal(body, &processorMetrics)
if err != nil {
return fmt.Errorf(
"unable to unmarshal JSON='%s' for processor metrics: %+w",
string(body),
err,
)
return fmt.Errorf("unable to decode JSON for processor metrics: %+w", err)
}
processorMetrics.SetClient(processor.Client)
// Set tags
tags := map[string]string{
@@ -559,6 +364,13 @@ func (r *RedfishReceiver) readProcessorMetrics(
"processor_id": processor.ID,
}
// Delete empty tags
for key, value := range tags {
if value == "" {
delete(tags, key)
}
}
// Set meta data tags
metaPower := map[string]string{
"source": r.name,
@@ -568,10 +380,15 @@ func (r *RedfishReceiver) readProcessorMetrics(
namePower := "consumed_power"
if !clientConfig.isExcluded[namePower] &&
// Some servers return "ConsumedPowerWatt":65535 instead of "ConsumedPowerWatt":null
processorMetrics.ConsumedPowerWatt != 65535 {
r.sendMetric(clientConfig.mp, namePower, tags, metaPower, processorMetrics.ConsumedPowerWatt, timestamp)
if !clientConfig.isExcluded[namePower] {
y, err := lp.New(namePower, tags, metaPower,
map[string]interface{}{
"value": processorMetrics.ConsumedPowerWatt,
},
timestamp)
if err == nil {
r.sink <- y
}
}
// Set meta data tags
metaThermal := map[string]string{
@@ -583,7 +400,14 @@ func (r *RedfishReceiver) readProcessorMetrics(
nameThermal := "temperature"
if !clientConfig.isExcluded[nameThermal] {
r.sendMetric(clientConfig.mp, nameThermal, tags, metaThermal, processorMetrics.TemperatureCelsius, timestamp)
y, err := lp.New(nameThermal, tags, metaThermal,
map[string]interface{}{
"value": processorMetrics.TemperatureCelsius,
},
timestamp)
if err == nil {
r.sink <- y
}
}
return nil
}
@@ -616,8 +440,7 @@ func (r *RedfishReceiver) readMetrics(clientConfig *RedfishReceiverClientConfig)
// Get all chassis managed by this service
isChassisListRequired :=
clientConfig.doSensors ||
clientConfig.doThermalMetrics ||
clientConfig.doThermalMetrics ||
clientConfig.doPowerMetric
var chassisList []*redfish.Chassis
if isChassisListRequired {
@@ -637,16 +460,6 @@ func (r *RedfishReceiver) readMetrics(clientConfig *RedfishReceiverClientConfig)
}
}
// Read sensors
if clientConfig.doSensors {
for _, chassis := range chassisList {
err := r.readSensors(clientConfig, chassis)
if err != nil {
return err
}
}
}
// read thermal metrics
if clientConfig.doThermalMetrics {
for _, chassis := range chassisList {
@@ -786,13 +599,11 @@ func (r *RedfishReceiver) Close() {
// NewRedfishReceiver creates a new instance of the redfish receiver
// Initialize the receiver by giving it a name and reading in the config JSON
func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
var err error
r := new(RedfishReceiver)
// Config options from config file
configJSON := struct {
Type string `json:"type"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
Type string `json:"type"`
// Maximum number of simultaneous redfish connections (default: 64)
Fanout int `json:"fanout,omitempty"`
@@ -813,27 +624,24 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
// Globally disable collection of power, processor or thermal metrics
DisablePowerMetrics bool `json:"disable_power_metrics"`
DisableProcessorMetrics bool `json:"disable_processor_metrics"`
DisableSensors bool `json:"disable_sensors"`
DisableThermalMetrics bool `json:"disable_thermal_metrics"`
// Globally excluded metrics
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
ClientConfigs []struct {
HostList string `json:"host_list"` // List of hosts with the same client configuration
Username *string `json:"username"` // User name to authenticate with
Password *string `json:"password"` // Password to use for authentication
Endpoint *string `json:"endpoint"` // URL of the redfish service
HostList []string `json:"host_list"` // List of hosts with the same client configuration
Username *string `json:"username"` // User name to authenticate with
Password *string `json:"password"` // Password to use for authentication
Endpoint *string `json:"endpoint"` // URL of the redfish service
// Per client disable collection of power,processor or thermal metrics
DisablePowerMetrics bool `json:"disable_power_metrics"`
DisableProcessorMetrics bool `json:"disable_processor_metrics"`
DisableSensors bool `json:"disable_sensors"`
DisableThermalMetrics bool `json:"disable_thermal_metrics"`
// Per client excluded metrics
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
} `json:"client_config"`
}{
// Set defaults values
@@ -852,31 +660,19 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
// Read the redfish receiver specific JSON config
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&configJSON); err != nil {
err := json.Unmarshal(config, &configJSON)
if err != nil {
cclog.ComponentError(r.name, "Error reading config:", err.Error())
return nil, err
}
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Convert interval string representation to duration
var err error
r.config.Interval, err = time.ParseDuration(configJSON.IntervalString)
if err != nil {
err := fmt.Errorf(
"failed to parse duration string interval='%s': %w",
"Failed to parse duration string interval='%s': %w",
configJSON.IntervalString,
err,
)
@@ -888,7 +684,7 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
r.config.HttpTimeout, err = time.ParseDuration(configJSON.HttpTimeoutString)
if err != nil {
err := fmt.Errorf(
"failed to parse duration string http_timeout='%s': %w",
"Failed to parse duration string http_timeout='%s': %w",
configJSON.HttpTimeoutString,
err,
)
@@ -914,7 +710,6 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
clientConfigJSON := &configJSON.ClientConfigs[i]
// Redfish endpoint
var endpoint_pattern string
if clientConfigJSON.Endpoint != nil {
endpoint_pattern = *clientConfigJSON.Endpoint
@@ -926,7 +721,6 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
return nil, err
}
// Redfish username
var username string
if clientConfigJSON.Username != nil {
username = *clientConfigJSON.Username
@@ -938,7 +732,6 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
return nil, err
}
// Redfish password
var password string
if clientConfigJSON.Password != nil {
password = *clientConfigJSON.Password
@@ -957,9 +750,6 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
doProcessorMetrics :=
!(configJSON.DisableProcessorMetrics ||
clientConfigJSON.DisableProcessorMetrics)
doSensors :=
!(configJSON.DisableSensors ||
clientConfigJSON.DisableSensors)
doThermalMetrics :=
!(configJSON.DisableThermalMetrics ||
clientConfigJSON.DisableThermalMetrics)
@@ -972,27 +762,8 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
for _, key := range configJSON.ExcludeMetrics {
isExcluded[key] = true
}
p, err = mp.NewMessageProcessor()
if err != nil {
cclog.ComponentError(r.name, err.Error())
return nil, err
}
if len(clientConfigJSON.MessageProcessor) > 0 {
err = p.FromConfigJSON(clientConfigJSON.MessageProcessor)
if err != nil {
cclog.ComponentError(r.name, err.Error())
return nil, err
}
}
hostList, err := hostlist.Expand(clientConfigJSON.HostList)
if err != nil {
err := fmt.Errorf("client config number %d failed to parse host list %s: %v",
i, clientConfigJSON.HostList, err)
cclog.ComponentError(r.name, err)
return nil, err
}
for _, host := range hostList {
for _, host := range clientConfigJSON.HostList {
// Endpoint of the redfish service
endpoint := strings.Replace(endpoint_pattern, "%h", host, -1)
@@ -1004,17 +775,14 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
isExcluded: isExcluded,
doPowerMetric: doPowerMetric,
doProcessorMetrics: doProcessorMetrics,
doSensors: doSensors,
doThermalMetrics: doThermalMetrics,
skipProcessorMetricsURL: make(map[string]bool),
readSensorURLs: map[string][]string{},
gofish: gofish.ClientConfig{
Username: username,
Password: password,
Endpoint: endpoint,
HTTPClient: httpClient,
},
mp: p,
})
}
@@ -1027,7 +795,6 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
r.config.fanout = numClients
}
// Check that at least one client config exists
if numClients == 0 {
err := fmt.Errorf("at least one client config is required")
cclog.ComponentError(r.name, err)
@@ -1039,7 +806,7 @@ func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
for i := range r.config.ClientConfigs {
host := r.config.ClientConfigs[i].Hostname
if isDuplicate[host] {
err := fmt.Errorf("found duplicate client config for host %s", host)
err := fmt.Errorf("Found duplicate client config for host %s", host)
cclog.ComponentError(r.name, err)
return nil, err
}

View File

@@ -8,26 +8,24 @@ The Redfish receiver uses the [Redfish (specification)](https://www.dmtf.org/sta
{
"<redfish receiver name>": {
"type": "redfish",
"username": "<Username>",
"password": "<Password>",
"username": "<user A>",
"password": "<password A>",
"endpoint": "https://%h-bmc",
"exclude_metrics": [ "min_consumed_watts" ],
"client_config": [
{
"host_list": "n[1,2-4]"
"host_list": [ "<host 1>", "<host 2>" ]
},
{
"host_list": "n5",
"disable_power_metrics": true,
"disable_processor_metrics": true,
"disable_thermal_metrics": true
"host_list": [ "<host 3>", "<host 4>" ]
"disable_power_metrics": true
},
{
"host_list": "n6",
"username": "<Username 2>",
"password": "<Password 2>",
"host_list": [ "<host 5>" ],
"username": "<user B>",
"password": "<password B>",
"endpoint": "https://%h-BMC",
"disable_sensor_metrics": true
"disable_thermal_metrics": true
}
]
}
@@ -43,18 +41,9 @@ Global settings:
Global and per redfish device settings (per redfish device settings overwrite the global settings):
- `disable_power_metrics`:
disable collection of power metrics
(`/redfish/v1/Chassis/{ChassisId}/Power`)
- `disable_processor_metrics`:
disable collection of processor metrics
(`/redfish/v1/Systems/{ComputerSystemId}/Processors/{ProcessorId}/ProcessorMetrics`)
- `disable_sensors`:
disable collection of fan, power and thermal sensor metrics
(`/redfish/v1/Chassis/{ChassisId}/Sensors/{SensorId}`)
- `disable_thermal_metrics`:
disable collection of thermal metrics
(`/redfish/v1/Chassis/{ChassisId}/Thermal`)
- `disable_power_metrics`: disable collection of power metrics
- `disable_processor_metrics`: disable collection of processor metrics
- `disable_thermal_metrics`: disable collection of thermal metrics
- `exclude_metrics`: list of excluded metrics
- `username`: User name to authenticate with
- `password`: Password to use for authentication

View File

@@ -5,13 +5,11 @@ import (
"fmt"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
)
// SampleReceiver configuration: receiver type, listen address, port
// The defaultReceiverConfig contains the keys 'type' and 'process_messages'
type SampleReceiverConfig struct {
defaultReceiverConfig
Type string `json:"type"`
Addr string `json:"address"`
Port string `json:"port"`
}
@@ -21,6 +19,7 @@ type SampleReceiver struct {
config SampleReceiverConfig
// Storage for static information
meta map[string]string
// Use in case of an own goroutine
// done chan bool
// wg sync.WaitGroup
@@ -80,19 +79,8 @@ func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
// The name should be chosen in such a way that different instances of SampleReceiver can be distinguished
r.name = fmt.Sprintf("SampleReceiver(%s)", name)
// create new message processor
p, err := mp.NewMessageProcessor()
if err != nil {
cclog.ComponentError(r.name, "Initialization of message processor failed:", err.Error())
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
r.mp = p
// Set static information
err = r.mp.AddAddMetaByCondition("true", "source", r.name)
if err != nil {
cclog.ComponentError(r.name, fmt.Sprintf("Failed to add static information source=%s:", r.name), err.Error())
return nil, fmt.Errorf("failed to add static information source=%s: %v", r.name, err.Error())
}
r.meta = map[string]string{"source": r.name}
// Set defaults in r.config
// Allow overwriting these defaults by reading config JSON
@@ -106,15 +94,6 @@ func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
}
}
// Add message processor config
if len(r.config.MessageProcessor) > 0 {
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
if err != nil {
cclog.ComponentError(r.name, "Failed parsing JSON for message processor:", err.Error())
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Check that all required fields in the configuration are set
// Use 'if len(r.config.Option) > 0' for strings

View File

@@ -1,23 +1,22 @@
{
"process_messages" : {
"add_tag_if": [
{
"key" : "cluster",
"value" : "testcluster",
"if" : "true"
},
{
"key" : "test",
"value" : "testing",
"if" : "name == 'temp_package_id_0'"
}
],
"delete_tag_if": [
{
"key" : "unit",
"if" : "true"
}
]
},
"add_tags" : [
{
"key" : "cluster",
"value" : "testcluster",
"if" : "*"
},
{
"key" : "test",
"value" : "testing",
"if" : "name == 'temp_package_id_0'"
}
],
"delete_tags" : [
{
"key" : "unit",
"value" : "*",
"if" : "*"
}
],
"interval_timestamp" : true
}

View File

@@ -25,7 +25,7 @@ CC_USER=clustercockpit
CC_GROUP=clustercockpit
CONF_DIR=/etc/cc-metric-collector
PID_FILE=/var/run/$NAME.pid
DAEMON=/usr/bin/$NAME
DAEMON=/usr/sbin/$NAME
CONF_FILE=${CONF_DIR}/cc-metric-collector.json
umask 0027

View File

@@ -3,6 +3,7 @@ Description=ClusterCockpit metric collector
Documentation=https://github.com/ClusterCockpit/cc-metric-collector
Wants=network-online.target
After=network-online.target
After=postgresql.service mariadb.service mysql.service
[Service]
EnvironmentFile=/etc/default/cc-metric-collector
@@ -13,7 +14,7 @@ Restart=on-failure
WorkingDirectory=/tmp
RuntimeDirectory=cc-metric-collector
RuntimeDirectoryMode=0750
ExecStart=/usr/bin/cc-metric-collector --config=${CONF_FILE}
ExecStart=/usr/sbin/cc-metric-collector --config=${CONF_FILE}
LimitNOFILE=10000
TimeoutStopSec=20
UMask=0027

View File

@@ -10,8 +10,6 @@ BuildRequires: go-toolset
BuildRequires: systemd-rpm-macros
# for header downloads
BuildRequires: wget
# Recommended when using the sysusers_create_package macro
Requires(pre): /usr/bin/systemd-sysusers
Provides: %{name} = %{version}
@@ -29,7 +27,7 @@ make
%install
install -Dpm 0750 %{name} %{buildroot}%{_bindir}/%{name}
install -Dpm 0750 %{name} %{buildroot}%{_sbindir}/%{name}
install -Dpm 0600 config.json %{buildroot}%{_sysconfdir}/%{name}/%{name}.json
install -Dpm 0600 collectors.json %{buildroot}%{_sysconfdir}/%{name}/collectors.json
install -Dpm 0600 sinks.json %{buildroot}%{_sysconfdir}/%{name}/sinks.json
@@ -44,7 +42,7 @@ install -Dpm 0644 scripts/%{name}.sysusers %{buildroot}%{_sysusersdir}/%{name}.c
# go test should be here... :)
%pre
%sysusers_create_package %{name} scripts/%{name}.sysusers
%sysusers_create_package scripts/%{name}.sysusers
%post
%systemd_post %{name}.service
@@ -54,7 +52,7 @@ install -Dpm 0644 scripts/%{name}.sysusers %{buildroot}%{_sysusersdir}/%{name}.c
%files
# Binary
%attr(-,clustercockpit,clustercockpit) %{_bindir}/%{name}
%attr(-,clustercockpit,clustercockpit) %{_sbindir}/%{name}
# Config
%dir %{_sysconfdir}/%{name}
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/%{name}.json

View File

@@ -45,7 +45,7 @@ def group_to_json(groupfile):
if "PWR" in calc:
scope = "socket"
m = {"name" : metric, "calc": calc, "type" : scope, "publish" : True}
m = {"name" : metric, "calc": calc, "scope" : scope, "publish" : True}
metrics.append(m)
return {"events" : events, "metrics" : metrics}

View File

@@ -17,7 +17,7 @@ This folder contains the SinkManager and sink implementations for the cc-metric-
The configuration file for the sinks is a list of configurations. The `type` field in each specifies which sink to initialize.
```json
{
[
"mystdout" : {
"type" : "stdout",
"meta_as_tags" : [
@@ -31,7 +31,7 @@ The configuration file for the sinks is a list of configurations. The `type` fie
"database" : "ccmetric",
"password" : "<jwt token>"
}
}
]
```

View File

@@ -4,10 +4,10 @@ import (
"fmt"
"strings"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
func GangliaMetricName(point lp.CCMessage) string {
func GangliaMetricName(point lp.CCMetric) string {
name := point.Name()
metricType, typeOK := point.GetTag("type")
metricTid, tidOk := point.GetTag("type-id")
@@ -39,7 +39,7 @@ func GangliaMetricRename(name string) string {
return name
}
func GangliaSlopeType(point lp.CCMessage) uint {
func GangliaSlopeType(point lp.CCMetric) uint {
name := point.Name()
if name == "mem_total" || name == "swap_total" {
return 0
@@ -151,7 +151,7 @@ type GangliaMetricConfig struct {
Name string
}
func GetCommonGangliaConfig(point lp.CCMessage) GangliaMetricConfig {
func GetCommonGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
mname := GangliaMetricRename(point.Name())
if oldname, ok := point.GetMeta("oldname"); ok {
mname = GangliaMetricRename(oldname)
@@ -207,7 +207,7 @@ func GetCommonGangliaConfig(point lp.CCMessage) GangliaMetricConfig {
}
}
func GetGangliaConfig(point lp.CCMessage) GangliaMetricConfig {
func GetGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
mname := GangliaMetricRename(point.Name())
if oldname, ok := point.GetMeta("oldname"); ok {
mname = GangliaMetricRename(oldname)

View File

@@ -1,7 +1,6 @@
package sinks
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@@ -10,9 +9,8 @@ import (
// "time"
"os/exec"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
const GMETRIC_EXEC = `gmetric`
@@ -36,53 +34,50 @@ type GangliaSink struct {
config GangliaSinkConfig
}
func (s *GangliaSink) Write(msg lp.CCMessage) error {
func (s *GangliaSink) Write(point lp.CCMetric) error {
var err error = nil
//var tagsstr []string
var argstr []string
point, err := s.mp.ProcessMessage(msg)
if err == nil && point != nil {
// Get metric config (type, value, ... in suitable format)
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}
if s.config.AddGangliaGroup {
argstr = append(argstr, fmt.Sprintf("--group=%s", conf.Group))
}
if s.config.AddUnits && len(conf.Unit) > 0 {
argstr = append(argstr, fmt.Sprintf("--units=%s", conf.Unit))
}
if len(s.config.ClusterName) > 0 {
argstr = append(argstr, fmt.Sprintf("--cluster=%s", s.config.ClusterName))
}
// if s.config.AddTagsAsDesc && len(tagsstr) > 0 {
// argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
// }
if len(s.gmetric_config) > 0 {
argstr = append(argstr, fmt.Sprintf("--conf=%s", s.gmetric_config))
}
if s.config.AddTypeToName {
argstr = append(argstr, fmt.Sprintf("--name=%s", GangliaMetricName(point)))
} else {
argstr = append(argstr, fmt.Sprintf("--name=%s", conf.Name))
}
argstr = append(argstr, fmt.Sprintf("--slope=%s", conf.Slope))
argstr = append(argstr, fmt.Sprintf("--value=%s", conf.Value))
argstr = append(argstr, fmt.Sprintf("--type=%s", conf.Type))
argstr = append(argstr, fmt.Sprintf("--tmax=%d", conf.Tmax))
cclog.ComponentDebug(s.name, s.gmetric_path, strings.Join(argstr, " "))
command := exec.Command(s.gmetric_path, argstr...)
command.Wait()
_, err = command.Output()
// Get metric config (type, value, ... in suitable format)
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}
if s.config.AddGangliaGroup {
argstr = append(argstr, fmt.Sprintf("--group=%s", conf.Group))
}
if s.config.AddUnits && len(conf.Unit) > 0 {
argstr = append(argstr, fmt.Sprintf("--units=%s", conf.Unit))
}
if len(s.config.ClusterName) > 0 {
argstr = append(argstr, fmt.Sprintf("--cluster=%s", s.config.ClusterName))
}
// if s.config.AddTagsAsDesc && len(tagsstr) > 0 {
// argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
// }
if len(s.gmetric_config) > 0 {
argstr = append(argstr, fmt.Sprintf("--conf=%s", s.gmetric_config))
}
if s.config.AddTypeToName {
argstr = append(argstr, fmt.Sprintf("--name=%s", GangliaMetricName(point)))
} else {
argstr = append(argstr, fmt.Sprintf("--name=%s", conf.Name))
}
argstr = append(argstr, fmt.Sprintf("--slope=%s", conf.Slope))
argstr = append(argstr, fmt.Sprintf("--value=%s", conf.Value))
argstr = append(argstr, fmt.Sprintf("--type=%s", conf.Type))
argstr = append(argstr, fmt.Sprintf("--tmax=%d", conf.Tmax))
cclog.ComponentDebug(s.name, s.gmetric_path, strings.Join(argstr, " "))
command := exec.Command(s.gmetric_path, argstr...)
command.Wait()
_, err = command.Output()
return err
}
@@ -99,22 +94,14 @@ func NewGangliaSink(name string, config json.RawMessage) (Sink, error) {
s.config.AddTagsAsDesc = false
s.config.AddGangliaGroup = false
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return nil, err
}
}
s.gmetric_path = ""
s.gmetric_config = ""
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.GmetricPath) > 0 {
p, err := exec.LookPath(s.config.GmetricPath)
if err == nil {
@@ -133,15 +120,5 @@ func NewGangliaSink(name string, config json.RawMessage) (Sink, error) {
if len(s.config.GmetricConfig) > 0 {
s.gmetric_config = s.config.GmetricConfig
}
if len(s.config.MessageProcessor) > 0 {
err = s.mp.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
}
return s, nil
}

View File

@@ -8,18 +8,14 @@ The `ganglia` sink uses the `gmetric` tool of the [Ganglia Monitoring System](ht
{
"<name>": {
"type": "ganglia",
"meta_as_tags" : true,
"gmetric_path" : "/path/to/gmetric",
"add_ganglia_group" : true,
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"add_ganglia_group" : true
}
}
```
- `type`: makes the sink a `ganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmetric_path`: Path to `gmetric` executable (optional). If not given, the sink searches in `$PATH` for `gmetric`.
- `add_ganglia_group`: Add `--group=X` based on meta information to the `gmetric` call. Some old versions of `gmetric` do not support the `--group` option.
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
- `add_ganglia_group`: Add `--group=X` based on meta information to the `gmetric` call. Some old versions of `gmetric` do not support the `--group` option.

View File

@@ -9,143 +9,81 @@ import (
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
"golang.org/x/exp/slices"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influx "github.com/influxdata/line-protocol"
)
type HttpSinkConfig struct {
defaultSinkConfig
// The full URL of the endpoint
URL string `json:"url"`
// JSON web tokens for authentication (Using the *Bearer* scheme)
JWT string `json:"jwt,omitempty"`
// Basic authentication
Username string `json:"username"`
Password string `json:"password"`
useBasicAuth bool
// time limit for requests made by the http client
Timeout string `json:"timeout,omitempty"`
timeout time.Duration
// Maximum amount of time an idle (keep-alive) connection will remain idle before closing itself
// should be larger than the measurement interval to keep the connection open
URL string `json:"url,omitempty"`
JWT string `json:"jwt,omitempty"`
Timeout string `json:"timeout,omitempty"`
IdleConnTimeout string `json:"idle_connection_timeout,omitempty"`
idleConnTimeout time.Duration
// Batch all writes arriving during this duration
// (default '5s', batching can be disabled by setting it to 0)
FlushDelay string `json:"flush_delay,omitempty"`
flushDelay time.Duration
// Maximum number of retries to connect to the http server (default: 3)
MaxRetries int `json:"max_retries,omitempty"`
// Timestamp precision
Precision string `json:"precision,omitempty"`
FlushDelay string `json:"flush_delay,omitempty"`
MaxRetries int `json:"max_retries,omitempty"`
}
type HttpSink struct {
sink
client *http.Client
// influx line protocol encoder
encoder influx.Encoder
// Flush() runs in another goroutine and accesses the influx line protocol encoder,
// so this encoderLock has to protect the encoder
encoderLock sync.Mutex
// timer to run Flush()
flushTimer *time.Timer
// Lock to assure that only one timer is running at a time
timerLock sync.Mutex
config HttpSinkConfig
client *http.Client
encoder *influx.Encoder
lock sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
buffer *bytes.Buffer
flushTimer *time.Timer
config HttpSinkConfig
idleConnTimeout time.Duration
timeout time.Duration
flushDelay time.Duration
}
// Write sends metric m as an HTTP message
func (s *HttpSink) Write(msg lp.CCMessage) error {
// submit m only after applying processing/dropping rules
m, err := s.mp.ProcessMessage(msg)
if err == nil && m != nil {
// Lock for encoder usage
s.encoderLock.Lock()
err = EncoderAdd(&s.encoder, m)
// Unlock encoder usage
s.encoderLock.Unlock()
// Check that encoding worked
if err != nil {
return fmt.Errorf("encoding failed: %v", err)
}
func (s *HttpSink) Write(m lp.CCMetric) error {
p := m.ToPoint(s.meta_as_tags)
s.lock.Lock()
firstWriteOfBatch := s.buffer.Len() == 0
_, err := s.encoder.Encode(p)
s.lock.Unlock()
if err != nil {
cclog.ComponentError(s.name, "encoding failed:", err.Error())
return err
}
if s.config.flushDelay == 0 {
// Directly flush if no flush delay is configured
if s.flushDelay == 0 {
return s.Flush()
} else if s.timerLock.TryLock() {
}
// Setup flush timer when flush delay is configured
// and no other timer is already running
if s.flushTimer != nil {
// Restarting existing flush timer
cclog.ComponentDebug(s.name, "Write(): Restarting flush timer")
s.flushTimer.Reset(s.config.flushDelay)
if firstWriteOfBatch {
if s.flushTimer == nil {
s.flushTimer = time.AfterFunc(s.flushDelay, func() {
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "flush failed:", err.Error())
}
})
} else {
// Creating and starting flush timer
cclog.ComponentDebug(s.name, "Write(): Starting new flush timer")
s.flushTimer = time.AfterFunc(
s.config.flushDelay,
func() {
defer s.timerLock.Unlock()
cclog.ComponentDebug(s.name, "Starting flush triggered by flush timer")
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "Flush triggered by flush timer: flush failed:", err)
}
})
s.flushTimer.Reset(s.flushDelay)
}
}
return nil
}
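The timer logic above implements a debounced flush: the first write after a flush arms a timer, and all writes arriving within the delay are batched into one request. A self-contained sketch of the same `time.AfterFunc` + `TryLock` pattern (all names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type batcher struct {
	mu        sync.Mutex // protects buf
	buf       []string
	timerLock sync.Mutex // ensures at most one armed timer
	delay     time.Duration
}

func (b *batcher) Write(line string) {
	b.mu.Lock()
	b.buf = append(b.buf, line)
	b.mu.Unlock()
	// Arm the flush timer only if no timer is currently pending.
	if b.timerLock.TryLock() {
		time.AfterFunc(b.delay, func() {
			defer b.timerLock.Unlock()
			b.Flush()
		})
	}
}

func (b *batcher) Flush() {
	b.mu.Lock()
	n := len(b.buf)
	b.buf = b.buf[:0]
	b.mu.Unlock()
	fmt.Println("flushed", n, "lines")
}

func main() {
	b := &batcher{delay: 100 * time.Millisecond}
	for i := 0; i < 5; i++ {
		b.Write("metric")
	}
	time.Sleep(200 * time.Millisecond) // let the timer fire: prints "flushed 5 lines"
}
```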
// Flush sends all metrics stored in encoder to HTTP server
func (s *HttpSink) Flush() error {
// Lock for encoder usage
// Own lock for as short as possible: the time it takes to clone the buffer.
s.encoderLock.Lock()
buf := slices.Clone(s.encoder.Bytes())
s.encoder.Reset()
// Unlock encoder usage
s.encoderLock.Unlock()
// Own lock for as short as possible: the time it takes to copy the buffer.
s.lock.Lock()
buf := make([]byte, s.buffer.Len())
copy(buf, s.buffer.Bytes())
s.buffer.Reset()
s.lock.Unlock()
if len(buf) == 0 {
return nil
}
cclog.ComponentDebug(s.name, "Flush(): Flushing")
var res *http.Response
for i := 0; i < s.config.MaxRetries; i++ {
// Create new request to send buffer
req, err := http.NewRequest(http.MethodPost, s.config.URL, bytes.NewReader(buf))
if err != nil {
cclog.ComponentError(s.name, "Flush(): Failed to create HTTP request:", err)
cclog.ComponentError(s.name, "failed to create request:", err.Error())
return err
}
@@ -154,15 +92,10 @@ func (s *HttpSink) Flush() error {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.config.JWT))
}
// Set basic authentication
if s.config.useBasicAuth {
req.SetBasicAuth(s.config.Username, s.config.Password)
}
// Do request
res, err = s.client.Do(req)
if err != nil {
cclog.ComponentError(s.name, "Flush(): transport/tcp error:", err)
cclog.ComponentError(s.name, "transport/tcp error:", err.Error())
// Wait between retries
time.Sleep(time.Duration(i+1) * (time.Second / 2))
continue
@@ -178,7 +111,7 @@ func (s *HttpSink) Flush() error {
// Handle application errors
if res.StatusCode != http.StatusOK {
err := errors.New(res.Status)
cclog.ComponentError(s.name, "Flush(): Application error:", err)
cclog.ComponentError(s.name, "application error:", err.Error())
return err
}
@@ -186,121 +119,62 @@ func (s *HttpSink) Flush() error {
}
func (s *HttpSink) Close() {
cclog.ComponentDebug(s.name, "Closing HTTP connection")
// Stop existing timer and immediately flush
if s.flushTimer != nil {
if ok := s.flushTimer.Stop(); ok {
s.timerLock.Unlock()
}
}
// Flush
s.flushTimer.Stop()
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "Close(): Flush failed:", err)
cclog.ComponentError(s.name, "flush failed:", err.Error())
}
s.client.CloseIdleConnections()
}
// NewHttpSink creates a new http sink
func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
s := new(HttpSink)
// Set default values
s.name = fmt.Sprintf("HttpSink(%s)", name)
// should be larger than the measurement interval to keep the connection open
s.config.IdleConnTimeout = "120s"
s.config.IdleConnTimeout = "120s" // should be larger than the measurement interval.
s.config.Timeout = "5s"
s.config.FlushDelay = "5s"
s.config.MaxRetries = 3
s.config.Precision = "s"
cclog.ComponentDebug(s.name, "Init()")
// Read config
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
if len(s.config.URL) == 0 {
return nil, errors.New("`url` config option is required for HTTP sink")
}
// Check basic authentication config
if len(s.config.Username) > 0 || len(s.config.Password) > 0 {
s.config.useBasicAuth = true
}
if s.config.useBasicAuth && len(s.config.Username) == 0 {
return nil, errors.New("basic authentication requires username")
}
if s.config.useBasicAuth && len(s.config.Password) == 0 {
return nil, errors.New("basic authentication requires password")
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.IdleConnTimeout) > 0 {
t, err := time.ParseDuration(s.config.IdleConnTimeout)
if err == nil {
cclog.ComponentDebug(s.name, "Init(): idleConnTimeout", t)
s.config.idleConnTimeout = t
s.idleConnTimeout = t
}
}
if len(s.config.Timeout) > 0 {
t, err := time.ParseDuration(s.config.Timeout)
if err == nil {
s.config.timeout = t
cclog.ComponentDebug(s.name, "Init(): timeout", t)
s.timeout = t
}
}
if len(s.config.FlushDelay) > 0 {
t, err := time.ParseDuration(s.config.FlushDelay)
if err == nil {
s.config.flushDelay = t
cclog.ComponentDebug(s.name, "Init(): flushDelay", t)
}
}
if len(s.config.MessageProcessor) > 0 {
err = p.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
s.flushDelay = t
}
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
s.meta_as_tags[k] = true
}
precision := influx.Second
if len(s.config.Precision) > 0 {
switch s.config.Precision {
case "s":
precision = influx.Second
case "ms":
precision = influx.Millisecond
case "us":
precision = influx.Microsecond
case "ns":
precision = influx.Nanosecond
}
tr := &http.Transport{
MaxIdleConns: 1, // We will only ever talk to one host.
IdleConnTimeout: s.idleConnTimeout,
}
// Create http client
s.client = &http.Client{
Transport: &http.Transport{
MaxIdleConns: 1, // We will only ever talk to one host.
IdleConnTimeout: s.config.idleConnTimeout,
},
Timeout: s.config.timeout,
}
// Configure influx line protocol encoder
s.encoder.SetPrecision(precision)
s.client = &http.Client{Transport: tr, Timeout: s.timeout}
s.buffer = &bytes.Buffer{}
s.encoder = influx.NewEncoder(s.buffer)
s.encoder.SetPrecision(time.Second)
return s, nil
}
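The precision switch above maps the config string onto `lineprotocol` constants; a small helper under the same assumptions (`s`/`ms`/`us`/`ns`, defaulting to seconds; `influx` is the import alias used in this file) would avoid repeating the switch in every sink — a sketch, not part of the diff:

```go
// parsePrecision maps a config string to a line-protocol precision,
// falling back to seconds for empty or unknown values.
func parsePrecision(p string) influx.Precision {
	switch p {
	case "ms":
		return influx.Millisecond
	case "us":
		return influx.Microsecond
	case "ns":
		return influx.Nanosecond
	default:
		return influx.Second
	}
}
```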

View File

@@ -8,37 +8,22 @@ The `http` sink uses POST requests to an HTTP server to submit the metrics in the
{
"<name>": {
"type": "http",
"meta_as_tags" : true,
"url" : "https://my-monitoring.example.com:1234/api/write",
"jwt" : "blabla.blabla.blabla",
"username": "myUser",
"password": "myPW",
"timeout": "5s",
"max_idle_connections" : 10,
"idle_connection_timeout" : "5s",
"flush_delay": "2s",
"batch_size": 1000,
"precision": "s",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
}
}
```
- `type`: makes the sink an `http` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `url`: The full URL of the endpoint
- `jwt`: JSON web tokens for authentication (Using the *Bearer* scheme)
- `username`: username for basic authentication
- `password`: password for basic authentication
- `jwt`: JSON web tokens for authentication (Using the *Bearer* scheme)
- `timeout`: General timeout for the HTTP client (default '5s')
- `max_retries`: Maximum number of retries to connect to the http server
- `idle_connection_timeout`: Timeout for idle connections (default '120s'). Should be larger than the measurement interval to keep the connection open
- `max_idle_connections`: Maximum number of idle connections (default 10)
- `idle_connection_timeout`: Timeout for idle connections (default '5s')
- `flush_delay`: Batch all writes arriving during this duration (default '1s', batching can be disabled by setting it to 0)
- `batch_size`: Maximal batch size. If `batch_size` is reached before the end of `flush_delay`, the metrics are sent without further delay
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
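For illustration, a flush of this sink boils down to a POST of line-protocol text with an optional `Authorization: Bearer` header; a minimal client-side sketch using the placeholder endpoint and token from the example above:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := []byte("cpu_load,hostname=node01 value=1.5 1668600000\n")
	req, err := http.NewRequest(http.MethodPost,
		"https://my-monitoring.example.com:1234/api/write", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer blabla.blabla.blabla")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
}
```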
### Using `http` sink for communication with cc-metric-store
The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.

View File

@@ -1,7 +1,6 @@
package sinks
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
@@ -10,9 +9,8 @@ import (
"strings"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
influxdb2ApiHttp "github.com/influxdata/influxdb-client-go/v2/api/http"
@@ -37,8 +35,6 @@ type InfluxAsyncSinkConfig struct {
InfluxMaxRetryTime string `json:"max_retry_time,omitempty"`
CustomFlushInterval string `json:"custom_flush_interval,omitempty"`
MaxRetryAttempts uint `json:"max_retry_attempts,omitempty"`
// Timestamp precision
Precision string `json:"precision,omitempty"`
}
type InfluxAsyncSink struct {
@@ -96,22 +92,7 @@ func (s *InfluxAsyncSink) connect() error {
&tls.Config{
InsecureSkipVerify: true,
},
)
precision := time.Second
if len(s.config.Precision) > 0 {
switch s.config.Precision {
case "s":
precision = time.Second
case "ms":
precision = time.Millisecond
case "us":
precision = time.Microsecond
case "ns":
precision = time.Nanosecond
}
}
clientOptions.SetPrecision(precision)
).SetPrecision(time.Second)
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
s.writeApi = s.client.WriteAPI(s.config.Organization, s.config.Database)
@@ -130,7 +111,7 @@ func (s *InfluxAsyncSink) connect() error {
return nil
}
func (s *InfluxAsyncSink) Write(m lp.CCMessage) error {
func (s *InfluxAsyncSink) Write(m lp.CCMetric) error {
if s.customFlushInterval != 0 && s.flushTimer == nil {
// Run a batched flush for all lines that have arrived in the defined interval
s.flushTimer = time.AfterFunc(s.customFlushInterval, func() {
@@ -139,10 +120,9 @@ func (s *InfluxAsyncSink) Write(m lp.CCMessage) error {
}
})
}
msg, err := s.mp.ProcessMessage(m)
if err == nil && msg != nil {
s.writeApi.WritePoint(msg.ToPoint(nil))
}
s.writeApi.WritePoint(
m.ToPoint(s.meta_as_tags),
)
return nil
}
@@ -177,7 +157,6 @@ func NewInfluxAsyncSink(name string, config json.RawMessage) (Sink, error) {
s.config.CustomFlushInterval = ""
s.customFlushInterval = time.Duration(0)
s.config.MaxRetryAttempts = 1
s.config.Precision = "s"
// Default retry intervals (in seconds)
// 1 2
@@ -201,43 +180,27 @@ func NewInfluxAsyncSink(name string, config json.RawMessage) (Sink, error) {
// 262144 524288
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
if len(s.config.Port) == 0 {
return nil, errors.New("missing port configuration required by InfluxSink")
return nil, errors.New("Missing port configuration required by InfluxSink")
}
if len(s.config.Database) == 0 {
return nil, errors.New("missing database configuration required by InfluxSink")
return nil, errors.New("Missing database configuration required by InfluxSink")
}
if len(s.config.Organization) == 0 {
return nil, errors.New("missing organization configuration required by InfluxSink")
return nil, errors.New("Missing organization configuration required by InfluxSink")
}
if len(s.config.Password) == 0 {
return nil, errors.New("missing password configuration required by InfluxSink")
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.MessageProcessor) > 0 {
err = s.mp.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
return nil, errors.New("Missing password configuration required by InfluxSink")
}
// Create lookup map to use meta infos as tags in the output metric
// s.meta_as_tags = make(map[string]bool)
// for _, k := range s.config.MetaAsTags {
// s.meta_as_tags[k] = true
// }
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
s.meta_as_tags[k] = true
}
toUint := func(duration string, def uint) uint {

View File

@@ -9,6 +9,7 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
{
"<name>": {
"type": "influxasync",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
@@ -19,18 +20,14 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
"batch_size": 200,
"retry_interval" : "1s",
"retry_exponential_base" : 2,
"precision": "s",
"max_retries": 20,
"max_retry_time" : "168h",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"max_retry_time" : "168h"
}
}
```
- `type`: makes the sink an `influxasync` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are written to this bucket
- `host`: Hostname of the InfluxDB database server
- `port`: Port number (as string) of the InfluxDB database server
@@ -43,12 +40,5 @@ The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go
- `retry_exponential_base`: The retry interval is exponentially increased with this base, default 2
- `max_retries`: Maximal number of retry attempts
- `max_retry_time`: Maximal time to retry failed writes, default 168h (one week)
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
For information about the calculation of the retry interval settings, see [official influxdb-client-go documentation](https://github.com/influxdata/influxdb-client-go#handling-of-failed-async-writes)
### Using `influxasync` sink for communication with cc-metric-store
The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.
For information about the calculation of the retry interval settings, see [official influxdb-client-go documentation](https://github.com/influxdata/influxdb-client-go#handling-of-failed-async-writes)
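As a worked example of those settings: with a `retry_interval` of 1s and `retry_exponential_base` of 2, the delay before attempt n is roughly 1s·2^(n-1), which matches the 1, 2, 4, …, 262144, 524288 progression listed as a comment in the source above. A sketch of that schedule (ignoring the `max_retry_interval` cap the client applies):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	base := 2.0             // retry_exponential_base
	interval := time.Second // retry_interval
	for n := 0; n < 20; n++ { // max_retries
		delay := time.Duration(float64(interval) * math.Pow(base, float64(n)))
		fmt.Printf("attempt %2d: wait %v\n", n+1, delay)
	}
}
```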

View File

@@ -1,7 +1,6 @@
package sinks
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
@@ -10,13 +9,11 @@ import (
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
"golang.org/x/exp/slices"
"github.com/influxdata/influxdb-client-go/v2/api/write"
)
type InfluxSink struct {
@@ -35,51 +32,20 @@ type InfluxSink struct {
// Maximum number of points sent to server in single request.
// Default: 1000
BatchSize int `json:"batch_size,omitempty"`
// Time interval for delayed sending of metrics.
// If the buffers are already filled before the end of this interval,
// the metrics are sent without further delay.
// Default: 1s
FlushInterval string `json:"flush_delay,omitempty"`
flushDelay time.Duration
// Influx client options:
// HTTP request timeout
HTTPRequestTimeout string `json:"http_request_timeout"`
// Retry interval
InfluxRetryInterval string `json:"retry_interval,omitempty"`
// maximum delay between each retry attempt
InfluxMaxRetryInterval string `json:"max_retry_interval,omitempty"`
// base for the exponential retry delay
InfluxExponentialBase uint `json:"retry_exponential_base,omitempty"`
// maximum count of retry attempts of failed writes
InfluxMaxRetries uint `json:"max_retries,omitempty"`
// maximum total retry timeout
InfluxMaxRetryTime string `json:"max_retry_time,omitempty"`
// Specify whether to use GZip compression in write requests
InfluxUseGzip bool `json:"use_gzip"`
// Timestamp precision
Precision string `json:"precision,omitempty"`
// Number of metrics that are dropped when buffer is full
// Default: 100
DropRate int `json:"drop_rate,omitempty"`
}
// influx line protocol encoder
encoder influx.Encoder
// number of records stored in the encoder
numRecordsInEncoder int
// List of tags and meta data tags which should be used as tags
extended_tag_list []key_value_pair
// Flush() runs in another goroutine and accesses the influx line protocol encoder,
// so this encoderLock has to protect the encoder and numRecordsInEncoder
encoderLock sync.Mutex
// timer to run Flush()
flushTimer *time.Timer
// Lock to assure that only one timer is running at a time
timerLock sync.Mutex
// WaitGroup to ensure only one send operation is running at a time
sendWaitGroup sync.WaitGroup
batch []*write.Point
flushTimer *time.Timer
flushDelay time.Duration
batchMutex sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
flushTimerMutex sync.Mutex // Ensure only one flush timer is running
}
// connect connects to the InfluxDB server
@@ -104,7 +70,7 @@ func (s *InfluxSink) connect() error {
} else {
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
}
cclog.ComponentDebug(s.name, "connect():",
cclog.ComponentDebug(s.name,
"Using URI='"+uri+"'",
"Org='"+s.config.Organization+"'",
"Bucket='"+s.config.Database+"'")
@@ -112,95 +78,6 @@ func (s *InfluxSink) connect() error {
// Set influxDB client options
clientOptions := influxdb2.DefaultOptions()
// set HTTP request timeout
if len(s.config.HTTPRequestTimeout) > 0 {
if t, err := time.ParseDuration(s.config.HTTPRequestTimeout); err == nil {
httpRequestTimeout := uint(t.Seconds())
clientOptions.SetHTTPRequestTimeout(httpRequestTimeout)
} else {
cclog.ComponentError(s.name, "connect():", "Failed to parse duration for HTTP RequestTimeout: ", s.config.HTTPRequestTimeout)
}
}
cclog.ComponentDebug(
s.name,
"connect():",
"Influx client options HTTPRequestTimeout:",
time.Second*time.Duration(clientOptions.HTTPRequestTimeout()))
// Set retry interval
if len(s.config.InfluxRetryInterval) > 0 {
if t, err := time.ParseDuration(s.config.InfluxRetryInterval); err == nil {
influxRetryInterval := uint(t.Milliseconds())
clientOptions.SetRetryInterval(influxRetryInterval)
} else {
cclog.ComponentError(s.name, "connect():", "Failed to parse duration for Influx RetryInterval: ", s.config.InfluxRetryInterval)
}
}
cclog.ComponentDebug(
s.name,
"connect():",
"Influx client options RetryInterval:",
time.Millisecond*time.Duration(clientOptions.RetryInterval()))
// Set the maximum delay between each retry attempt
if len(s.config.InfluxMaxRetryInterval) > 0 {
if t, err := time.ParseDuration(s.config.InfluxMaxRetryInterval); err == nil {
influxMaxRetryInterval := uint(t.Milliseconds())
clientOptions.SetMaxRetryInterval(influxMaxRetryInterval)
} else {
cclog.ComponentError(s.name, "connect():", "Failed to parse duration for Influx MaxRetryInterval: ", s.config.InfluxMaxRetryInterval)
}
}
cclog.ComponentDebug(
s.name,
"connect():",
"Influx client options MaxRetryInterval:",
time.Millisecond*time.Duration(clientOptions.MaxRetryInterval()))
// Set the base for the exponential retry delay
if s.config.InfluxExponentialBase != 0 {
clientOptions.SetExponentialBase(s.config.InfluxExponentialBase)
}
cclog.ComponentDebug(
s.name,
"connect():",
"Influx client options ExponentialBase:",
clientOptions.ExponentialBase())
// Set maximum count of retry attempts of failed writes
if s.config.InfluxMaxRetries != 0 {
clientOptions.SetMaxRetries(s.config.InfluxMaxRetries)
}
cclog.ComponentDebug(
s.name,
"connect():",
"Influx client options MaxRetries:",
clientOptions.MaxRetries())
// Set the maximum total retry timeout
if len(s.config.InfluxMaxRetryTime) > 0 {
if t, err := time.ParseDuration(s.config.InfluxMaxRetryTime); err == nil {
influxMaxRetryTime := uint(t.Milliseconds())
cclog.ComponentDebug(s.name, "connect():", "MaxRetryTime", s.config.InfluxMaxRetryTime)
clientOptions.SetMaxRetryTime(influxMaxRetryTime)
} else {
cclog.ComponentError(s.name, "connect():", "Failed to parse duration for Influx MaxRetryTime: ", s.config.InfluxMaxRetryTime)
}
}
cclog.ComponentDebug(
s.name,
"connect():",
"Influx client options MaxRetryTime:",
time.Millisecond*time.Duration(clientOptions.MaxRetryTime()))
// Specify whether to use GZip compression in write requests
clientOptions.SetUseGZip(s.config.InfluxUseGzip)
cclog.ComponentDebug(
s.name,
"connect():",
"Influx client options UseGZip:",
clientOptions.UseGZip())
// Do not check InfluxDB certificate
clientOptions.SetTLSConfig(
&tls.Config{
@@ -208,21 +85,7 @@ func (s *InfluxSink) connect() error {
},
)
// Set time precision
precision := time.Second
if len(s.config.Precision) > 0 {
switch s.config.Precision {
case "s":
precision = time.Second
case "ms":
precision = time.Millisecond
case "us":
precision = time.Microsecond
case "ns":
precision = time.Nanosecond
}
}
clientOptions.SetPrecision(precision)
clientOptions.SetPrecision(time.Second)
// Create new writeAPI
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
@@ -239,192 +102,95 @@ func (s *InfluxSink) connect() error {
return nil
}
// Write sends metric m in InfluxDB line protocol
func (s *InfluxSink) Write(msg lp.CCMessage) error {
func (s *InfluxSink) Write(m lp.CCMetric) error {
m, err := s.mp.ProcessMessage(msg)
if err == nil && m != nil {
// Lock for encoder usage
s.encoderLock.Lock()
// Encode measurement name
s.encoder.StartLine(m.Name())
// copy tags and meta data which should be used as tags
s.extended_tag_list = s.extended_tag_list[:0]
for key, value := range m.Tags() {
s.extended_tag_list =
append(
s.extended_tag_list,
key_value_pair{
key: key,
value: value,
},
)
}
// for _, key := range s.config.MetaAsTags {
// if value, ok := m.GetMeta(key); ok {
// s.extended_tag_list =
// append(
// s.extended_tag_list,
// key_value_pair{
// key: key,
// value: value,
// },
// )
// }
// }
// Encode tags (they must be in lexical order)
slices.SortFunc(
s.extended_tag_list,
func(a key_value_pair, b key_value_pair) int {
if a.key < b.key {
return -1
if s.flushDelay != 0 && s.flushTimerMutex.TryLock() {
// Run a batched flush for all metrics that arrived in the last flush delay interval
cclog.ComponentDebug(s.name, "Starting new flush timer")
s.flushTimer = time.AfterFunc(
s.flushDelay,
func() {
defer s.flushTimerMutex.Unlock()
cclog.ComponentDebug(s.name, "Starting flush in flush timer")
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "Flush timer: flush failed:", err)
}
if a.key > b.key {
return +1
}
return 0
},
)
for i := range s.extended_tag_list {
s.encoder.AddTag(
s.extended_tag_list[i].key,
s.extended_tag_list[i].value,
)
}
// Encode fields
for key, value := range m.Fields() {
s.encoder.AddField(key, influx.MustNewValue(value))
}
// Encode time stamp
s.encoder.EndLine(m.Time())
// Check for encoder errors
if err := s.encoder.Err(); err != nil {
// Unlock encoder usage
s.encoderLock.Unlock()
return fmt.Errorf("encoding failed: %v", err)
}
s.numRecordsInEncoder++
})
}
if s.config.flushDelay == 0 {
// Unlock encoder usage
s.encoderLock.Unlock()
// Lock access to batch slice
s.batchMutex.Lock()
// Directly flush if no flush delay is configured
return s.Flush()
} else if s.numRecordsInEncoder == s.config.BatchSize {
// Unlock encoder usage
s.encoderLock.Unlock()
// batch slice full, dropping oldest metric(s)
// e.g. when previous flushes failed and batch slice was not cleared
if len(s.batch) == s.config.BatchSize {
newSize := s.config.BatchSize - s.config.DropRate
// Stop flush timer
if s.flushTimer != nil {
if ok := s.flushTimer.Stop(); ok {
cclog.ComponentDebug(s.name, "Write(): Stopped flush timer. Batch size limit reached before flush delay")
s.timerLock.Unlock()
}
for i := 0; i < newSize; i++ {
s.batch[i] = s.batch[i+s.config.DropRate]
}
// Flush if batch size is reached
return s.Flush()
} else if s.timerLock.TryLock() {
// Setup flush timer when flush delay is configured
// and no other timer is already running
if s.flushTimer != nil {
// Restarting existing flush timer
cclog.ComponentDebug(s.name, "Write(): Restarting flush timer")
s.flushTimer.Reset(s.config.flushDelay)
} else {
// Creating and starting flush timer
cclog.ComponentDebug(s.name, "Write(): Starting new flush timer")
s.flushTimer = time.AfterFunc(
s.config.flushDelay,
func() {
defer s.timerLock.Unlock()
cclog.ComponentDebug(s.name, "Starting flush triggered by flush timer")
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "Flush triggered by flush timer: flush failed:", err)
}
})
for i := newSize; i < s.config.BatchSize; i++ {
s.batch[i] = nil
}
s.batch = s.batch[:newSize]
cclog.ComponentError(s.name, "Batch slice full, dropping", s.config.DropRate, "oldest metric(s)")
}
// Unlock encoder usage
s.encoderLock.Unlock()
// Append metric to batch slice
p := m.ToPoint(s.meta_as_tags)
s.batch = append(s.batch, p)
// Flush synchronously if "flush_delay" is zero
// or
// Flush if batch size is reached
if s.flushDelay == 0 ||
len(s.batch) == s.config.BatchSize {
// Unlock access to batch slice
s.batchMutex.Unlock()
return s.Flush()
}
// Unlock access to batch slice
s.batchMutex.Unlock()
return nil
}
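In isolation, the drop-oldest logic above shifts the slice contents forward by `DropRate` before appending; a minimal sketch with illustrative sizes:

```go
package main

import "fmt"

func main() {
	batchSize, dropRate := 5, 2
	batch := []string{"m1", "m2", "m3", "m4", "m5"} // full batch
	if len(batch) == batchSize {
		newSize := batchSize - dropRate
		// Shift newer entries to the front, dropping the oldest dropRate entries.
		for i := 0; i < newSize; i++ {
			batch[i] = batch[i+dropRate]
		}
		batch = batch[:newSize]
	}
	batch = append(batch, "m6")
	fmt.Println(batch) // [m3 m4 m5 m6]
}
```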
// Flush sends all metrics stored in encoder to InfluxDB server
// Flush sends all metrics buffered in batch slice to InfluxDB server
func (s *InfluxSink) Flush() error {
cclog.ComponentDebug(s.name, "Flushing")
// Lock for encoder usage
// Own lock for as short as possible: the time it takes to clone the buffer.
s.encoderLock.Lock()
// Lock access to batch slice
s.batchMutex.Lock()
defer s.batchMutex.Unlock()
buf := slices.Clone(s.encoder.Bytes())
numRecordsInBuf := s.numRecordsInEncoder
s.encoder.Reset()
s.numRecordsInEncoder = 0
// Unlock encoder usage
s.encoderLock.Unlock()
if len(buf) == 0 {
// Nothing to do, batch slice is empty
if len(s.batch) == 0 {
return nil
}
cclog.ComponentDebug(s.name, "Flush(): Flushing", numRecordsInBuf, "metrics")
// Send metrics from batch slice
err := s.writeApi.WritePoint(context.Background(), s.batch...)
if err != nil {
cclog.ComponentError(s.name, "Flush(): Flush of", len(s.batch), "metrics failed:", err)
return err
}
// Asynchron send of encoder metrics
s.sendWaitGroup.Add(1)
go func() {
defer s.sendWaitGroup.Done()
startTime := time.Now()
err := s.writeApi.WriteRecord(context.Background(), string(buf))
if err != nil {
cclog.ComponentError(
s.name,
"Flush():",
"Flush failed:", err,
"(number of records =", numRecordsInBuf,
", buffer size =", len(buf),
", send duration =", time.Since(startTime),
")",
)
return
}
}()
// Clear batch slice
for i := range s.batch {
s.batch[i] = nil
}
s.batch = s.batch[:0]
return nil
}
func (s *InfluxSink) Close() {
cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
// Stop existing timer and immediately flush
if s.flushTimer != nil {
if ok := s.flushTimer.Stop(); ok {
s.timerLock.Unlock()
}
}
// Flush
s.flushTimer.Stop()
s.Flush()
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "Close():", "Flush failed:", err)
cclog.ComponentError(s.name, "Close(): Flush failed:", err)
}
// Wait for send operations to finish
s.sendWaitGroup.Wait()
s.client.Close()
}
@@ -436,69 +202,64 @@ func NewInfluxSink(name string, config json.RawMessage) (Sink, error) {
// Set config default values
s.config.BatchSize = 1000
s.config.FlushInterval = "1s"
s.config.Precision = "s"
s.config.DropRate = 100
// Read config
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
return nil, err
err := json.Unmarshal(config, &s.config)
if err != nil {
return s, err
}
}
if len(s.config.Host) == 0 {
return s, errors.New("missing host configuration required by InfluxSink")
return s, errors.New("Missing host configuration required by InfluxSink")
}
if len(s.config.Port) == 0 {
return s, errors.New("missing port configuration required by InfluxSink")
return s, errors.New("Missing port configuration required by InfluxSink")
}
if len(s.config.Database) == 0 {
return s, errors.New("missing database configuration required by InfluxSink")
return s, errors.New("Missing database configuration required by InfluxSink")
}
if len(s.config.Organization) == 0 {
return s, errors.New("missing organization configuration required by InfluxSink")
return s, errors.New("Missing organization configuration required by InfluxSink")
}
if len(s.config.Password) == 0 {
return s, errors.New("missing password configuration required by InfluxSink")
return s, errors.New("Missing password configuration required by InfluxSink")
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.MessageProcessor) > 0 {
err = p.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
s.meta_as_tags[k] = true
}
// Configure flush delay duration
if len(s.config.FlushInterval) > 0 {
t, err := time.ParseDuration(s.config.FlushInterval)
if err == nil {
s.config.flushDelay = t
s.flushDelay = t
}
}
if !(s.config.BatchSize > 0) {
return s, fmt.Errorf("batch_size=%d in InfluxDB config must be > 0", s.config.BatchSize)
}
if !(s.config.DropRate > 0) {
return s, fmt.Errorf("drop_rate=%d in InfluxDB config must be > 0", s.config.DropRate)
}
if !(s.config.BatchSize > s.config.DropRate) {
return s, fmt.Errorf(
"batch_size=%d must be greater then drop_rate=%d in InfluxDB config",
s.config.BatchSize, s.config.DropRate)
}
// allocate batch slice
s.batch = make([]*write.Point, 0, s.config.BatchSize)
// Connect to InfluxDB server
if err := s.connect(); err != nil {
return s, fmt.Errorf("unable to connect: %v", err)
}
// Configure influx line protocol encoder
s.encoder.SetPrecision(influx.Nanosecond)
s.extended_tag_list = make([]key_value_pair, 0)
return s, nil
}

View File

@@ -2,12 +2,14 @@
The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.dev/github.com/influxdata/influxdb-client-go/v2) to write the metrics to an InfluxDB database in a **blocking** fashion. It only supports V2 write endpoints (InfluxDB 1.8.0 or later).
### Configuration structure
```json
{
"<name>": {
"type": "influxdb",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
@@ -16,43 +18,20 @@ The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.de
"organization": "myorg",
"ssl": true,
"flush_delay" : "1s",
"batch_size" : 1000,
"use_gzip": true,
"precision": "s",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"batch_size" : 100
}
}
```
- `type`: makes the sink an `influxdb` sink
- `database`: All metrics are written to this bucket
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are written to this bucket
- `host`: Hostname of the InfluxDB database server
- `port`: Port number (as string) of the InfluxDB database server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
- `port`: Port number (as string) of the InfluxDB database server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
- `organization`: Organization in the InfluxDB
- `ssl`: Use SSL connection
- `flush_delay`: Group metrics coming in to a single batch
- `batch_size`: Maximal batch size. If `batch_size` is reached before the end of `flush_delay`, the metrics are sent without further delay
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
Influx client options:
- `batch_size`: Maximal batch size
- `meta_as_tags`: move meta information keys to tags (optional)
- `http_request_timeout`: HTTP request timeout
- `retry_interval`: retry interval
- `max_retry_interval`: maximum delay between each retry attempt
- `retry_exponential_base`: base for the exponential retry delay
- `max_retries`: maximum count of retry attempts of failed writes
- `max_retry_time`: maximum total retry timeout
- `use_gzip`: Specify whether to use GZip compression in write requests
### Using `influxdb` sink for communication with cc-metric-store
The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.

View File

@@ -66,15 +66,13 @@ void Ganglia_pool_destroy( Ganglia_pool pool );
import "C"
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"unsafe"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/NVIDIA/go-nvml/pkg/dl"
)
@@ -111,102 +109,99 @@ type LibgangliaSink struct {
cstrCache map[string]*C.char
}
func (s *LibgangliaSink) Write(msg lp.CCMessage) error {
func (s *LibgangliaSink) Write(point lp.CCMetric) error {
var err error = nil
var c_name *C.char
var c_value *C.char
var c_type *C.char
var c_unit *C.char
point, err := s.mp.ProcessMessage(msg)
if err == nil && point != nil {
// helper function for looking up C strings in the cache
lookup := func(key string) *C.char {
if _, exist := s.cstrCache[key]; !exist {
s.cstrCache[key] = C.CString(key)
}
return s.cstrCache[key]
// helper function for looking up C strings in the cache
lookup := func(key string) *C.char {
if _, exist := s.cstrCache[key]; !exist {
s.cstrCache[key] = C.CString(key)
}
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}
if s.config.AddTypeToName {
conf.Name = GangliaMetricName(point)
}
c_value = C.CString(conf.Value)
c_type = lookup(conf.Type)
c_name = lookup(conf.Name)
// Add unit
unit := ""
if s.config.AddUnits {
unit = conf.Unit
}
c_unit = lookup(unit)
// Determine the slope of the metric. Ganglia's own collectors mostly use
// 'both', but the mem and swap totals use 'zero'.
slope_type := C.GANGLIA_SLOPE_BOTH
switch conf.Slope {
case "zero":
slope_type = C.GANGLIA_SLOPE_ZERO
case "both":
slope_type = C.GANGLIA_SLOPE_BOTH
}
// Create a new Ganglia metric
gmetric := C.Ganglia_metric_create(s.global_context)
// Set name, value, type and unit in the Ganglia metric
// The default slope_type is both directions, so up and down. Some metrics want 'zero' slope, probably constant.
// The 'tmax' value is by default 300.
rval := C.int(0)
rval = C.Ganglia_metric_set(gmetric, c_name, c_value, c_type, c_unit, C.uint(slope_type), C.uint(conf.Tmax), 0)
switch rval {
case 1:
C.free(unsafe.Pointer(c_value))
return errors.New("invalid parameters")
case 2:
C.free(unsafe.Pointer(c_value))
return errors.New("one of your parameters has an invalid character '\"'")
case 3:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the type parameter \"%s\" is not a valid type", conf.Type)
case 4:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the value parameter \"%s\" does not represent a number", conf.Value)
default:
}
// Set the cluster name, otherwise it takes it from the configuration file
if len(s.config.ClusterName) > 0 {
C.Ganglia_metadata_add(gmetric, lookup("CLUSTER"), lookup(s.config.ClusterName))
}
// Set the group metadata in the Ganglia metric if configured
if s.config.AddGangliaGroup {
c_group := lookup(conf.Group)
C.Ganglia_metadata_add(gmetric, lookup("GROUP"), c_group)
}
// Now we send the metric
// gmetric does provide some more options like description and other options
// but they are not provided by the collectors
rval = C.Ganglia_metric_send(gmetric, s.send_channels)
if rval != 0 {
err = fmt.Errorf("there was an error sending metric %s to %d of the send channels ", point.Name(), rval)
// fall throuph to use Ganglia_metric_destroy from common cleanup
}
// Cleanup Ganglia metric
C.Ganglia_metric_destroy(gmetric)
// Free the value C string, the only one not stored in the cache
C.free(unsafe.Pointer(c_value))
return s.cstrCache[key]
}
conf := GetCommonGangliaConfig(point)
if len(conf.Type) == 0 {
conf = GetGangliaConfig(point)
}
if len(conf.Type) == 0 {
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
}
if s.config.AddTypeToName {
conf.Name = GangliaMetricName(point)
}
c_value = C.CString(conf.Value)
c_type = lookup(conf.Type)
c_name = lookup(conf.Name)
// Add unit
unit := ""
if s.config.AddUnits {
unit = conf.Unit
}
c_unit = lookup(unit)
// Determine the slope of the metric. Ganglia's own collectors mostly use
// 'both', but the mem and swap totals use 'zero'.
slope_type := C.GANGLIA_SLOPE_BOTH
switch conf.Slope {
case "zero":
slope_type = C.GANGLIA_SLOPE_ZERO
case "both":
slope_type = C.GANGLIA_SLOPE_BOTH
}
// Create a new Ganglia metric
gmetric := C.Ganglia_metric_create(s.global_context)
// Set name, value, type and unit in the Ganglia metric
// The default slope_type is both directions, so up and down. Some metrics want 'zero' slope, probably constant.
// The 'tmax' value is by default 300.
rval := C.int(0)
rval = C.Ganglia_metric_set(gmetric, c_name, c_value, c_type, c_unit, C.uint(slope_type), C.uint(conf.Tmax), 0)
switch rval {
case 1:
C.free(unsafe.Pointer(c_value))
return errors.New("invalid parameters")
case 2:
C.free(unsafe.Pointer(c_value))
return errors.New("one of your parameters has an invalid character '\"'")
case 3:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the type parameter \"%s\" is not a valid type", conf.Type)
case 4:
C.free(unsafe.Pointer(c_value))
return fmt.Errorf("the value parameter \"%s\" does not represent a number", conf.Value)
default:
}
// Set the cluster name, otherwise it takes it from the configuration file
if len(s.config.ClusterName) > 0 {
C.Ganglia_metadata_add(gmetric, lookup("CLUSTER"), lookup(s.config.ClusterName))
}
// Set the group metadata in the Ganglia metric if configured
if s.config.AddGangliaGroup {
c_group := lookup(conf.Group)
C.Ganglia_metadata_add(gmetric, lookup("GROUP"), c_group)
}
// Now we send the metric
// gmetric does provide some more options like description and other options
// but they are not provided by the collectors
rval = C.Ganglia_metric_send(gmetric, s.send_channels)
if rval != 0 {
err = fmt.Errorf("there was an error sending metric %s to %d of the send channels ", point.Name(), rval)
// fall throuph to use Ganglia_metric_destroy from common cleanup
}
// Cleanup Ganglia metric
C.Ganglia_metric_destroy(gmetric)
// Free the value C string, the only one not stored in the cache
C.free(unsafe.Pointer(c_value))
return err
}
@@ -238,27 +233,12 @@ func NewLibgangliaSink(name string, config json.RawMessage) (Sink, error) {
s.config.GmondConfig = string(GMOND_CONFIG_FILE)
s.config.GangliaLib = string(GANGLIA_LIB_NAME)
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
err = json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
return nil, err
}
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.MessageProcessor) > 0 {
err = s.mp.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
}
lib := dl.New(s.config.GangliaLib, GANGLIA_LIB_DL_FLAGS)
if lib == nil {
return nil, fmt.Errorf("error instantiating DynamicLibrary for %s", s.config.GangliaLib)

View File

@@ -15,23 +15,18 @@ The `libganglia` sink probably has less overhead compared to the `ganglia` sink
"cluster_name": "MyCluster",
"add_ganglia_group" : true,
"add_type_to_name": true,
"add_units" : true,
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"add_units" : true
}
}
```
- `type`: makes the sink an `libganglia` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `gmond_config`: Path to the Ganglia configuration file `gmond.conf` (default: `/etc/ganglia/gmond.conf`)
- `cluster_name`: Set a cluster name for the metric. If not set, it is taken from `gmond_config`
- `add_ganglia_group`: Add a Ganglia metric group based on meta information. Some old versions of `gmetric` do not support the `--group` option
- `add_type_to_name`: Ganglia commonly uses only node-level metrics but with cc-metric-collector, there are metrics for cpus, memory domains, CPU sockets and the whole node. In order to keep these distinguishable, this option prefixes the metric name with `<type><type-id>_` or `device_` depending on the metric tags and meta information. For metrics of the whole node `type=node`, no prefix is added
- `add_units`: Add metric value unit if there is a `unit` entry in the metric tags or meta information
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
### Ganglia Installation

View File

@@ -1,88 +1,27 @@
package sinks
import (
"encoding/json"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
"golang.org/x/exp/slices"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type defaultSinkConfig struct {
MetaAsTags []string `json:"meta_as_tags,omitempty"`
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
Type string `json:"type"`
MetaAsTags []string `json:"meta_as_tags,omitempty"`
Type string `json:"type"`
}
type sink struct {
meta_as_tags map[string]bool // Use meta data tags as tags
mp mp.MessageProcessor // message processor for the sink
name string // Name of the sink
meta_as_tags map[string]bool // Use meta data tags as tags
name string // Name of the sink
}
type Sink interface {
Write(point lp.CCMessage) error // Write metric to the sink
Flush() error // Flush buffered metrics
Close() // Close / finish metric sink
Name() string // Name of the metric sink
Write(point lp.CCMetric) error // Write metric to the sink
Flush() error // Flush buffered metrics
Close() // Close / finish metric sink
Name() string // Name of the metric sink
}
// Name returns the name of the metric sink
func (s *sink) Name() string {
return s.name
}
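A minimal sink satisfying this interface (the `CCMessage` variant shown above) could look like the following no-op sketch; it assumes `fmt` is imported and relies only on the `Name()`, `Tags()` and `Fields()` accessors used elsewhere in this file:

```go
type stdoutSink struct {
	sink // provides Name() and the common sink fields
}

func (s *stdoutSink) Write(point lp.CCMessage) error {
	fmt.Println(point.Name(), point.Tags(), point.Fields())
	return nil
}

func (s *stdoutSink) Flush() error { return nil }
func (s *stdoutSink) Close()       {}
```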
type key_value_pair struct {
key string
value string
}
func EncoderAdd(encoder *influx.Encoder, msg lp.CCMessage) error {
// Encode measurement name
encoder.StartLine(msg.Name())
tag_list := make([]key_value_pair, 0, 10)
// copy tags and meta data which should be used as tags
for key, value := range msg.Tags() {
tag_list =
append(
tag_list,
key_value_pair{
key: key,
value: value,
},
)
}
// Encode tags (they must be in lexical order)
slices.SortFunc(
tag_list,
func(a key_value_pair, b key_value_pair) int {
if a.key < b.key {
return -1
}
if a.key > b.key {
return +1
}
return 0
},
)
for i := range tag_list {
encoder.AddTag(
tag_list[i].key,
tag_list[i].value,
)
}
// Encode fields
for key, value := range msg.Fields() {
encoder.AddField(key, influx.MustNewValue(value))
}
// Encode time stamp
encoder.EndLine(msg.Time())
// Return encoder errors
return encoder.Err()
}
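The encoder calls that `EncoderAdd` wraps can also be driven directly; a self-contained sketch producing one line-protocol record (metric name and tag are illustrative):

```go
package main

import (
	"fmt"
	"time"

	influx "github.com/influxdata/line-protocol/v2/lineprotocol"
)

func main() {
	var enc influx.Encoder
	enc.SetPrecision(influx.Second)
	enc.StartLine("cpu_load")
	enc.AddTag("hostname", "node01") // tags must be added in lexical key order
	enc.AddField("value", influx.MustNewValue(1.5))
	enc.EndLine(time.Now())
	if err := enc.Err(); err != nil {
		panic(err)
	}
	fmt.Printf("%s", enc.Bytes())
}
```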

View File

@@ -5,16 +5,13 @@ import (
"encoding/json"
"errors"
"fmt"
"os"
"sync"
"time"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
influx "github.com/influxdata/line-protocol/v2/lineprotocol"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
influx "github.com/influxdata/line-protocol"
nats "github.com/nats-io/nats.go"
"golang.org/x/exp/slices"
)
type NatsSinkConfig struct {
@@ -25,21 +22,18 @@ type NatsSinkConfig struct {
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
FlushDelay string `json:"flush_delay,omitempty"`
flushDelay time.Duration
NkeyFile string `json:"nkey_file,omitempty"`
// Timestamp precision
Precision string `json:"precision,omitempty"`
}
type NatsSink struct {
sink
client *nats.Conn
encoder influx.Encoder
encoderLock sync.Mutex
config NatsSinkConfig
client *nats.Conn
encoder *influx.Encoder
buffer *bytes.Buffer
config NatsSinkConfig
lock sync.Mutex
flushDelay time.Duration
flushTimer *time.Timer
timerLock sync.Mutex
}
func (s *NatsSink) connect() error {
@@ -48,13 +42,6 @@ func (s *NatsSink) connect() error {
var nc *nats.Conn
if len(s.config.User) > 0 && len(s.config.Password) > 0 {
uinfo = nats.UserInfo(s.config.User, s.config.Password)
} else if len(s.config.NkeyFile) > 0 {
if _, err := os.Stat(s.config.NkeyFile); err == nil {
uinfo = nats.UserCredentials(s.config.NkeyFile)
} else {
cclog.ComponentError(s.name, "NKEY file", s.config.NkeyFile, "does not exist: %v", err.Error())
return err
}
}
uri := fmt.Sprintf("nats://%s:%s", s.config.Host, s.config.Port)
cclog.ComponentDebug(s.name, "Connect to", uri)
@@ -72,65 +59,33 @@ func (s *NatsSink) connect() error {
return nil
}
func (s *NatsSink) Write(m lp.CCMessage) error {
msg, err := s.mp.ProcessMessage(m)
if err == nil && msg != nil {
// Lock for encoder usage
s.encoderLock.Lock()
// Add message to encoder
err = EncoderAdd(&s.encoder, m)
// Unlock encoder usage
s.encoderLock.Unlock()
// Check that encoding worked
if err != nil {
cclog.ComponentError(s.name, "Write:", err.Error())
return err
}
func (s *NatsSink) Write(m lp.CCMetric) error {
s.lock.Lock()
_, err := s.encoder.Encode(m.ToPoint(s.meta_as_tags))
s.lock.Unlock()
if err != nil {
cclog.ComponentError(s.name, "Write:", err.Error())
return err
}
if s.config.flushDelay == 0 {
// Directly flush if no flush delay is configured
return s.Flush()
} else if s.timerLock.TryLock() {
// Setup flush timer when flush delay is configured
// and no other timer is already running
if s.flushTimer != nil {
// Restarting existing flush timer
cclog.ComponentDebug(s.name, "Write(): Restarting flush timer")
s.flushTimer.Reset(s.config.flushDelay)
} else {
// Creating and starting flush timer
cclog.ComponentDebug(s.name, "Write(): Starting new flush timer")
s.flushTimer = time.AfterFunc(
s.config.flushDelay,
func() {
defer s.timerLock.Unlock()
cclog.ComponentDebug(s.name, "Starting flush triggered by flush timer")
if err := s.Flush(); err != nil {
cclog.ComponentError(s.name, "Flush triggered by flush timer: flush failed:", err)
}
})
}
if s.flushDelay == 0 {
s.Flush()
} else if s.flushTimer == nil {
s.flushTimer = time.AfterFunc(s.flushDelay, func() {
s.Flush()
})
} else {
s.flushTimer.Reset(s.flushDelay)
}
return nil
}
func (s *NatsSink) Flush() error {
// Lock for encoder usage
// Own lock for as short as possible: the time it takes to clone the buffer.
s.encoderLock.Lock()
buf := slices.Clone(s.encoder.Bytes())
s.encoder.Reset()
// Unlock encoder usage
s.encoderLock.Unlock()
s.lock.Lock()
buf := append([]byte{}, s.buffer.Bytes()...) // copy bytes
s.buffer.Reset()
s.lock.Unlock()
if len(buf) == 0 {
return nil
@@ -144,28 +99,18 @@ func (s *NatsSink) Flush() error {
}
func (s *NatsSink) Close() {
// Stop existing timer and immediately flush
if s.flushTimer != nil {
if ok := s.flushTimer.Stop(); ok {
s.timerLock.Unlock()
}
}
cclog.ComponentDebug(s.name, "Close NATS connection")
cclog.ComponentDebug(s.name, "Close")
s.client.Close()
}
func NewNatsSink(name string, config json.RawMessage) (Sink, error) {
s := new(NatsSink)
s.name = fmt.Sprintf("NatsSink(%s)", name)
s.config.flushDelay = 5 * time.Second
s.config.FlushDelay = "5s"
s.config.Port = "4222"
s.config.Precision = "s"
s.flushDelay = 10 * time.Second
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return nil, err
}
}
@@ -174,51 +119,28 @@ func NewNatsSink(name string, config json.RawMessage) (Sink, error) {
len(s.config.Subject) == 0 {
return nil, errors.New("not all configuration variables set required by NatsSink")
}
// Create a new message processor
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
// Read config related to message processor
if len(s.config.MessageProcessor) > 0 {
err = s.mp.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Add meta_as_tags list to message processor
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
s.meta_as_tags[k] = true
}
// Setup Influx line protocol encoder
precision := influx.Second
if len(s.config.Precision) > 0 {
switch s.config.Precision {
case "s":
precision = influx.Second
case "ms":
precision = influx.Millisecond
case "us":
precision = influx.Microsecond
case "ns":
precision = influx.Nanosecond
}
}
s.encoder.SetPrecision(precision)
// Setup Influx line protocol
s.buffer = &bytes.Buffer{}
s.buffer.Grow(1025)
s.encoder = influx.NewEncoder(s.buffer)
s.encoder.SetPrecision(time.Second)
s.encoder.SetMaxLineBytes(1024)
// Setup infos for connection
if err := s.connect(); err != nil {
return nil, fmt.Errorf("unable to connect: %v", err)
}
s.flushTimer = nil
if len(s.config.FlushDelay) > 0 {
t, err := time.ParseDuration(s.config.FlushDelay)
if err == nil {
s.config.flushDelay = t
cclog.ComponentDebug(s.name, "Init(): flushDelay", t)
if len(s.config.FlushDelay) != 0 {
var err error
s.flushDelay, err = time.ParseDuration(s.config.FlushDelay)
if err != nil {
return nil, err
}
}

View File

@@ -2,40 +2,27 @@
The `nats` sink publishes all metrics into a NATS network. The publishing key is the database name provided in the configuration file.
### Configuration structure
```json
{
"<name>": {
"type": "nats",
"meta_as_tags" : true,
"database" : "mymetrics",
"host": "dbhost.example.com",
"port": "4222",
"user": "exampleuser",
"password" : "examplepw",
"nkey_file": "/path/to/nkey_file",
"flush_delay": "10s",
"precision": "s",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"password" : "examplepw"
}
}
```
- `type`: makes the sink a `nats` sink
- `meta_as_tags`: print all meta information as tags in the output (optional)
- `database`: All metrics are published with this subject
- `host`: Hostname of the NATS server
- `port`: Port number (as string) of the NATS server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
- `nkey_file`: Path to credentials file with NKEY
- `flush_delay`: Maximum time until metrics are sent out (default '5s')
- `precision`: Precision of the timestamp. Valid values are 's', 'ms', 'us' and 'ns'. (default is 's')
- `process_messages`: Process messages with given rules before progressing or dropping, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
### Using `nats` sink for communication with cc-metric-store
The cc-metric-store only accepts metrics with a timestamp precision in seconds, so it is required to use `"precision": "s"`.
- `port`: Port number (as string) of the NATS server
- `user`: Username for basic authentication
- `password`: Password for basic authentication
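For orientation, a minimal sketch of what this sink's publishing boils down to, using the official `github.com/nats-io/nats.go` client; server address, credentials, subject and the metric line are placeholder values taken from the example configuration above, not the sink's actual code:

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// Connect with basic authentication; host, port, user and password
	// mirror the example configuration (placeholder values).
	nc, err := nats.Connect("nats://dbhost.example.com:4222",
		nats.UserInfo("exampleuser", "examplepw"))
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// The sink encodes metrics in Influx line protocol and publishes them
	// on the subject given by "database"; with "precision": "s" the
	// trailing timestamp is in seconds.
	line := "cpu_load,hostname=node01 value=1.50 1700000000\n"
	if err := nc.Publish("mymetrics", []byte(line)); err != nil {
		log.Fatal(err)
	}
	if err := nc.Flush(); err != nil {
		log.Fatal(err)
	}
}
```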

View File

@@ -1,7 +1,6 @@
package sinks
import (
"bytes"
"context"
"encoding/json"
"errors"
@@ -10,9 +9,8 @@ import (
"strings"
"sync"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -50,13 +48,11 @@ func intToFloat64(input interface{}) (float64, error) {
return float64(value), nil
case int64:
return float64(value), nil
case uint64:
return float64(value), nil
}
return 0, errors.New("cannot cast value to float64")
}
func getLabelValue(metric lp.CCMessage) []string {
func getLabelValue(metric lp.CCMetric) []string {
labelValues := []string{}
if tid, tidok := metric.GetTag("type-id"); tidok && metric.HasTag("type") {
labelValues = append(labelValues, tid)
@@ -69,7 +65,7 @@ func getLabelValue(metric lp.CCMessage) []string {
return labelValues
}
func getLabelNames(metric lp.CCMessage) []string {
func getLabelNames(metric lp.CCMetric) []string {
labelNames := []string{}
if t, tok := metric.GetTag("type"); tok && metric.HasTag("type-id") {
labelNames = append(labelNames, t)
@@ -82,7 +78,7 @@ func getLabelNames(metric lp.CCMessage) []string {
return labelNames
}
func (s *PrometheusSink) newMetric(metric lp.CCMessage) error {
func (s *PrometheusSink) newMetric(metric lp.CCMetric) error {
var value float64 = 0
name := metric.Name()
opts := prometheus.GaugeOpts{
@@ -120,7 +116,7 @@ func (s *PrometheusSink) newMetric(metric lp.CCMessage) error {
return nil
}
func (s *PrometheusSink) updateMetric(metric lp.CCMessage) error {
func (s *PrometheusSink) updateMetric(metric lp.CCMetric) error {
var value float64 = 0.0
name := metric.Name()
labelValues := getLabelValue(metric)
@@ -153,12 +149,8 @@ func (s *PrometheusSink) updateMetric(metric lp.CCMessage) error {
return nil
}
func (s *PrometheusSink) Write(m lp.CCMessage) error {
msg, err := s.mp.ProcessMessage(m)
if err == nil && msg != nil {
err = s.updateMetric(msg)
}
return err
func (s *PrometheusSink) Write(m lp.CCMetric) error {
return s.updateMetric(m)
}
func (s *PrometheusSink) Flush() error {
@@ -175,10 +167,9 @@ func NewPrometheusSink(name string, config json.RawMessage) (Sink, error) {
s := new(PrometheusSink)
s.name = "PrometheusSink"
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
err := json.Unmarshal(config, &s.config)
if err != nil {
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
return nil, err
}
}
@@ -187,20 +178,6 @@ func NewPrometheusSink(name string, config json.RawMessage) (Sink, error) {
cclog.ComponentError(s.name, err.Error())
return nil, err
}
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
if len(s.config.MessageProcessor) > 0 {
err = p.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
}
s.labelMetrics = make(map[string]*prometheus.GaugeVec)
s.nodeMetrics = make(map[string]prometheus.Gauge)
s.promWg.Add(1)

View File

@@ -11,11 +11,7 @@ The `prometheus` sink publishes all metrics via an HTTP server ready to be scrap
"type": "prometheus",
"host": "localhost",
"port": "8080",
"path": "metrics",
"process_messages" : {
"see" : "docs of message processor for valid fields"
},
"meta_as_tags" : []
"path": "metrics"
}
}
```
@@ -25,5 +21,3 @@ The `prometheus` sink publishes all metrics via an HTTP server ready to be scrap
- `port`: Port number (as string) for the HTTP server
- `path`: Path where the metrics should be served. The metrics will be published at `host`:`port`/`path`
- `group_as_namespace`: Most metrics carry a group as meta information, e.g. 'memory' or 'load'. With this option enabled, metric names are extended to `group`_`name` where possible.
- `process_messages`: Process messages with the given rules before forwarding or dropping them, see [here](../pkg/messageProcessor/README.md) (optional)
- `meta_as_tags`: print all meta information as tags in the output (deprecated, optional)
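For orientation, a minimal sketch of the mechanism behind this sink, assuming the `prometheus` and `promhttp` client packages it imports; metric and label names are illustrative placeholders, not the sink's actual code:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// The sink maintains its own set of gauges; we mimic that with a
	// dedicated registry instead of the global default one.
	reg := prometheus.NewRegistry()
	gauge := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "cpu_load"},
		[]string{"hostname"},
	)
	reg.MustRegister(gauge)
	gauge.WithLabelValues("node01").Set(1.5)

	// Serve the registry at host:port/path, e.g. localhost:8080/metrics,
	// matching the example configuration above.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```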

View File

@@ -1,14 +1,12 @@
package sinks
import (
"bytes"
"encoding/json"
"fmt"
"log"
lp "github.com/ClusterCockpit/cc-energy-manager/pkg/cc-message"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
mp "github.com/ClusterCockpit/cc-metric-collector/pkg/messageProcessor"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
type SampleSinkConfig struct {
@@ -29,14 +27,9 @@ type SampleSink struct {
// See: metricSink.go
// Code to submit a single CCMetric to the sink
func (s *SampleSink) Write(point lp.CCMessage) error {
func (s *SampleSink) Write(point lp.CCMetric) error {
// based on s.meta_as_tags use meta infos as tags
// moreover, submit the point to the message processor
// to apply drop/modify rules
msg, err := s.mp.ProcessMessage(point)
if err == nil && msg != nil {
log.Print(msg)
}
log.Print(point)
return nil
}
@@ -64,32 +57,16 @@ func NewSampleSink(name string, config json.RawMessage) (Sink, error) {
// Read in the config JSON
if len(config) > 0 {
d := json.NewDecoder(bytes.NewReader(config))
d.DisallowUnknownFields()
if err := d.Decode(&s.config); err != nil {
cclog.ComponentError(s.name, "Error reading config:", err.Error())
err := json.Unmarshal(config, &s.config)
if err != nil {
return nil, err
}
}
// Initialize and configure the message processor
p, err := mp.NewMessageProcessor()
if err != nil {
return nil, fmt.Errorf("initialization of message processor failed: %v", err.Error())
}
s.mp = p
// Add message processor configuration
if len(s.config.MessageProcessor) > 0 {
err = p.FromConfigJSON(s.config.MessageProcessor)
if err != nil {
return nil, fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
}
}
// Add rules to move meta information to tag space
// Replacing the legacy 'meta_as_tags' configuration
// Create lookup map to use meta infos as tags in the output metric
s.meta_as_tags = make(map[string]bool)
for _, k := range s.config.MetaAsTags {
s.mp.AddMoveMetaToTags("true", k, k)
s.meta_as_tags[k] = true
}
// Check if all required fields in the config are set
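For context, a hedged sketch of how such a `meta_as_tags` lookup map is typically applied at write time; it assumes the `GetMeta`/`AddTag` accessors of the `CCMetric` interface and is illustrative, not the sink's actual code:

```go
// applyMetaAsTags copies selected meta information into the tag set of a
// metric before it is written out. Illustrative sketch; assumes the
// GetMeta/AddTag accessors from the ccMetric package imported as lp above.
func applyMetaAsTags(point lp.CCMetric, metaAsTags map[string]bool) {
	for key, use := range metaAsTags {
		if !use {
			continue
		}
		// Only copy meta entries that are actually present on the metric.
		if value, ok := point.GetMeta(key); ok {
			point.AddTag(key, value)
		}
	}
}
```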

Some files were not shown because too many files have changed in this diff.