mirror of
https://github.com/ClusterCockpit/cc-metric-collector.git
synced 2025-10-20 21:05:06 +02:00
Compare commits
5 Commits
cc_lib_swi
...
smartmon_c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6f746de084 | ||
|
|
3d5b28e5aa | ||
|
|
e2438f8cec | ||
|
|
d3e9f91ad2 | ||
|
|
e58eff2fac |
10
.github/ci-config.json
vendored
10
.github/ci-config.json
vendored
@@ -1,10 +1,8 @@
|
|||||||
{
|
{
|
||||||
"sinks-file": ".github/ci-sinks.json",
|
"sinks": ".github/ci-sinks.json",
|
||||||
"collectors-file" : ".github/ci-collectors.json",
|
"collectors" : ".github/ci-collectors.json",
|
||||||
"receivers-file" : ".github/ci-receivers.json",
|
"receivers" : ".github/ci-receivers.json",
|
||||||
"router-file" : ".github/ci-router.json",
|
"router" : ".github/ci-router.json",
|
||||||
"main" : {
|
|
||||||
"interval": "5s",
|
"interval": "5s",
|
||||||
"duration": "1s"
|
"duration": "1s"
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
411
.github/workflows/Release.yml
vendored
411
.github/workflows/Release.yml
vendored
@@ -8,17 +8,16 @@ on:
|
|||||||
push:
|
push:
|
||||||
tags:
|
tags:
|
||||||
- '**'
|
- '**'
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
#
|
#
|
||||||
# Build on AlmaLinux 8 using go-toolset
|
# Build on AlmaLinux 8.5 using go-toolset
|
||||||
#
|
#
|
||||||
AlmaLinux8-RPM-build:
|
AlmaLinux-RPM-build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# See: https://hub.docker.com/_/almalinux
|
# See: https://hub.docker.com/_/almalinux
|
||||||
container: almalinux:8
|
container: almalinux:8.5
|
||||||
# The job outputs link to the outputs of the 'rpmrename' step
|
# The job outputs link to the outputs of the 'rpmrename' step
|
||||||
# Only job outputs can be used in child jobs
|
# Only job outputs can be used in child jobs
|
||||||
outputs:
|
outputs:
|
||||||
@@ -36,146 +35,60 @@ jobs:
|
|||||||
# fetch-depth must be 0 to use git describe
|
# fetch-depth must be 0 to use git describe
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
# See: https://github.com/marketplace/actions/checkout
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
# - name: Setup Golang
|
# Use dnf to install build dependencies
|
||||||
# uses: actions/setup-go@v5
|
- name: Install build dependencies
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
|
||||||
run: |
|
run: |
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
|
rpm -i go*.rpm
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
- name: RPM build MetricCollector
|
||||||
id: rpmbuild
|
id: rpmbuild
|
||||||
run: |
|
run: make RPM
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
# AlmaLinux 8 is a derivate of RedHat Enterprise Linux 8 (UBI8),
|
# AlmaLinux 8.5 is a derivate of RedHat Enterprise Linux 8 (UBI8),
|
||||||
# so the created RPM both contain the substring 'el8' in the RPM file names
|
# so the created RPM both contain the substring 'el8' in the RPM file names
|
||||||
# This step replaces the substring 'el8' to 'alma8'. It uses the move operation
|
# This step replaces the substring 'el8' to 'alma85'. It uses the move operation
|
||||||
# because it is unclear whether the default AlmaLinux 8 container contains the
|
# because it is unclear whether the default AlmaLinux 8.5 container contains the
|
||||||
# 'rename' command. This way we also get the new names for output.
|
# 'rename' command. This way we also get the new names for output.
|
||||||
- name: Rename RPMs (s/el8/alma8/)
|
- name: Rename RPMs (s/el8/alma85/)
|
||||||
id: rpmrename
|
id: rpmrename
|
||||||
run: |
|
run: |
|
||||||
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
|
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
|
||||||
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
|
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
|
||||||
NEW_RPM="${OLD_RPM/el8/alma8}"
|
NEW_RPM="${OLD_RPM/el8/alma85}"
|
||||||
NEW_SRPM=${OLD_SRPM/el8/alma8}
|
NEW_SRPM=${OLD_SRPM/el8/alma85}
|
||||||
mv "${OLD_RPM}" "${NEW_RPM}"
|
mv "${OLD_RPM}" "${NEW_RPM}"
|
||||||
mv "${OLD_SRPM}" "${NEW_SRPM}"
|
mv "${OLD_SRPM}" "${NEW_SRPM}"
|
||||||
echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT
|
echo "::set-output name=SRPM::${NEW_SRPM}"
|
||||||
echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT
|
echo "::set-output name=RPM::${NEW_RPM}"
|
||||||
|
|
||||||
# See: https://github.com/actions/upload-artifact
|
# See: https://github.com/actions/upload-artifact
|
||||||
- name: Save RPM as artifact
|
- name: Save RPM as artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector RPM for AlmaLinux 8
|
name: cc-metric-collector RPM for AlmaLinux 8.5
|
||||||
path: ${{ steps.rpmrename.outputs.RPM }}
|
path: ${{ steps.rpmrename.outputs.RPM }}
|
||||||
overwrite: true
|
|
||||||
- name: Save SRPM as artifact
|
- name: Save SRPM as artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector SRPM for AlmaLinux 8
|
name: cc-metric-collector SRPM for AlmaLinux 8.5
|
||||||
path: ${{ steps.rpmrename.outputs.SRPM }}
|
path: ${{ steps.rpmrename.outputs.SRPM }}
|
||||||
overwrite: true
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on AlmaLinux 9 using go-toolset
|
|
||||||
#
|
|
||||||
AlmaLinux9-RPM-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
# See: https://hub.docker.com/_/almalinux
|
|
||||||
container: almalinux:9
|
|
||||||
# The job outputs link to the outputs of the 'rpmrename' step
|
|
||||||
# Only job outputs can be used in child jobs
|
|
||||||
outputs:
|
|
||||||
rpm : ${{steps.rpmrename.outputs.RPM}}
|
|
||||||
srpm : ${{steps.rpmrename.outputs.SRPM}}
|
|
||||||
steps:
|
|
||||||
|
|
||||||
# Use dnf to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
|
|
||||||
dnf --assumeyes install wget openssl-devel diffutils delve which
|
|
||||||
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# - name: Setup Golang
|
|
||||||
# uses: actions/setup-go@v5
|
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
|
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
|
||||||
id: rpmbuild
|
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
# AlmaLinux 9 is a derivate of RedHat Enterprise Linux 8 (UBI8),
|
|
||||||
# so the created RPM both contain the substring 'el9' in the RPM file names
|
|
||||||
# This step replaces the substring 'el8' to 'alma8'. It uses the move operation
|
|
||||||
# because it is unclear whether the default AlmaLinux 8 container contains the
|
|
||||||
# 'rename' command. This way we also get the new names for output.
|
|
||||||
- name: Rename RPMs (s/el9/alma9/)
|
|
||||||
id: rpmrename
|
|
||||||
run: |
|
|
||||||
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
|
|
||||||
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
|
|
||||||
NEW_RPM="${OLD_RPM/el9/alma9}"
|
|
||||||
NEW_SRPM=${OLD_SRPM/el9/alma9}
|
|
||||||
mv "${OLD_RPM}" "${NEW_RPM}"
|
|
||||||
mv "${OLD_SRPM}" "${NEW_SRPM}"
|
|
||||||
echo "SRPM=${NEW_SRPM}" >> $GITHUB_OUTPUT
|
|
||||||
echo "RPM=${NEW_RPM}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
# See: https://github.com/actions/upload-artifact
|
|
||||||
- name: Save RPM as artifact
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector RPM for AlmaLinux 9
|
|
||||||
path: ${{ steps.rpmrename.outputs.RPM }}
|
|
||||||
overwrite: true
|
|
||||||
- name: Save SRPM as artifact
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector SRPM for AlmaLinux 9
|
|
||||||
path: ${{ steps.rpmrename.outputs.SRPM }}
|
|
||||||
overwrite: true
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Build on UBI 8 using go-toolset
|
# Build on UBI 8 using go-toolset
|
||||||
#
|
#
|
||||||
UBI-8-RPM-build:
|
UBI-8-RPM-build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c35984d70cc534b3a3784e?container-tabs=gti
|
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
|
||||||
container: registry.access.redhat.com/ubi8/ubi:8.8-1032.1692772289
|
container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065
|
||||||
# The job outputs link to the outputs of the 'rpmbuild' step
|
# The job outputs link to the outputs of the 'rpmbuild' step
|
||||||
outputs:
|
outputs:
|
||||||
rpm : ${{steps.rpmbuild.outputs.RPM}}
|
rpm : ${{steps.rpmbuild.outputs.RPM}}
|
||||||
@@ -190,110 +103,42 @@ jobs:
|
|||||||
# fetch-depth must be 0 to use git describe
|
# fetch-depth must be 0 to use git describe
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
# See: https://github.com/marketplace/actions/checkout
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
# - name: Setup Golang
|
# Use dnf to install build dependencies
|
||||||
# uses: actions/setup-go@v5
|
- name: Install build dependencies
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
|
||||||
run: |
|
run: |
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
|
rpm -i go*.rpm
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
- name: RPM build MetricCollector
|
||||||
id: rpmbuild
|
id: rpmbuild
|
||||||
run: |
|
run: make RPM
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
# See: https://github.com/actions/upload-artifact
|
# See: https://github.com/actions/upload-artifact
|
||||||
- name: Save RPM as artifact
|
- name: Save RPM as artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector RPM for UBI 8
|
name: cc-metric-collector RPM for UBI 8
|
||||||
path: ${{ steps.rpmbuild.outputs.RPM }}
|
path: ${{ steps.rpmbuild.outputs.RPM }}
|
||||||
overwrite: true
|
|
||||||
- name: Save SRPM as artifact
|
- name: Save SRPM as artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector SRPM for UBI 8
|
name: cc-metric-collector SRPM for UBI 8
|
||||||
path: ${{ steps.rpmbuild.outputs.SRPM }}
|
path: ${{ steps.rpmbuild.outputs.SRPM }}
|
||||||
overwrite: true
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Build on UBI 9 using go-toolset
|
# Build on Ubuntu 20.04 using official go package
|
||||||
#
|
#
|
||||||
UBI-9-RPM-build:
|
Ubuntu-focal-build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
|
container: ubuntu:20.04
|
||||||
container: redhat/ubi9
|
|
||||||
# The job outputs link to the outputs of the 'rpmbuild' step
|
|
||||||
# The job outputs link to the outputs of the 'rpmbuild' step
|
|
||||||
outputs:
|
|
||||||
rpm : ${{steps.rpmbuild.outputs.RPM}}
|
|
||||||
srpm : ${{steps.rpmbuild.outputs.SRPM}}
|
|
||||||
steps:
|
|
||||||
|
|
||||||
# Use dnf to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
|
|
||||||
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
|
||||||
# - name: Setup Golang
|
|
||||||
# uses: actions/setup-go@v5
|
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
|
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
|
||||||
id: rpmbuild
|
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
# See: https://github.com/actions/upload-artifact
|
|
||||||
- name: Save RPM as artifact
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector RPM for UBI 9
|
|
||||||
path: ${{ steps.rpmbuild.outputs.RPM }}
|
|
||||||
overwrite: true
|
|
||||||
- name: Save SRPM as artifact
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector SRPM for UBI 9
|
|
||||||
path: ${{ steps.rpmbuild.outputs.SRPM }}
|
|
||||||
overwrite: true
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on Ubuntu 22.04 using official go package
|
|
||||||
#
|
|
||||||
Ubuntu-jammy-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container: ubuntu:22.04
|
|
||||||
# The job outputs link to the outputs of the 'debrename' step
|
# The job outputs link to the outputs of the 'debrename' step
|
||||||
# Only job outputs can be used in child jobs
|
# Only job outputs can be used in child jobs
|
||||||
outputs:
|
outputs:
|
||||||
@@ -308,86 +153,35 @@ jobs:
|
|||||||
# fetch-depth must be 0 to use git describe
|
# fetch-depth must be 0 to use git describe
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
# See: https://github.com/marketplace/actions/checkout
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
# Use official golang package
|
||||||
- name: Setup Golang
|
- name: Install Golang
|
||||||
uses: actions/setup-go@v5
|
run: |
|
||||||
with:
|
wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz
|
||||||
go-version: 'stable'
|
tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz
|
||||||
|
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
|
||||||
|
go version
|
||||||
- name: DEB build MetricCollector
|
- name: DEB build MetricCollector
|
||||||
id: dpkg-build
|
id: dpkg-build
|
||||||
run: |
|
run: |
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
|
||||||
make DEB
|
make DEB
|
||||||
- name: Rename DEB (add '_ubuntu22.04')
|
- name: Rename DEB (add '_ubuntu20.04')
|
||||||
id: debrename
|
id: debrename
|
||||||
run: |
|
run: |
|
||||||
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
|
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
|
||||||
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb"
|
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu20.04.deb"
|
||||||
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
|
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
|
||||||
echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
|
echo "::set-output name=DEB::${NEW_DEB_FILE}"
|
||||||
# See: https://github.com/actions/upload-artifact
|
# See: https://github.com/actions/upload-artifact
|
||||||
- name: Save DEB as artifact
|
- name: Save DEB as artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector DEB for Ubuntu 22.04
|
name: cc-metric-collector DEB for Ubuntu 20.04
|
||||||
path: ${{ steps.debrename.outputs.DEB }}
|
path: ${{ steps.debrename.outputs.DEB }}
|
||||||
overwrite: true
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on Ubuntu 24.04 using official go package
|
|
||||||
#
|
|
||||||
Ubuntu-noblenumbat-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container: ubuntu:24.04
|
|
||||||
# The job outputs link to the outputs of the 'debrename' step
|
|
||||||
# Only job outputs can be used in child jobs
|
|
||||||
outputs:
|
|
||||||
deb : ${{steps.debrename.outputs.DEB}}
|
|
||||||
steps:
|
|
||||||
# Use apt to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: |
|
|
||||||
apt update && apt --assume-yes upgrade
|
|
||||||
apt --assume-yes install build-essential sed git wget bash
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Golang
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: 'stable'
|
|
||||||
|
|
||||||
- name: DEB build MetricCollector
|
|
||||||
id: dpkg-build
|
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make DEB
|
|
||||||
- name: Rename DEB (add '_ubuntu24.04')
|
|
||||||
id: debrename
|
|
||||||
run: |
|
|
||||||
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
|
|
||||||
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu24.04.deb"
|
|
||||||
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
|
|
||||||
echo "DEB=${NEW_DEB_FILE}" >> $GITHUB_OUTPUT
|
|
||||||
# See: https://github.com/actions/upload-artifact
|
|
||||||
- name: Save DEB as artifact
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector DEB for Ubuntu 24.04
|
|
||||||
path: ${{ steps.debrename.outputs.DEB }}
|
|
||||||
overwrite: true
|
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Create release with fresh RPMs
|
# Create release with fresh RPMs
|
||||||
@@ -395,55 +189,32 @@ jobs:
|
|||||||
Release:
|
Release:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# We need the RPMs, so add dependency
|
# We need the RPMs, so add dependency
|
||||||
needs: [AlmaLinux8-RPM-build, AlmaLinux9-RPM-build, UBI-8-RPM-build, UBI-9-RPM-build, Ubuntu-jammy-build, Ubuntu-noblenumbat-build]
|
needs: [AlmaLinux-RPM-build, UBI-8-RPM-build, Ubuntu-focal-build]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
# See: https://github.com/actions/download-artifact
|
# See: https://github.com/actions/download-artifact
|
||||||
- name: Download AlmaLinux 8 RPM
|
- name: Download AlmaLinux 8.5 RPM
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector RPM for AlmaLinux 8
|
name: cc-metric-collector RPM for AlmaLinux 8.5
|
||||||
- name: Download AlmaLinux 8 SRPM
|
- name: Download AlmaLinux 8.5 SRPM
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector SRPM for AlmaLinux 8
|
name: cc-metric-collector SRPM for AlmaLinux 8.5
|
||||||
|
|
||||||
- name: Download AlmaLinux 9 RPM
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector RPM for AlmaLinux 9
|
|
||||||
- name: Download AlmaLinux 9 SRPM
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector SRPM for AlmaLinux 9
|
|
||||||
|
|
||||||
- name: Download UBI 8 RPM
|
- name: Download UBI 8 RPM
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector RPM for UBI 8
|
name: cc-metric-collector RPM for UBI 8
|
||||||
- name: Download UBI 8 SRPM
|
- name: Download UBI 8 SRPM
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector SRPM for UBI 8
|
name: cc-metric-collector SRPM for UBI 8
|
||||||
|
|
||||||
- name: Download UBI 9 RPM
|
- name: Download Ubuntu 20.04 DEB
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector RPM for UBI 9
|
name: cc-metric-collector DEB for Ubuntu 20.04
|
||||||
- name: Download UBI 9 SRPM
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector SRPM for UBI 9
|
|
||||||
|
|
||||||
- name: Download Ubuntu 22.04 DEB
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector DEB for Ubuntu 22.04
|
|
||||||
|
|
||||||
- name: Download Ubuntu 24.04 DEB
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: cc-metric-collector DEB for Ubuntu 24.04
|
|
||||||
|
|
||||||
# The download actions do not publish the name of the downloaded file,
|
# The download actions do not publish the name of the downloaded file,
|
||||||
# so we re-use the job outputs of the parent jobs. The files are all
|
# so we re-use the job outputs of the parent jobs. The files are all
|
||||||
@@ -454,51 +225,31 @@ jobs:
|
|||||||
- name: Set RPM variables
|
- name: Set RPM variables
|
||||||
id: files
|
id: files
|
||||||
run: |
|
run: |
|
||||||
ALMA_8_RPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.rpm}}")
|
ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}")
|
||||||
ALMA_8_SRPM=$(basename "${{ needs.AlmaLinux8-RPM-build.outputs.srpm}}")
|
ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}")
|
||||||
ALMA_9_RPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.rpm}}")
|
|
||||||
ALMA_9_SRPM=$(basename "${{ needs.AlmaLinux9-RPM-build.outputs.srpm}}")
|
|
||||||
UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
|
UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
|
||||||
UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
|
UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
|
||||||
UBI_9_RPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.rpm}}")
|
U_2004_DEB=$(basename "${{ needs.Ubuntu-focal-build.outputs.deb}}")
|
||||||
UBI_9_SRPM=$(basename "${{ needs.UBI-9-RPM-build.outputs.srpm}}")
|
echo "ALMA_85_RPM::${ALMA_85_RPM}"
|
||||||
U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}")
|
echo "ALMA_85_SRPM::${ALMA_85_SRPM}"
|
||||||
U_2404_DEB=$(basename "${{ needs.Ubuntu-noblenumbat-build.outputs.deb}}")
|
|
||||||
echo "ALMA_8_RPM::${ALMA_8_RPM}"
|
|
||||||
echo "ALMA_8_SRPM::${ALMA_8_SRPM}"
|
|
||||||
echo "ALMA_9_RPM::${ALMA_9_RPM}"
|
|
||||||
echo "ALMA_9_SRPM::${ALMA_9_SRPM}"
|
|
||||||
echo "UBI_8_RPM::${UBI_8_RPM}"
|
echo "UBI_8_RPM::${UBI_8_RPM}"
|
||||||
echo "UBI_8_SRPM::${UBI_8_SRPM}"
|
echo "UBI_8_SRPM::${UBI_8_SRPM}"
|
||||||
echo "UBI_9_RPM::${UBI_9_RPM}"
|
echo "U_2004_DEB::${U_2004_DEB}"
|
||||||
echo "UBI_9_SRPM::${UBI_9_SRPM}"
|
echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}"
|
||||||
echo "U_2204_DEB::${U_2204_DEB}"
|
echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}"
|
||||||
echo "U_2404_DEB::${U_2404_DEB}"
|
echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}"
|
||||||
echo "ALMA_8_RPM=${ALMA_8_RPM}" >> $GITHUB_OUTPUT
|
echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}"
|
||||||
echo "ALMA_8_SRPM=${ALMA_8_SRPM}" >> $GITHUB_OUTPUT
|
echo "::set-output name=U_2004_DEB::${U_2004_DEB}"
|
||||||
echo "ALMA_9_RPM=${ALMA_9_RPM}" >> $GITHUB_OUTPUT
|
|
||||||
echo "ALMA_9_SRPM=${ALMA_9_SRPM}" >> $GITHUB_OUTPUT
|
|
||||||
echo "UBI_8_RPM=${UBI_8_RPM}" >> $GITHUB_OUTPUT
|
|
||||||
echo "UBI_8_SRPM=${UBI_8_SRPM}" >> $GITHUB_OUTPUT
|
|
||||||
echo "UBI_9_RPM=${UBI_9_RPM}" >> $GITHUB_OUTPUT
|
|
||||||
echo "UBI_9_SRPM=${UBI_9_SRPM}" >> $GITHUB_OUTPUT
|
|
||||||
echo "U_2204_DEB=${U_2204_DEB}" >> $GITHUB_OUTPUT
|
|
||||||
echo "U_2404_DEB=${U_2404_DEB}" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
# See: https://github.com/softprops/action-gh-release
|
# See: https://github.com/softprops/action-gh-release
|
||||||
- name: Release
|
- name: Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v1
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
if: startsWith(github.ref, 'refs/tags/')
|
||||||
with:
|
with:
|
||||||
name: cc-metric-collector-${{github.ref_name}}
|
name: cc-metric-collector-${{github.ref_name}}
|
||||||
files: |
|
files: |
|
||||||
${{ steps.files.outputs.ALMA_8_RPM }}
|
${{ steps.files.outputs.ALMA_85_RPM }}
|
||||||
${{ steps.files.outputs.ALMA_8_SRPM }}
|
${{ steps.files.outputs.ALMA_85_SRPM }}
|
||||||
${{ steps.files.outputs.ALMA_9_RPM }}
|
|
||||||
${{ steps.files.outputs.ALMA_9_SRPM }}
|
|
||||||
${{ steps.files.outputs.UBI_8_RPM }}
|
${{ steps.files.outputs.UBI_8_RPM }}
|
||||||
${{ steps.files.outputs.UBI_8_SRPM }}
|
${{ steps.files.outputs.UBI_8_SRPM }}
|
||||||
${{ steps.files.outputs.UBI_9_RPM }}
|
${{ steps.files.outputs.U_2004_DEB }}
|
||||||
${{ steps.files.outputs.UBI_9_SRPM }}
|
|
||||||
${{ steps.files.outputs.U_2204_DEB }}
|
|
||||||
${{ steps.files.outputs.U_2404_DEB }}
|
|
||||||
|
|||||||
259
.github/workflows/runonce.yml
vendored
259
.github/workflows/runonce.yml
vendored
@@ -4,32 +4,28 @@
|
|||||||
name: Run Test
|
name: Run Test
|
||||||
|
|
||||||
# Run on event push
|
# Run on event push
|
||||||
on:
|
on: push
|
||||||
push:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
#
|
#
|
||||||
# Job build-latest
|
# Job build-1-18
|
||||||
# Build on latest Ubuntu using latest golang version
|
# Build on latest Ubuntu using golang version 1.18
|
||||||
#
|
#
|
||||||
build-latest:
|
build-1-18:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
# See: https://github.com/marketplace/actions/checkout
|
||||||
# Checkout git repository and submodules
|
# Checkout git repository and submodules
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
# See: https://github.com/marketplace/actions/setup-go-environment
|
||||||
- name: Setup Golang
|
- name: Setup Golang
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.21'
|
go-version: '1.18.2'
|
||||||
check-latest: true
|
|
||||||
|
|
||||||
- name: Build MetricCollector
|
- name: Build MetricCollector
|
||||||
run: make
|
run: make
|
||||||
@@ -38,246 +34,27 @@ jobs:
|
|||||||
run: ./cc-metric-collector --once --config .github/ci-config.json
|
run: ./cc-metric-collector --once --config .github/ci-config.json
|
||||||
|
|
||||||
#
|
#
|
||||||
# Build on AlmaLinux 8
|
# Job build-1-19
|
||||||
|
# Build on latest Ubuntu using golang version 1.19
|
||||||
#
|
#
|
||||||
AlmaLinux8-RPM-build:
|
build-1-19:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# See: https://hub.docker.com/_/almalinux
|
|
||||||
container: almalinux:8
|
|
||||||
# The job outputs link to the outputs of the 'rpmrename' step
|
|
||||||
# Only job outputs can be used in child jobs
|
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
# Use dnf to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
|
|
||||||
dnf --assumeyes install wget openssl-devel diffutils delve which
|
|
||||||
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
# See: https://github.com/marketplace/actions/checkout
|
||||||
|
# Checkout git repository and submodules
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
# See: https://github.com/marketplace/actions/setup-go-environment
|
||||||
# - name: Setup Golang
|
|
||||||
# uses: actions/setup-go@v5
|
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
- name: Setup Golang
|
||||||
run: |
|
uses: actions/setup-go@v3
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
|
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
|
||||||
id: rpmbuild
|
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on AlmaLinux 9
|
|
||||||
#
|
|
||||||
AlmaLinux9-RPM-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
# See: https://hub.docker.com/_/almalinux
|
|
||||||
container: almalinux:9
|
|
||||||
# The job outputs link to the outputs of the 'rpmrename' step
|
|
||||||
# Only job outputs can be used in child jobs
|
|
||||||
steps:
|
|
||||||
|
|
||||||
# Use dnf to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
|
|
||||||
dnf --assumeyes install wget openssl-devel diffutils delve which
|
|
||||||
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
go-version: '1.19'
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
- name: Build MetricCollector
|
||||||
# - name: Setup Golang
|
run: make
|
||||||
# uses: actions/setup-go@v5
|
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
|
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
- name: Run MetricCollector once
|
||||||
id: rpmbuild
|
run: ./cc-metric-collector --once --config .github/ci-config.json
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on UBI 8 using go-toolset
|
|
||||||
#
|
|
||||||
UBI-8-RPM-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
|
|
||||||
container: redhat/ubi8
|
|
||||||
# The job outputs link to the outputs of the 'rpmbuild' step
|
|
||||||
steps:
|
|
||||||
|
|
||||||
# Use dnf to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which
|
|
||||||
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
|
||||||
# - name: Setup Golang
|
|
||||||
# uses: actions/setup-go@v5
|
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/go-toolset-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-bin-1.22.9-1.module_el8.10.0+3938+8c723e16.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/8/AppStream/x86_64/os/Packages/golang-src-1.22.9-1.module_el8.10.0+3938+8c723e16.noarch.rpm
|
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
|
||||||
id: rpmbuild
|
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on UBI 9 using go-toolset
|
|
||||||
#
|
|
||||||
UBI-9-RPM-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
|
|
||||||
container: redhat/ubi9
|
|
||||||
# The job outputs link to the outputs of the 'rpmbuild' step
|
|
||||||
steps:
|
|
||||||
|
|
||||||
# Use dnf to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros gcc make python39 git wget openssl-devel diffutils delve
|
|
||||||
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
|
||||||
# - name: Setup Golang
|
|
||||||
# uses: actions/setup-go@v5
|
|
||||||
# with:
|
|
||||||
# go-version: 'stable'
|
|
||||||
- name: Setup Golang
|
|
||||||
run: |
|
|
||||||
dnf --assumeyes --disableplugin=subscription-manager install \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/go-toolset-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-bin-1.22.7-2.el9_5.x86_64.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-src-1.22.7-2.el9_5.noarch.rpm \
|
|
||||||
https://repo.almalinux.org/almalinux/9/AppStream/x86_64/os/Packages/golang-race-1.22.7-2.el9_5.x86_64.rpm
|
|
||||||
|
|
||||||
- name: RPM build MetricCollector
|
|
||||||
id: rpmbuild
|
|
||||||
run: |
|
|
||||||
git config --global --add safe.directory /__w/cc-metric-collector/cc-metric-collector
|
|
||||||
make RPM
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on Ubuntu 22.04 using official go package
|
|
||||||
#
|
|
||||||
Ubuntu-jammy-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container: ubuntu:22.04
|
|
||||||
|
|
||||||
steps:
|
|
||||||
# Use apt to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: |
|
|
||||||
apt update && apt --assume-yes upgrade
|
|
||||||
apt --assume-yes install build-essential sed git wget bash
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
fetch-depth: 0
|
|
||||||
# Use official golang package
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
|
||||||
- name: Setup Golang
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: 'stable'
|
|
||||||
|
|
||||||
- name: DEB build MetricCollector
|
|
||||||
id: dpkg-build
|
|
||||||
run: |
|
|
||||||
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
|
|
||||||
make DEB
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build on Ubuntu 24.04 using official go package
|
|
||||||
#
|
|
||||||
Ubuntu-noblenumbat-build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container: ubuntu:24.04
|
|
||||||
|
|
||||||
steps:
|
|
||||||
# Use apt to install development packages
|
|
||||||
- name: Install development packages
|
|
||||||
run: |
|
|
||||||
apt update && apt --assume-yes upgrade
|
|
||||||
apt --assume-yes install build-essential sed git wget bash
|
|
||||||
# Checkout git repository and submodules
|
|
||||||
# fetch-depth must be 0 to use git describe
|
|
||||||
# See: https://github.com/marketplace/actions/checkout
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
fetch-depth: 0
|
|
||||||
# Use official golang package
|
|
||||||
# See: https://github.com/marketplace/actions/setup-go-environment
|
|
||||||
- name: Setup Golang
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: 'stable'
|
|
||||||
|
|
||||||
- name: DEB build MetricCollector
|
|
||||||
id: dpkg-build
|
|
||||||
run: |
|
|
||||||
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
|
|
||||||
make DEB
|
|
||||||
|
|||||||
29
.zenodo.json
29
.zenodo.json
@@ -1,29 +0,0 @@
|
|||||||
{
|
|
||||||
"title": "cc-metric-collector",
|
|
||||||
"description": "Monitoring agent for ClusterCockpit.",
|
|
||||||
"creators": [
|
|
||||||
{
|
|
||||||
"affiliation": "Regionales Rechenzentrum Erlangen, Friedrich-Alexander-Universität Erlangen-Nürnberg",
|
|
||||||
"name": "Thomas Gruber",
|
|
||||||
"orcid": "0000-0001-5560-6964"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"affiliation": "Steinbuch Centre for Computing, Karlsruher Institut für Technologie",
|
|
||||||
"name": "Holger Obermaier",
|
|
||||||
"orcid": "0000-0002-6830-6626"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"upload_type": "software",
|
|
||||||
"license": "MIT",
|
|
||||||
"access_right": "open",
|
|
||||||
"keywords": [
|
|
||||||
"performance-monitoring",
|
|
||||||
"cluster-monitoring",
|
|
||||||
"open-source"
|
|
||||||
],
|
|
||||||
"communities": [
|
|
||||||
{
|
|
||||||
"identifier": "clustercockpit"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
29
Makefile
29
Makefile
@@ -22,7 +22,7 @@ GOBIN = $(shell which go)
|
|||||||
.PHONY: all
|
.PHONY: all
|
||||||
all: $(APP)
|
all: $(APP)
|
||||||
|
|
||||||
$(APP): $(GOSRC) go.mod
|
$(APP): $(GOSRC)
|
||||||
make -C collectors
|
make -C collectors
|
||||||
$(GOBIN) get
|
$(GOBIN) get
|
||||||
$(GOBIN) build -o $(APP) $(GOSRC_APP)
|
$(GOBIN) build -o $(APP) $(GOSRC_APP)
|
||||||
@@ -84,7 +84,7 @@ RPM: scripts/cc-metric-collector.spec
|
|||||||
@COMMITISH="HEAD"
|
@COMMITISH="HEAD"
|
||||||
@VERS=$$(git describe --tags $${COMMITISH})
|
@VERS=$$(git describe --tags $${COMMITISH})
|
||||||
@VERS=$${VERS#v}
|
@VERS=$${VERS#v}
|
||||||
@VERS=$$(echo $${VERS} | sed -e s+'-'+'_'+g)
|
@VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
|
||||||
@eval $$(rpmspec --query --queryformat "NAME='%{name}' VERSION='%{version}' RELEASE='%{release}' NVR='%{NVR}' NVRA='%{NVRA}'" --define="VERS $${VERS}" "$${SPECFILE}")
|
@eval $$(rpmspec --query --queryformat "NAME='%{name}' VERSION='%{version}' RELEASE='%{release}' NVR='%{NVR}' NVRA='%{NVRA}'" --define="VERS $${VERS}" "$${SPECFILE}")
|
||||||
@PREFIX="$${NAME}-$${VERSION}"
|
@PREFIX="$${NAME}-$${VERSION}"
|
||||||
@FORMAT="tar.gz"
|
@FORMAT="tar.gz"
|
||||||
@@ -96,8 +96,10 @@ RPM: scripts/cc-metric-collector.spec
|
|||||||
@if [[ "$${GITHUB_ACTIONS}" == true ]]; then
|
@if [[ "$${GITHUB_ACTIONS}" == true ]]; then
|
||||||
@ RPMFILE="$${RPMDIR}/$${ARCH}/$${NVRA}.rpm"
|
@ RPMFILE="$${RPMDIR}/$${ARCH}/$${NVRA}.rpm"
|
||||||
@ SRPMFILE="$${SRPMDIR}/$${NVR}.src.rpm"
|
@ SRPMFILE="$${SRPMDIR}/$${NVR}.src.rpm"
|
||||||
@ echo "SRPM=$${SRPMFILE}" >> $${GITHUB_OUTPUT}
|
@ echo "RPM: $${RPMFILE}"
|
||||||
@ echo "RPM=$${RPMFILE}" >> $${GITHUB_OUTPUT}
|
@ echo "SRPM: $${SRPMFILE}"
|
||||||
|
@ echo "::set-output name=SRPM::$${SRPMFILE}"
|
||||||
|
@ echo "::set-output name=RPM::$${RPMFILE}"
|
||||||
@fi
|
@fi
|
||||||
|
|
||||||
.PHONY: DEB
|
.PHONY: DEB
|
||||||
@@ -106,24 +108,21 @@ DEB: scripts/cc-metric-collector.deb.control $(APP)
|
|||||||
@WORKSPACE=$${PWD}/.dpkgbuild
|
@WORKSPACE=$${PWD}/.dpkgbuild
|
||||||
@DEBIANDIR=$${WORKSPACE}/debian
|
@DEBIANDIR=$${WORKSPACE}/debian
|
||||||
@DEBIANBINDIR=$${WORKSPACE}/DEBIAN
|
@DEBIANBINDIR=$${WORKSPACE}/DEBIAN
|
||||||
@mkdir --parents --verbose $${WORKSPACE} $${DEBIANBINDIR}
|
@mkdir --parents --verbose $$WORKSPACE $$DEBIANBINDIR
|
||||||
#@mkdir --parents --verbose $$DEBIANDIR
|
#@mkdir --parents --verbose $$DEBIANDIR
|
||||||
@CONTROLFILE="$${BASEDIR}/scripts/cc-metric-collector.deb.control"
|
@CONTROLFILE="$${BASEDIR}/scripts/cc-metric-collector.deb.control"
|
||||||
@COMMITISH="HEAD"
|
@COMMITISH="HEAD"
|
||||||
@VERS=$$(git describe --tags --abbrev=0 $${COMMITISH})
|
@VERS=$$(git describe --tags --abbrev=0 $${COMMITISH})
|
||||||
@if [ -z "$${VERS}" ]; then VERS=${GITHUB_REF_NAME}; fi
|
|
||||||
@VERS=$${VERS#v}
|
@VERS=$${VERS#v}
|
||||||
|
@VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g)
|
||||||
@ARCH=$$(uname -m)
|
@ARCH=$$(uname -m)
|
||||||
@ARCH=$$(echo $${ARCH} | sed -e s+'_'+'-'+g)
|
@ARCH=$$(echo $$ARCH | sed -e s+'_'+'-'+g)
|
||||||
@if [ "$${ARCH}" = "x86-64" ]; then ARCH=amd64; fi
|
|
||||||
@PREFIX="$${NAME}-$${VERSION}_$${ARCH}"
|
@PREFIX="$${NAME}-$${VERSION}_$${ARCH}"
|
||||||
@SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$${WORKSPACE}"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//')
|
@SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$$WORKSPACE"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//')
|
||||||
@SIZE="$$(awk -v size="$${SIZE_BYTES}" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')"
|
@SIZE="$$(awk -v size="$$SIZE_BYTES" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')"
|
||||||
@sed -e s+"{VERSION}"+"$${VERS}"+g -e s+"{INSTALLED_SIZE}"+"$${SIZE}"+g -e s+"{ARCH}"+"$${ARCH}"+g $${CONTROLFILE} > $${DEBIANBINDIR}/control
|
#@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANDIR}/control
|
||||||
|
@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANBINDIR}/control
|
||||||
@make PREFIX=$${WORKSPACE} install
|
@make PREFIX=$${WORKSPACE} install
|
||||||
@DEB_FILE="cc-metric-collector_$${VERS}_$${ARCH}.deb"
|
@DEB_FILE="cc-metric-collector_$${VERS}_$${ARCH}.deb"
|
||||||
@dpkg-deb -b $${WORKSPACE} "$${DEB_FILE}"
|
@dpkg-deb -b $${WORKSPACE} "$$DEB_FILE"
|
||||||
@if [ "$${GITHUB_ACTIONS}" = "true" ]; then
|
|
||||||
@ echo "DEB=$${DEB_FILE}" >> $${GITHUB_OUTPUT}
|
|
||||||
@fi
|
|
||||||
@rm -r "$${WORKSPACE}"
|
@rm -r "$${WORKSPACE}"
|
||||||
|
|||||||
@@ -8,10 +8,6 @@ There is a single timer loop that triggers all collectors serially, collects the
|
|||||||
|
|
||||||
The receiver runs as a go routine side-by-side with the timer loop and asynchronously forwards received metrics to the sink.
|
The receiver runs as a go routine side-by-side with the timer loop and asynchronously forwards received metrics to the sink.
|
||||||
|
|
||||||
|
|
||||||
[](https://doi.org/10.5281/zenodo.7438287)
|
|
||||||
|
|
||||||
|
|
||||||
# Configuration
|
# Configuration
|
||||||
|
|
||||||
Configuration is implemented using a single json document that is distributed over network and may be persisted as file.
|
Configuration is implemented using a single json document that is distributed over network and may be persisted as file.
|
||||||
|
|||||||
@@ -7,24 +7,39 @@ import (
|
|||||||
"os/signal"
|
"os/signal"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-lib/receivers"
|
|
||||||
"github.com/ClusterCockpit/cc-lib/sinks"
|
|
||||||
"github.com/ClusterCockpit/cc-metric-collector/collectors"
|
"github.com/ClusterCockpit/cc-metric-collector/collectors"
|
||||||
|
"github.com/ClusterCockpit/cc-metric-collector/receivers"
|
||||||
|
"github.com/ClusterCockpit/cc-metric-collector/sinks"
|
||||||
|
|
||||||
// "strings"
|
// "strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
ccconf "github.com/ClusterCockpit/cc-lib/ccConfig"
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
|
mr "github.com/ClusterCockpit/cc-metric-collector/internal/metricRouter"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CentralConfigFile struct {
|
type CentralConfigFile struct {
|
||||||
Interval string `json:"interval"`
|
Interval string `json:"interval"`
|
||||||
Duration string `json:"duration"`
|
Duration string `json:"duration"`
|
||||||
|
CollectorConfigFile string `json:"collectors"`
|
||||||
|
RouterConfigFile string `json:"router"`
|
||||||
|
SinkConfigFile string `json:"sinks"`
|
||||||
|
ReceiverConfigFile string `json:"receivers,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadCentralConfiguration(file string, config *CentralConfigFile) error {
|
||||||
|
configFile, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Error(err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer configFile.Close()
|
||||||
|
jsonParser := json.NewDecoder(configFile)
|
||||||
|
err = jsonParser.Decode(config)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
type RuntimeConfig struct {
|
type RuntimeConfig struct {
|
||||||
@@ -39,7 +54,7 @@ type RuntimeConfig struct {
|
|||||||
ReceiveManager receivers.ReceiveManager
|
ReceiveManager receivers.ReceiveManager
|
||||||
MultiChanTicker mct.MultiChanTicker
|
MultiChanTicker mct.MultiChanTicker
|
||||||
|
|
||||||
Channels []chan lp.CCMessage
|
Channels []chan lp.CCMetric
|
||||||
Sync sync.WaitGroup
|
Sync sync.WaitGroup
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -72,7 +87,7 @@ func ReadCli() map[string]string {
|
|||||||
cfg := flag.String("config", "./config.json", "Path to configuration file")
|
cfg := flag.String("config", "./config.json", "Path to configuration file")
|
||||||
logfile := flag.String("log", "stderr", "Path for logfile")
|
logfile := flag.String("log", "stderr", "Path for logfile")
|
||||||
once := flag.Bool("once", false, "Run all collectors only once")
|
once := flag.Bool("once", false, "Run all collectors only once")
|
||||||
loglevel := flag.String("loglevel", "info", "Set log level")
|
debug := flag.Bool("debug", false, "Activate debug output")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
m = make(map[string]string)
|
m = make(map[string]string)
|
||||||
m["configfile"] = *cfg
|
m["configfile"] = *cfg
|
||||||
@@ -82,7 +97,12 @@ func ReadCli() map[string]string {
|
|||||||
} else {
|
} else {
|
||||||
m["once"] = "false"
|
m["once"] = "false"
|
||||||
}
|
}
|
||||||
m["loglevel"] = *loglevel
|
if *debug {
|
||||||
|
m["debug"] = "true"
|
||||||
|
cclog.SetDebug()
|
||||||
|
} else {
|
||||||
|
m["debug"] = "false"
|
||||||
|
}
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -147,15 +167,8 @@ func mainFunc() int {
|
|||||||
CliArgs: ReadCli(),
|
CliArgs: ReadCli(),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set loglevel based on command line input.
|
|
||||||
cclog.Init(rcfg.CliArgs["loglevel"], false)
|
|
||||||
|
|
||||||
// Init ccConfig with configuration file
|
|
||||||
ccconf.Init(rcfg.CliArgs["configfile"])
|
|
||||||
|
|
||||||
// Load and check configuration
|
// Load and check configuration
|
||||||
main := ccconf.GetPackageConfig("main")
|
err = LoadCentralConfiguration(rcfg.CliArgs["configfile"], &rcfg.ConfigFile)
|
||||||
err = json.Unmarshal(main, &rcfg.ConfigFile)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Error("Error reading configuration file ", rcfg.CliArgs["configfile"], ": ", err.Error())
|
cclog.Error("Error reading configuration file ", rcfg.CliArgs["configfile"], ": ", err.Error())
|
||||||
return 1
|
return 1
|
||||||
@@ -191,74 +204,70 @@ func mainFunc() int {
|
|||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
routerConf := ccconf.GetPackageConfig("router")
|
if len(rcfg.ConfigFile.RouterConfigFile) == 0 {
|
||||||
if len(routerConf) == 0 {
|
|
||||||
cclog.Error("Metric router configuration file must be set")
|
cclog.Error("Metric router configuration file must be set")
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
sinkConf := ccconf.GetPackageConfig("sinks")
|
if len(rcfg.ConfigFile.SinkConfigFile) == 0 {
|
||||||
if len(sinkConf) == 0 {
|
|
||||||
cclog.Error("Sink configuration file must be set")
|
cclog.Error("Sink configuration file must be set")
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
collectorConf := ccconf.GetPackageConfig("collectors")
|
if len(rcfg.ConfigFile.CollectorConfigFile) == 0 {
|
||||||
if len(collectorConf) == 0 {
|
|
||||||
cclog.Error("Metric collector configuration file must be set")
|
cclog.Error("Metric collector configuration file must be set")
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set log file
|
// Set log file
|
||||||
// if logfile := rcfg.CliArgs["logfile"]; logfile != "stderr" {
|
if logfile := rcfg.CliArgs["logfile"]; logfile != "stderr" {
|
||||||
// cclog.SetOutput(logfile)
|
cclog.SetOutput(logfile)
|
||||||
// }
|
}
|
||||||
|
|
||||||
// Creat new multi channel ticker
|
// Creat new multi channel ticker
|
||||||
rcfg.MultiChanTicker = mct.NewTicker(rcfg.Interval)
|
rcfg.MultiChanTicker = mct.NewTicker(rcfg.Interval)
|
||||||
|
|
||||||
// Create new metric router
|
// Create new metric router
|
||||||
rcfg.MetricRouter, err = mr.New(rcfg.MultiChanTicker, &rcfg.Sync, routerConf)
|
rcfg.MetricRouter, err = mr.New(rcfg.MultiChanTicker, &rcfg.Sync, rcfg.ConfigFile.RouterConfigFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Error(err.Error())
|
cclog.Error(err.Error())
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create new sink
|
// Create new sink
|
||||||
rcfg.SinkManager, err = sinks.New(&rcfg.Sync, sinkConf)
|
rcfg.SinkManager, err = sinks.New(&rcfg.Sync, rcfg.ConfigFile.SinkConfigFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Error(err.Error())
|
cclog.Error(err.Error())
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Connect metric router to sink manager
|
// Connect metric router to sink manager
|
||||||
RouterToSinksChannel := make(chan lp.CCMessage, 200)
|
RouterToSinksChannel := make(chan lp.CCMetric, 200)
|
||||||
rcfg.SinkManager.AddInput(RouterToSinksChannel)
|
rcfg.SinkManager.AddInput(RouterToSinksChannel)
|
||||||
rcfg.MetricRouter.AddOutput(RouterToSinksChannel)
|
rcfg.MetricRouter.AddOutput(RouterToSinksChannel)
|
||||||
|
|
||||||
// Create new collector manager
|
// Create new collector manager
|
||||||
rcfg.CollectManager, err = collectors.New(rcfg.MultiChanTicker, rcfg.Duration, &rcfg.Sync, collectorConf)
|
rcfg.CollectManager, err = collectors.New(rcfg.MultiChanTicker, rcfg.Duration, &rcfg.Sync, rcfg.ConfigFile.CollectorConfigFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Error(err.Error())
|
cclog.Error(err.Error())
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Connect collector manager to metric router
|
// Connect collector manager to metric router
|
||||||
CollectToRouterChannel := make(chan lp.CCMessage, 200)
|
CollectToRouterChannel := make(chan lp.CCMetric, 200)
|
||||||
rcfg.CollectManager.AddOutput(CollectToRouterChannel)
|
rcfg.CollectManager.AddOutput(CollectToRouterChannel)
|
||||||
rcfg.MetricRouter.AddCollectorInput(CollectToRouterChannel)
|
rcfg.MetricRouter.AddCollectorInput(CollectToRouterChannel)
|
||||||
|
|
||||||
// Create new receive manager
|
// Create new receive manager
|
||||||
receiveConf := ccconf.GetPackageConfig("receivers")
|
if len(rcfg.ConfigFile.ReceiverConfigFile) > 0 {
|
||||||
if len(receiveConf) > 0 {
|
rcfg.ReceiveManager, err = receivers.New(&rcfg.Sync, rcfg.ConfigFile.ReceiverConfigFile)
|
||||||
rcfg.ReceiveManager, err = receivers.New(&rcfg.Sync, receiveConf)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Error(err.Error())
|
cclog.Error(err.Error())
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Connect receive manager to metric router
|
// Connect receive manager to metric router
|
||||||
ReceiveToRouterChannel := make(chan lp.CCMessage, 200)
|
ReceiveToRouterChannel := make(chan lp.CCMetric, 200)
|
||||||
rcfg.ReceiveManager.AddOutput(ReceiveToRouterChannel)
|
rcfg.ReceiveManager.AddOutput(ReceiveToRouterChannel)
|
||||||
rcfg.MetricRouter.AddReceiverInput(ReceiveToRouterChannel)
|
rcfg.MetricRouter.AddReceiverInput(ReceiveToRouterChannel)
|
||||||
use_recv = true
|
use_recv = true
|
||||||
|
|||||||
@@ -1,33 +1,31 @@
|
|||||||
# LIKWID version
|
# LIKWID version
|
||||||
LIKWID_VERSION := 5.4.1
|
LIKWID_VERSION = 5.2.1
|
||||||
LIKWID_INSTALLED_FOLDER := $(shell dirname $$(which likwid-topology 2>/dev/null) 2>/dev/null)
|
LIKWID_INSTALLED_FOLDER=$(shell dirname $(shell which likwid-topology 2>/dev/null) 2>/dev/null)
|
||||||
|
|
||||||
LIKWID_FOLDER := $(CURDIR)/likwid
|
LIKWID_FOLDER="$(shell pwd)/likwid"
|
||||||
|
|
||||||
all: likwid
|
all: $(LIKWID_FOLDER)/likwid.h
|
||||||
|
|
||||||
.ONESHELL:
|
.ONESHELL:
|
||||||
.PHONY: likwid
|
.PHONY: $(LIKWID_FOLDER)/likwid.h
|
||||||
likwid:
|
$(LIKWID_FOLDER)/likwid.h:
|
||||||
if [ -n "$(LIKWID_INSTALLED_FOLDER)" ]; then
|
if [ "$(LIKWID_INSTALLED_FOLDER)" != "" ]; then \
|
||||||
# Using likwid include files from system installation
|
BASE="$(LIKWID_INSTALLED_FOLDER)/../include"; \
|
||||||
INCLUDE_DIR="$(LIKWID_INSTALLED_FOLDER)/../include"
|
mkdir -p $(LIKWID_FOLDER); \
|
||||||
mkdir --parents --verbose "$(LIKWID_FOLDER)"
|
cp $$BASE/*.h $(LIKWID_FOLDER); \
|
||||||
cp "$${INCLUDE_DIR}"/*.h "$(LIKWID_FOLDER)"
|
else \
|
||||||
else
|
BUILD_FOLDER="$${PWD}/likwidbuild"; \
|
||||||
# Using likwid include files from downloaded tar archive
|
if [ -d $(LIKWID_FOLDER) ]; then rm -r $(LIKWID_FOLDER); fi; \
|
||||||
if [ -d "$(LIKWID_FOLDER)" ]; then
|
mkdir --parents --verbose $(LIKWID_FOLDER) $${BUILD_FOLDER}; \
|
||||||
rm --recursive "$(LIKWID_FOLDER)"
|
wget -P "$${BUILD_FOLDER}" http://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz; \
|
||||||
fi
|
tar -C $${BUILD_FOLDER} -xf $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION).tar.gz; \
|
||||||
BUILD_FOLDER="$${PWD}/likwidbuild"
|
install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/likwid*.h $(LIKWID_FOLDER)/; \
|
||||||
mkdir --parents --verbose "$${BUILD_FOLDER}"
|
install -Dpm 0644 $${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes/bstrlib.h $(LIKWID_FOLDER)/; \
|
||||||
wget --output-document=- http://ftp.rrze.uni-erlangen.de/mirrors/likwid/likwid-$(LIKWID_VERSION).tar.gz |
|
rm -r $${BUILD_FOLDER}; \
|
||||||
tar --directory="$${BUILD_FOLDER}" --extract --gz
|
|
||||||
install -D --verbose --preserve-timestamps --mode=0644 --target-directory="$(LIKWID_FOLDER)" "$${BUILD_FOLDER}/likwid-$(LIKWID_VERSION)/src/includes"/likwid*.h
|
|
||||||
rm --recursive "$${BUILD_FOLDER}"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
.PHONY: clean
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf likwid
|
rm -rf likwid
|
||||||
|
|
||||||
|
.PHONY: clean
|
||||||
|
|||||||
@@ -14,8 +14,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
const DEFAULT_BEEGFS_CMD = "beegfs-ctl"
|
const DEFAULT_BEEGFS_CMD = "beegfs-ctl"
|
||||||
@@ -110,7 +110,7 @@ func (m *BeegfsMetaCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -216,7 +216,7 @@ func (m *BeegfsMetaCollector) Read(interval time.Duration, output chan lp.CCMess
|
|||||||
|
|
||||||
for key, data := range m.matches {
|
for key, data := range m.matches {
|
||||||
value, _ := strconv.ParseFloat(data, 32)
|
value, _ := strconv.ParseFloat(data, 32)
|
||||||
y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,8 +14,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Struct for the collector-specific JSON config
|
// Struct for the collector-specific JSON config
|
||||||
@@ -103,7 +103,7 @@ func (m *BeegfsStorageCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -208,7 +208,7 @@ func (m *BeegfsStorageCollector) Read(interval time.Duration, output chan lp.CCM
|
|||||||
|
|
||||||
for key, data := range m.matches {
|
for key, data := range m.matches {
|
||||||
value, _ := strconv.ParseFloat(data, 32)
|
value, _ := strconv.ParseFloat(data, 32)
|
||||||
y, err := lp.NewMessage(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
y, err := lp.New(key, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,11 +2,12 @@ package collectors
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -35,18 +36,16 @@ var AvailableCollectors = map[string]MetricCollector{
|
|||||||
"numastats": new(NUMAStatsCollector),
|
"numastats": new(NUMAStatsCollector),
|
||||||
"beegfs_meta": new(BeegfsMetaCollector),
|
"beegfs_meta": new(BeegfsMetaCollector),
|
||||||
"beegfs_storage": new(BeegfsStorageCollector),
|
"beegfs_storage": new(BeegfsStorageCollector),
|
||||||
"rapl": new(RAPLCollector),
|
|
||||||
"rocm_smi": new(RocmSmiCollector),
|
"rocm_smi": new(RocmSmiCollector),
|
||||||
"self": new(SelfCollector),
|
"smartmon": new(SmartMonCollector),
|
||||||
"schedstat": new(SchedstatCollector),
|
"schedstat": new(SchedstatCollector),
|
||||||
"nfsiostat": new(NfsIOStatCollector),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metric collector manager data structure
|
// Metric collector manager data structure
|
||||||
type collectorManager struct {
|
type collectorManager struct {
|
||||||
collectors []MetricCollector // List of metric collectors to read in parallel
|
collectors []MetricCollector // List of metric collectors to read in parallel
|
||||||
serial []MetricCollector // List of metric collectors to read serially
|
serial []MetricCollector // List of metric collectors to read serially
|
||||||
output chan lp.CCMessage // Output channels
|
output chan lp.CCMetric // Output channels
|
||||||
done chan bool // channel to finish / stop metric collector manager
|
done chan bool // channel to finish / stop metric collector manager
|
||||||
ticker mct.MultiChanTicker // periodically ticking once each interval
|
ticker mct.MultiChanTicker // periodically ticking once each interval
|
||||||
duration time.Duration // duration (for metrics that measure over a given duration)
|
duration time.Duration // duration (for metrics that measure over a given duration)
|
||||||
@@ -58,8 +57,8 @@ type collectorManager struct {
|
|||||||
|
|
||||||
// Metric collector manager access functions
|
// Metric collector manager access functions
|
||||||
type CollectorManager interface {
|
type CollectorManager interface {
|
||||||
Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfig json.RawMessage) error
|
Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error
|
||||||
AddOutput(output chan lp.CCMessage)
|
AddOutput(output chan lp.CCMetric)
|
||||||
Start()
|
Start()
|
||||||
Close()
|
Close()
|
||||||
}
|
}
|
||||||
@@ -71,7 +70,7 @@ type CollectorManager interface {
|
|||||||
// * ticker (from variable ticker)
|
// * ticker (from variable ticker)
|
||||||
// * configuration (read from config file in variable collectConfigFile)
|
// * configuration (read from config file in variable collectConfigFile)
|
||||||
// Initialization is done for all configured collectors
|
// Initialization is done for all configured collectors
|
||||||
func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfig json.RawMessage) error {
|
func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) error {
|
||||||
cm.collectors = make([]MetricCollector, 0)
|
cm.collectors = make([]MetricCollector, 0)
|
||||||
cm.serial = make([]MetricCollector, 0)
|
cm.serial = make([]MetricCollector, 0)
|
||||||
cm.output = nil
|
cm.output = nil
|
||||||
@@ -80,7 +79,15 @@ func (cm *collectorManager) Init(ticker mct.MultiChanTicker, duration time.Durat
|
|||||||
cm.ticker = ticker
|
cm.ticker = ticker
|
||||||
cm.duration = duration
|
cm.duration = duration
|
||||||
|
|
||||||
err := json.Unmarshal(collectConfig, &cm.config)
|
// Read collector config file
|
||||||
|
configFile, err := os.Open(collectConfigFile)
|
||||||
|
if err != nil {
|
||||||
|
cclog.Error(err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer configFile.Close()
|
||||||
|
jsonParser := json.NewDecoder(configFile)
|
||||||
|
err = jsonParser.Decode(&cm.config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.Error(err.Error())
|
cclog.Error(err.Error())
|
||||||
return err
|
return err
|
||||||
@@ -178,7 +185,7 @@ func (cm *collectorManager) Start() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// AddOutput adds the output channel to the metric collector manager
|
// AddOutput adds the output channel to the metric collector manager
|
||||||
func (cm *collectorManager) AddOutput(output chan lp.CCMessage) {
|
func (cm *collectorManager) AddOutput(output chan lp.CCMetric) {
|
||||||
cm.output = output
|
cm.output = output
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -191,9 +198,9 @@ func (cm *collectorManager) Close() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new initialized metric collector manager
|
// New creates a new initialized metric collector manager
|
||||||
func New(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfig json.RawMessage) (CollectorManager, error) {
|
func New(ticker mct.MultiChanTicker, duration time.Duration, wg *sync.WaitGroup, collectConfigFile string) (CollectorManager, error) {
|
||||||
cm := new(collectorManager)
|
cm := new(collectorManager)
|
||||||
err := cm.Init(ticker, duration, wg, collectConfig)
|
err := cm.Init(ticker, duration, wg, collectConfigFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,22 +10,33 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
//
|
||||||
// CPUFreqCollector
|
// CPUFreqCollector
|
||||||
// a metric collector to measure the current frequency of the CPUs
|
// a metric collector to measure the current frequency of the CPUs
|
||||||
// as obtained from /proc/cpuinfo
|
// as obtained from /proc/cpuinfo
|
||||||
// Only measure on the first hyperthread
|
// Only measure on the first hyperthread
|
||||||
|
//
|
||||||
type CPUFreqCpuInfoCollectorTopology struct {
|
type CPUFreqCpuInfoCollectorTopology struct {
|
||||||
|
processor string // logical processor number (continuous, starting at 0)
|
||||||
|
coreID string // socket local core ID
|
||||||
|
coreID_int int64
|
||||||
|
physicalPackageID string // socket / package ID
|
||||||
|
physicalPackageID_int int64
|
||||||
|
numPhysicalPackages string // number of sockets / packages
|
||||||
|
numPhysicalPackages_int int64
|
||||||
isHT bool
|
isHT bool
|
||||||
|
numNonHT string // number of non hyperthreading processors
|
||||||
|
numNonHT_int int64
|
||||||
tagSet map[string]string
|
tagSet map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
type CPUFreqCpuInfoCollector struct {
|
type CPUFreqCpuInfoCollector struct {
|
||||||
metricCollector
|
metricCollector
|
||||||
topology []CPUFreqCpuInfoCollectorTopology
|
topology []*CPUFreqCpuInfoCollectorTopology
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
|
func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
|
||||||
@@ -54,9 +65,11 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
|
|||||||
// Collect topology information from file cpuinfo
|
// Collect topology information from file cpuinfo
|
||||||
foundFreq := false
|
foundFreq := false
|
||||||
processor := ""
|
processor := ""
|
||||||
|
var numNonHT_int int64 = 0
|
||||||
coreID := ""
|
coreID := ""
|
||||||
physicalPackageID := ""
|
physicalPackageID := ""
|
||||||
m.topology = make([]CPUFreqCpuInfoCollectorTopology, 0)
|
var maxPhysicalPackageID int64 = 0
|
||||||
|
m.topology = make([]*CPUFreqCpuInfoCollectorTopology, 0)
|
||||||
coreSeenBefore := make(map[string]bool)
|
coreSeenBefore := make(map[string]bool)
|
||||||
|
|
||||||
// Read cpuinfo file, line by line
|
// Read cpuinfo file, line by line
|
||||||
@@ -85,22 +98,41 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
|
|||||||
len(coreID) > 0 &&
|
len(coreID) > 0 &&
|
||||||
len(physicalPackageID) > 0 {
|
len(physicalPackageID) > 0 {
|
||||||
|
|
||||||
|
topology := new(CPUFreqCpuInfoCollectorTopology)
|
||||||
|
|
||||||
|
// Processor
|
||||||
|
topology.processor = processor
|
||||||
|
|
||||||
|
// Core ID
|
||||||
|
topology.coreID = coreID
|
||||||
|
topology.coreID_int, err = strconv.ParseInt(coreID, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Physical package ID
|
||||||
|
topology.physicalPackageID = physicalPackageID
|
||||||
|
topology.physicalPackageID_int, err = strconv.ParseInt(physicalPackageID, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to convert physicalPackageID '%s' to int64: %v", physicalPackageID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// increase maximun socket / package ID, when required
|
||||||
|
if topology.physicalPackageID_int > maxPhysicalPackageID {
|
||||||
|
maxPhysicalPackageID = topology.physicalPackageID_int
|
||||||
|
}
|
||||||
|
|
||||||
|
// is hyperthread?
|
||||||
globalID := physicalPackageID + ":" + coreID
|
globalID := physicalPackageID + ":" + coreID
|
||||||
|
topology.isHT = coreSeenBefore[globalID]
|
||||||
|
coreSeenBefore[globalID] = true
|
||||||
|
if !topology.isHT {
|
||||||
|
// increase number on non hyper thread cores
|
||||||
|
numNonHT_int++
|
||||||
|
}
|
||||||
|
|
||||||
// store collected topology information
|
// store collected topology information
|
||||||
m.topology = append(m.topology,
|
m.topology = append(m.topology, topology)
|
||||||
CPUFreqCpuInfoCollectorTopology{
|
|
||||||
isHT: coreSeenBefore[globalID],
|
|
||||||
tagSet: map[string]string{
|
|
||||||
"type": "hwthread",
|
|
||||||
"type-id": processor,
|
|
||||||
"package_id": physicalPackageID,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
// mark core as seen before
|
|
||||||
coreSeenBefore[globalID] = true
|
|
||||||
|
|
||||||
// reset topology information
|
// reset topology information
|
||||||
foundFreq = false
|
foundFreq = false
|
||||||
@@ -110,16 +142,26 @@ func (m *CPUFreqCpuInfoCollector) Init(config json.RawMessage) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if at least one CPU with frequency information was detected
|
numPhysicalPackageID_int := maxPhysicalPackageID + 1
|
||||||
if len(m.topology) == 0 {
|
numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
|
||||||
return fmt.Errorf("no CPU frequency info found in %s", cpuInfoFile)
|
numNonHT := fmt.Sprint(numNonHT_int)
|
||||||
|
for _, t := range m.topology {
|
||||||
|
t.numPhysicalPackages = numPhysicalPackageID
|
||||||
|
t.numPhysicalPackages_int = numPhysicalPackageID_int
|
||||||
|
t.numNonHT = numNonHT
|
||||||
|
t.numNonHT_int = numNonHT_int
|
||||||
|
t.tagSet = map[string]string{
|
||||||
|
"type": "hwthread",
|
||||||
|
"type-id": t.processor,
|
||||||
|
"package_id": t.physicalPackageID,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m.init = true
|
m.init = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
// Check if already initialized
|
// Check if already initialized
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
@@ -154,7 +196,7 @@ func (m *CPUFreqCpuInfoCollector) Read(interval time.Duration, output chan lp.CC
|
|||||||
fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err))
|
fmt.Sprintf("Read(): Failed to convert cpu MHz '%s' to float64: %v", lineSplit[1], err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
|
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": value}, now); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
## `cpufreq_cpuinfo` collector
|
|
||||||
|
|
||||||
|
## `cpufreq_cpuinfo` collector
|
||||||
```json
|
```json
|
||||||
"cpufreq_cpuinfo": {}
|
"cpufreq_cpuinfo": {}
|
||||||
```
|
```
|
||||||
@@ -7,5 +7,4 @@
|
|||||||
The `cpufreq_cpuinfo` collector reads the clock frequency from `/proc/cpuinfo` and outputs a handful **hwthread** metrics.
|
The `cpufreq_cpuinfo` collector reads the clock frequency from `/proc/cpuinfo` and outputs a handful **hwthread** metrics.
|
||||||
|
|
||||||
Metrics:
|
Metrics:
|
||||||
|
|
||||||
* `cpufreq`
|
* `cpufreq`
|
||||||
|
|||||||
@@ -9,23 +9,34 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
"github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
|
|
||||||
"golang.org/x/sys/unix"
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CPUFreqCollectorTopology struct {
|
type CPUFreqCollectorTopology struct {
|
||||||
|
processor string // logical processor number (continuous, starting at 0)
|
||||||
|
coreID string // socket local core ID
|
||||||
|
coreID_int int64
|
||||||
|
physicalPackageID string // socket / package ID
|
||||||
|
physicalPackageID_int int64
|
||||||
|
numPhysicalPackages string // number of sockets / packages
|
||||||
|
numPhysicalPackages_int int64
|
||||||
|
isHT bool
|
||||||
|
numNonHT string // number of non hyperthreading processors
|
||||||
|
numNonHT_int int64
|
||||||
scalingCurFreqFile string
|
scalingCurFreqFile string
|
||||||
tagSet map[string]string
|
tagSet map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//
|
||||||
// CPUFreqCollector
|
// CPUFreqCollector
|
||||||
// a metric collector to measure the current frequency of the CPUs
|
// a metric collector to measure the current frequency of the CPUs
|
||||||
// as obtained from the hardware (in KHz)
|
// as obtained from the hardware (in KHz)
|
||||||
// Only measure on the first hyper-thread
|
// Only measure on the first hyper thread
|
||||||
//
|
//
|
||||||
// See: https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html
|
// See: https://www.kernel.org/doc/html/latest/admin-guide/pm/cpufreq.html
|
||||||
|
//
|
||||||
type CPUFreqCollector struct {
|
type CPUFreqCollector struct {
|
||||||
metricCollector
|
metricCollector
|
||||||
topology []CPUFreqCollectorTopology
|
topology []CPUFreqCollectorTopology
|
||||||
@@ -55,43 +66,113 @@ func (m *CPUFreqCollector) Init(config json.RawMessage) error {
|
|||||||
"unit": "Hz",
|
"unit": "Hz",
|
||||||
}
|
}
|
||||||
|
|
||||||
m.topology = make([]CPUFreqCollectorTopology, 0)
|
// Loop for all CPU directories
|
||||||
for _, c := range ccTopology.CpuData() {
|
baseDir := "/sys/devices/system/cpu"
|
||||||
|
globPattern := filepath.Join(baseDir, "cpu[0-9]*")
|
||||||
|
cpuDirs, err := filepath.Glob(globPattern)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to glob files with pattern '%s': %v", globPattern, err)
|
||||||
|
}
|
||||||
|
if cpuDirs == nil {
|
||||||
|
return fmt.Errorf("unable to find any files with pattern '%s'", globPattern)
|
||||||
|
}
|
||||||
|
|
||||||
// Skip hyper threading CPUs
|
// Initialize CPU topology
|
||||||
if c.CpuID != c.CoreCPUsList[0] {
|
m.topology = make([]CPUFreqCollectorTopology, len(cpuDirs))
|
||||||
continue
|
for _, cpuDir := range cpuDirs {
|
||||||
|
processor := strings.TrimPrefix(cpuDir, "/sys/devices/system/cpu/cpu")
|
||||||
|
processor_int, err := strconv.ParseInt(processor, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to convert cpuID '%s' to int64: %v", processor, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read package ID
|
||||||
|
physicalPackageIDFile := filepath.Join(cpuDir, "topology", "physical_package_id")
|
||||||
|
line, err := os.ReadFile(physicalPackageIDFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to read physical package ID from file '%s': %v", physicalPackageIDFile, err)
|
||||||
|
}
|
||||||
|
physicalPackageID := strings.TrimSpace(string(line))
|
||||||
|
physicalPackageID_int, err := strconv.ParseInt(physicalPackageID, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to convert packageID '%s' to int64: %v", physicalPackageID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read core ID
|
||||||
|
coreIDFile := filepath.Join(cpuDir, "topology", "core_id")
|
||||||
|
line, err = os.ReadFile(coreIDFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to read core ID from file '%s': %v", coreIDFile, err)
|
||||||
|
}
|
||||||
|
coreID := strings.TrimSpace(string(line))
|
||||||
|
coreID_int, err := strconv.ParseInt(coreID, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to convert coreID '%s' to int64: %v", coreID, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check access to current frequency file
|
// Check access to current frequency file
|
||||||
scalingCurFreqFile := filepath.Join("/sys/devices/system/cpu", fmt.Sprintf("cpu%d", c.CpuID), "cpufreq/scaling_cur_freq")
|
scalingCurFreqFile := filepath.Join(cpuDir, "cpufreq", "scaling_cur_freq")
|
||||||
err := unix.Access(scalingCurFreqFile, unix.R_OK)
|
err = unix.Access(scalingCurFreqFile, unix.R_OK)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unable to access file '%s': %v", scalingCurFreqFile, err)
|
return fmt.Errorf("unable to access file '%s': %v", scalingCurFreqFile, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
m.topology = append(m.topology,
|
t := &m.topology[processor_int]
|
||||||
CPUFreqCollectorTopology{
|
t.processor = processor
|
||||||
tagSet: map[string]string{
|
t.physicalPackageID = physicalPackageID
|
||||||
"type": "hwthread",
|
t.physicalPackageID_int = physicalPackageID_int
|
||||||
"type-id": fmt.Sprint(c.CpuID),
|
t.coreID = coreID
|
||||||
"package_id": fmt.Sprint(c.Socket),
|
t.coreID_int = coreID_int
|
||||||
},
|
t.scalingCurFreqFile = scalingCurFreqFile
|
||||||
scalingCurFreqFile: scalingCurFreqFile,
|
}
|
||||||
},
|
|
||||||
)
|
// is processor a hyperthread?
|
||||||
|
coreSeenBefore := make(map[string]bool)
|
||||||
|
for i := range m.topology {
|
||||||
|
t := &m.topology[i]
|
||||||
|
|
||||||
|
globalID := t.physicalPackageID + ":" + t.coreID
|
||||||
|
t.isHT = coreSeenBefore[globalID]
|
||||||
|
coreSeenBefore[globalID] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// number of non hyper thread cores and packages / sockets
|
||||||
|
var numNonHT_int int64 = 0
|
||||||
|
var maxPhysicalPackageID int64 = 0
|
||||||
|
for i := range m.topology {
|
||||||
|
t := &m.topology[i]
|
||||||
|
|
||||||
|
// Update maxPackageID
|
||||||
|
if t.physicalPackageID_int > maxPhysicalPackageID {
|
||||||
|
maxPhysicalPackageID = t.physicalPackageID_int
|
||||||
|
}
|
||||||
|
|
||||||
|
if !t.isHT {
|
||||||
|
numNonHT_int++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
numPhysicalPackageID_int := maxPhysicalPackageID + 1
|
||||||
|
numPhysicalPackageID := fmt.Sprint(numPhysicalPackageID_int)
|
||||||
|
numNonHT := fmt.Sprint(numNonHT_int)
|
||||||
|
for i := range m.topology {
|
||||||
|
t := &m.topology[i]
|
||||||
|
t.numPhysicalPackages = numPhysicalPackageID
|
||||||
|
t.numPhysicalPackages_int = numPhysicalPackageID_int
|
||||||
|
t.numNonHT = numNonHT
|
||||||
|
t.numNonHT_int = numNonHT_int
|
||||||
|
t.tagSet = map[string]string{
|
||||||
|
"type": "hwthread",
|
||||||
|
"type-id": t.processor,
|
||||||
|
"package_id": t.physicalPackageID,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialized
|
|
||||||
cclog.ComponentDebug(
|
|
||||||
m.name,
|
|
||||||
"initialized",
|
|
||||||
len(m.topology), "non-hyper-threading CPUs")
|
|
||||||
m.init = true
|
m.init = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
// Check if already initialized
|
// Check if already initialized
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
@@ -101,6 +182,11 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
for i := range m.topology {
|
for i := range m.topology {
|
||||||
t := &m.topology[i]
|
t := &m.topology[i]
|
||||||
|
|
||||||
|
// skip hyperthreads
|
||||||
|
if t.isHT {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
// Read current frequency
|
// Read current frequency
|
||||||
line, err := os.ReadFile(t.scalingCurFreqFile)
|
line, err := os.ReadFile(t.scalingCurFreqFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -117,7 +203,7 @@ func (m *CPUFreqCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if y, err := lp.NewMessage("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
|
if y, err := lp.New("cpufreq", t.tagSet, m.meta, map[string]interface{}{"value": cpuFreq}, now); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
## `cpufreq_cpuinfo` collector
|
## `cpufreq_cpuinfo` collector
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"cpufreq": {
|
"cpufreq": {
|
||||||
"exclude_metrics": []
|
"exclude_metrics": []
|
||||||
@@ -9,5 +8,4 @@
|
|||||||
The `cpufreq` collector reads the clock frequency from `/sys/devices/system/cpu/cpu*/cpufreq` and outputs a handful **hwthread** metrics.
|
The `cpufreq` collector reads the clock frequency from `/sys/devices/system/cpu/cpu*/cpufreq` and outputs a handful **hwthread** metrics.
|
||||||
|
|
||||||
Metrics:
|
Metrics:
|
||||||
|
|
||||||
* `cpufreq`
|
* `cpufreq`
|
||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
sysconf "github.com/tklauser/go-sysconf"
|
sysconf "github.com/tklauser/go-sysconf"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -34,7 +34,7 @@ func (m *CpustatCollector) Init(config json.RawMessage) error {
|
|||||||
m.name = "CpustatCollector"
|
m.name = "CpustatCollector"
|
||||||
m.setup()
|
m.setup()
|
||||||
m.parallel = true
|
m.parallel = true
|
||||||
m.meta = map[string]string{"source": m.name, "group": "CPU"}
|
m.meta = map[string]string{"source": m.name, "group": "CPU", "unit": "Percent"}
|
||||||
m.nodetags = map[string]string{"type": "node"}
|
m.nodetags = map[string]string{"type": "node"}
|
||||||
if len(config) > 0 {
|
if len(config) > 0 {
|
||||||
err := json.Unmarshal(config, &m.config)
|
err := json.Unmarshal(config, &m.config)
|
||||||
@@ -105,7 +105,7 @@ func (m *CpustatCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) {
|
func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]string, output chan lp.CCMetric, now time.Time, tsdelta time.Duration) {
|
||||||
values := make(map[string]float64)
|
values := make(map[string]float64)
|
||||||
clktck, _ := sysconf.Sysconf(sysconf.SC_CLK_TCK)
|
clktck, _ := sysconf.Sysconf(sysconf.SC_CLK_TCK)
|
||||||
for match, index := range m.matches {
|
for match, index := range m.matches {
|
||||||
@@ -119,26 +119,15 @@ func (m *CpustatCollector) parseStatLine(linefields []string, tags map[string]st
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
sum := float64(0)
|
|
||||||
for name, value := range values {
|
for name, value := range values {
|
||||||
sum += value
|
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value * 100}, now)
|
||||||
y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value * 100}, now)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("unit", "Percent")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if v, ok := values["cpu_idle"]; ok {
|
|
||||||
sum -= v
|
|
||||||
y, err := lp.NewMessage("cpu_used", tags, m.meta, map[string]interface{}{"value": sum * 100}, now)
|
|
||||||
if err == nil {
|
|
||||||
y.AddTag("unit", "Percent")
|
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -164,7 +153,7 @@ func (m *CpustatCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
num_cpus_metric, err := lp.NewMessage("num_cpus",
|
num_cpus_metric, err := lp.New("num_cpus",
|
||||||
m.nodetags,
|
m.nodetags,
|
||||||
m.meta,
|
m.meta,
|
||||||
map[string]interface{}{"value": int(num_cpus)},
|
map[string]interface{}{"value": int(num_cpus)},
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
|
|
||||||
## `cpustat` collector
|
## `cpustat` collector
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"cpustat": {
|
"cpustat": {
|
||||||
"exclude_metrics": [
|
"exclude_metrics": [
|
||||||
@@ -9,19 +8,16 @@
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `cpustat` collector reads data from `/proc/stat` and outputs a handful **node** and **hwthread** metrics. If a metric is not required, it can be excluded from forwarding it to the sink.
|
The `cpustat` collector reads data from `/proc/stats` and outputs a handful **node** and **hwthread** metrics. If a metric is not required, it can be excluded from forwarding it to the sink.
|
||||||
|
|
||||||
Metrics:
|
Metrics:
|
||||||
|
* `cpu_user`
|
||||||
* `cpu_user` with `unit=Percent`
|
* `cpu_nice`
|
||||||
* `cpu_nice` with `unit=Percent`
|
* `cpu_system`
|
||||||
* `cpu_system` with `unit=Percent`
|
* `cpu_idle`
|
||||||
* `cpu_idle` with `unit=Percent`
|
* `cpu_iowait`
|
||||||
* `cpu_iowait` with `unit=Percent`
|
* `cpu_irq`
|
||||||
* `cpu_irq` with `unit=Percent`
|
* `cpu_softirq`
|
||||||
* `cpu_softirq` with `unit=Percent`
|
* `cpu_steal`
|
||||||
* `cpu_steal` with `unit=Percent`
|
* `cpu_guest`
|
||||||
* `cpu_guest` with `unit=Percent`
|
* `cpu_guest_nice`
|
||||||
* `cpu_guest_nice` with `unit=Percent`
|
|
||||||
* `cpu_used` = `cpu_* - cpu_idle` with `unit=Percent`
|
|
||||||
* `num_cpus`
|
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
influx "github.com/influxdata/line-protocol"
|
influx "github.com/influxdata/line-protocol"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -48,7 +48,7 @@ func (m *CustomCmdCollector) Init(config json.RawMessage) error {
|
|||||||
command := exec.Command(cmdfields[0], strings.Join(cmdfields[1:], " "))
|
command := exec.Command(cmdfields[0], strings.Join(cmdfields[1:], " "))
|
||||||
command.Wait()
|
command.Wait()
|
||||||
_, err = command.Output()
|
_, err = command.Output()
|
||||||
if err == nil {
|
if err != nil {
|
||||||
m.commands = append(m.commands, c)
|
m.commands = append(m.commands, c)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -75,7 +75,7 @@ var DefaultTime = func() time.Time {
|
|||||||
return time.Unix(42, 0)
|
return time.Unix(42, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -99,7 +99,10 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessa
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
output <- lp.FromInfluxMetric(c)
|
y := lp.FromInfluxMetric(c)
|
||||||
|
if err == nil {
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, file := range m.files {
|
for _, file := range m.files {
|
||||||
@@ -118,7 +121,10 @@ func (m *CustomCmdCollector) Read(interval time.Duration, output chan lp.CCMessa
|
|||||||
if skip {
|
if skip {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
output <- lp.FromInfluxMetric(f)
|
y := lp.FromInfluxMetric(f)
|
||||||
|
if err == nil {
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,21 +8,23 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// "log"
|
||||||
|
|
||||||
const MOUNTFILE = `/proc/self/mounts`
|
const MOUNTFILE = `/proc/self/mounts`
|
||||||
|
|
||||||
type DiskstatCollectorConfig struct {
|
type DiskstatCollectorConfig struct {
|
||||||
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
|
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
|
||||||
ExcludeMounts []string `json:"exclude_mounts,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type DiskstatCollector struct {
|
type DiskstatCollector struct {
|
||||||
metricCollector
|
metricCollector
|
||||||
config DiskstatCollectorConfig
|
//matches map[string]int
|
||||||
allowedMetrics map[string]bool
|
config IOstatCollectorConfig
|
||||||
|
//devices map[string]IOstatCollectorEntry
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *DiskstatCollector) Init(config json.RawMessage) error {
|
func (m *DiskstatCollector) Init(config json.RawMessage) error {
|
||||||
@@ -31,21 +33,12 @@ func (m *DiskstatCollector) Init(config json.RawMessage) error {
|
|||||||
m.meta = map[string]string{"source": m.name, "group": "Disk"}
|
m.meta = map[string]string{"source": m.name, "group": "Disk"}
|
||||||
m.setup()
|
m.setup()
|
||||||
if len(config) > 0 {
|
if len(config) > 0 {
|
||||||
if err := json.Unmarshal(config, &m.config); err != nil {
|
err := json.Unmarshal(config, &m.config)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m.allowedMetrics = map[string]bool{
|
file, err := os.Open(string(MOUNTFILE))
|
||||||
"disk_total": true,
|
|
||||||
"disk_free": true,
|
|
||||||
"part_max_used": true,
|
|
||||||
}
|
|
||||||
for _, excl := range m.config.ExcludeMetrics {
|
|
||||||
if _, ok := m.allowedMetrics[excl]; ok {
|
|
||||||
m.allowedMetrics[excl] = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file, err := os.Open(MOUNTFILE)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(m.name, err.Error())
|
cclog.ComponentError(m.name, err.Error())
|
||||||
return err
|
return err
|
||||||
@@ -55,12 +48,12 @@ func (m *DiskstatCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err := os.Open(MOUNTFILE)
|
file, err := os.Open(string(MOUNTFILE))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(m.name, err.Error())
|
cclog.ComponentError(m.name, err.Error())
|
||||||
return
|
return
|
||||||
@@ -69,7 +62,6 @@ func (m *DiskstatCollector) Read(interval time.Duration, output chan lp.CCMessag
|
|||||||
|
|
||||||
part_max_used := uint64(0)
|
part_max_used := uint64(0)
|
||||||
scanner := bufio.NewScanner(file)
|
scanner := bufio.NewScanner(file)
|
||||||
mountLoop:
|
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
line := scanner.Text()
|
line := scanner.Text()
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
@@ -85,17 +77,13 @@ mountLoop:
|
|||||||
if strings.Contains(linefields[1], "boot") {
|
if strings.Contains(linefields[1], "boot") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
path := strings.Replace(linefields[1], `\040`, " ", -1)
|
||||||
mountPath := strings.Replace(linefields[1], `\040`, " ", -1)
|
stat := syscall.Statfs_t{
|
||||||
|
Blocks: 0,
|
||||||
for _, excl := range m.config.ExcludeMounts {
|
Bsize: 0,
|
||||||
if strings.Contains(mountPath, excl) {
|
Bfree: 0,
|
||||||
continue mountLoop
|
|
||||||
}
|
}
|
||||||
}
|
err := syscall.Statfs(path, &stat)
|
||||||
|
|
||||||
stat := syscall.Statfs_t{}
|
|
||||||
err := syscall.Statfs(mountPath, &stat)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -104,21 +92,17 @@ mountLoop:
|
|||||||
}
|
}
|
||||||
tags := map[string]string{"type": "node", "device": linefields[0]}
|
tags := map[string]string{"type": "node", "device": linefields[0]}
|
||||||
total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000)
|
total := (stat.Blocks * uint64(stat.Bsize)) / uint64(1000000000)
|
||||||
if m.allowedMetrics["disk_total"] {
|
y, err := lp.New("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
|
||||||
y, err := lp.NewMessage("disk_total", tags, m.meta, map[string]interface{}{"value": total}, time.Now())
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "GBytes")
|
y.AddMeta("unit", "GBytes")
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
|
||||||
free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000)
|
free := (stat.Bfree * uint64(stat.Bsize)) / uint64(1000000000)
|
||||||
if m.allowedMetrics["disk_free"] {
|
y, err = lp.New("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
|
||||||
y, err := lp.NewMessage("disk_free", tags, m.meta, map[string]interface{}{"value": free}, time.Now())
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "GBytes")
|
y.AddMeta("unit", "GBytes")
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if total > 0 {
|
if total > 0 {
|
||||||
perc := (100 * (total - free)) / total
|
perc := (100 * (total - free)) / total
|
||||||
if perc > part_max_used {
|
if perc > part_max_used {
|
||||||
@@ -126,13 +110,11 @@ mountLoop:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if m.allowedMetrics["part_max_used"] {
|
y, err := lp.New("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
|
||||||
y, err := lp.NewMessage("part_max_used", map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": int(part_max_used)}, time.Now())
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "percent")
|
y.AddMeta("unit", "percent")
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *DiskstatCollector) Close() {
|
func (m *DiskstatCollector) Close() {
|
||||||
|
|||||||
@@ -6,13 +6,10 @@
|
|||||||
"exclude_metrics": [
|
"exclude_metrics": [
|
||||||
"disk_total"
|
"disk_total"
|
||||||
],
|
],
|
||||||
"exclude_mounts": [
|
|
||||||
"slurm-tmpfs"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `diskstat` collector reads data from `/proc/self/mounts` and outputs a handful **node** metrics. If a metric is not required, it can be excluded from forwarding it to the sink. Additionally, any mount point containing one of the strings specified in `exclude_mounts` will be skipped during metric collection.
|
The `diskstat` collector reads data from `/proc/self/mounts` and outputs a handful **node** metrics. If a metric is not required, it can be excluded from forwarding it to the sink.
|
||||||
|
|
||||||
Metrics per device (with `device` tag):
|
Metrics per device (with `device` tag):
|
||||||
* `disk_total` (unit `GBytes`)
|
* `disk_total` (unit `GBytes`)
|
||||||
|
|||||||
@@ -13,8 +13,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
const DEFAULT_GPFS_CMD = "mmpmon"
|
const DEFAULT_GPFS_CMD = "mmpmon"
|
||||||
@@ -31,7 +31,6 @@ type GpfsCollector struct {
|
|||||||
Mmpmon string `json:"mmpmon_path,omitempty"`
|
Mmpmon string `json:"mmpmon_path,omitempty"`
|
||||||
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
|
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
|
||||||
SendBandwidths bool `json:"send_bandwidths"`
|
SendBandwidths bool `json:"send_bandwidths"`
|
||||||
SendTotalValues bool `json:"send_total_values"`
|
|
||||||
}
|
}
|
||||||
skipFS map[string]struct{}
|
skipFS map[string]struct{}
|
||||||
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
|
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
|
||||||
@@ -94,7 +93,7 @@ func (m *GpfsCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
// Check if already initialized
|
// Check if already initialized
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
@@ -217,33 +216,13 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert bytes read '%s' to int64: %v", key_value["_br_"], err))
|
fmt.Sprintf("Read(): Failed to convert bytes read '%s' to int64: %v", key_value["_br_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err :=
|
if y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp); err == nil {
|
||||||
lp.NewMessage(
|
|
||||||
"gpfs_bytes_read",
|
|
||||||
m.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": bytesRead,
|
|
||||||
},
|
|
||||||
timestamp,
|
|
||||||
); err == nil {
|
|
||||||
y.AddMeta("unit", "bytes")
|
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
if m.config.SendBandwidths {
|
if m.config.SendBandwidths {
|
||||||
if lastBytesRead := m.lastState[filesystem].bytesRead; lastBytesRead >= 0 {
|
if lastBytesRead := m.lastState[filesystem].bytesRead; lastBytesRead >= 0 {
|
||||||
bwRead := float64(bytesRead-lastBytesRead) / timeDiff
|
bwRead := float64(bytesRead-lastBytesRead) / timeDiff
|
||||||
if y, err :=
|
if y, err := lp.New("gpfs_bw_read", m.tags, m.meta, map[string]interface{}{"value": bwRead}, timestamp); err == nil {
|
||||||
lp.NewMessage(
|
|
||||||
"gpfs_bw_read",
|
|
||||||
m.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": bwRead,
|
|
||||||
},
|
|
||||||
timestamp,
|
|
||||||
); err == nil {
|
|
||||||
y.AddMeta("unit", "bytes/sec")
|
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -257,33 +236,13 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert bytes written '%s' to int64: %v", key_value["_bw_"], err))
|
fmt.Sprintf("Read(): Failed to convert bytes written '%s' to int64: %v", key_value["_bw_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err :=
|
if y, err := lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp); err == nil {
|
||||||
lp.NewMessage(
|
|
||||||
"gpfs_bytes_written",
|
|
||||||
m.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": bytesWritten,
|
|
||||||
},
|
|
||||||
timestamp,
|
|
||||||
); err == nil {
|
|
||||||
y.AddMeta("unit", "bytes")
|
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
if m.config.SendBandwidths {
|
if m.config.SendBandwidths {
|
||||||
if lastBytesWritten := m.lastState[filesystem].bytesRead; lastBytesWritten >= 0 {
|
if lastBytesWritten := m.lastState[filesystem].bytesRead; lastBytesWritten >= 0 {
|
||||||
bwWrite := float64(bytesWritten-lastBytesWritten) / timeDiff
|
bwWrite := float64(bytesWritten-lastBytesWritten) / timeDiff
|
||||||
if y, err :=
|
if y, err := lp.New("gpfs_bw_write", m.tags, m.meta, map[string]interface{}{"value": bwWrite}, timestamp); err == nil {
|
||||||
lp.NewMessage(
|
|
||||||
"gpfs_bw_write",
|
|
||||||
m.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": bwWrite,
|
|
||||||
},
|
|
||||||
timestamp,
|
|
||||||
); err == nil {
|
|
||||||
y.AddMeta("unit", "bytes/sec")
|
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -304,7 +263,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err))
|
fmt.Sprintf("Read(): Failed to convert number of opens '%s' to int64: %v", key_value["_oc_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err := lp.NewMessage("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
|
if y, err := lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -316,7 +275,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err))
|
fmt.Sprintf("Read(): Failed to convert number of closes: '%s' to int64: %v", key_value["_cc_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err := lp.NewMessage("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
|
if y, err := lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -328,7 +287,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err))
|
fmt.Sprintf("Read(): Failed to convert number of reads: '%s' to int64: %v", key_value["_rdc_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err := lp.NewMessage("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
|
if y, err := lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -340,7 +299,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err))
|
fmt.Sprintf("Read(): Failed to convert number of writes: '%s' to int64: %v", key_value["_wc_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err := lp.NewMessage("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
|
if y, err := lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -352,7 +311,7 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err))
|
fmt.Sprintf("Read(): Failed to convert number of read directories: '%s' to int64: %v", key_value["_dir_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err := lp.NewMessage("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
|
if y, err := lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -364,50 +323,9 @@ func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err))
|
fmt.Sprintf("Read(): Failed to convert number of inode updates: '%s' to int: %v", key_value["_iu_"], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if y, err := lp.NewMessage("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
|
if y, err := lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|
||||||
// Total values
|
|
||||||
if m.config.SendTotalValues {
|
|
||||||
bytesTotal := bytesRead + bytesWritten
|
|
||||||
if y, err :=
|
|
||||||
lp.NewMessage("gpfs_bytes_total",
|
|
||||||
m.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": bytesTotal,
|
|
||||||
},
|
|
||||||
timestamp,
|
|
||||||
); err == nil {
|
|
||||||
y.AddMeta("unit", "bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
iops := numReads + numWrites
|
|
||||||
if y, err :=
|
|
||||||
lp.NewMessage("gpfs_iops",
|
|
||||||
m.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": iops,
|
|
||||||
},
|
|
||||||
timestamp,
|
|
||||||
); err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
metaops := numInodeUpdates + numCloses + numOpens + numReaddirs
|
|
||||||
if y, err :=
|
|
||||||
lp.NewMessage("gpfs_metaops",
|
|
||||||
m.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": metaops,
|
|
||||||
},
|
|
||||||
timestamp,
|
|
||||||
); err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,8 +6,7 @@
|
|||||||
"exclude_filesystem": [
|
"exclude_filesystem": [
|
||||||
"fs1"
|
"fs1"
|
||||||
],
|
],
|
||||||
"send_bandwidths": true,
|
"send_bandwidths" : true
|
||||||
"send_total_values": true
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -27,12 +26,8 @@ Metrics:
|
|||||||
* `gpfs_num_opens`
|
* `gpfs_num_opens`
|
||||||
* `gpfs_num_closes`
|
* `gpfs_num_closes`
|
||||||
* `gpfs_num_reads`
|
* `gpfs_num_reads`
|
||||||
* `gpfs_num_writes`
|
|
||||||
* `gpfs_num_readdirs`
|
* `gpfs_num_readdirs`
|
||||||
* `gpfs_num_inode_updates`
|
* `gpfs_num_inode_updates`
|
||||||
* `gpfs_bytes_total = gpfs_bytes_read + gpfs_bytes_written` (if `send_total_values == true`)
|
|
||||||
* `gpfs_iops = gpfs_num_reads + gpfs_num_writes` (if `send_total_values == true`)
|
|
||||||
* `gpfs_metaops = gpfs_num_inode_updates + gpfs_num_closes + gpfs_num_opens + gpfs_num_readdirs` (if `send_total_values == true`)
|
|
||||||
* `gpfs_bw_read` (if `send_bandwidths == true`)
|
* `gpfs_bw_read` (if `send_bandwidths == true`)
|
||||||
* `gpfs_bw_write` (if `send_bandwidths == true`)
|
* `gpfs_bw_write` (if `send_bandwidths == true`)
|
||||||
|
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
"golang.org/x/sys/unix"
|
"golang.org/x/sys/unix"
|
||||||
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -18,22 +18,18 @@ import (
|
|||||||
const IB_BASEPATH = "/sys/class/infiniband/"
|
const IB_BASEPATH = "/sys/class/infiniband/"
|
||||||
|
|
||||||
type InfinibandCollectorMetric struct {
|
type InfinibandCollectorMetric struct {
|
||||||
name string
|
|
||||||
path string
|
path string
|
||||||
unit string
|
unit string
|
||||||
scale int64
|
scale int64
|
||||||
addToIBTotal bool
|
|
||||||
addToIBTotalPkgs bool
|
|
||||||
currentState int64
|
|
||||||
lastState int64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type InfinibandCollectorInfo struct {
|
type InfinibandCollectorInfo struct {
|
||||||
LID string // IB local Identifier (LID)
|
LID string // IB local Identifier (LID)
|
||||||
device string // IB device
|
device string // IB device
|
||||||
port string // IB device port
|
port string // IB device port
|
||||||
portCounterFiles []InfinibandCollectorMetric // mapping counter name -> InfinibandCollectorMetric
|
portCounterFiles map[string]InfinibandCollectorMetric // mapping counter name -> InfinibandCollectorMetric
|
||||||
tagSet map[string]string // corresponding tag list
|
tagSet map[string]string // corresponding tag list
|
||||||
|
lastState map[string]int64 // State from last measurement
|
||||||
}
|
}
|
||||||
|
|
||||||
type InfinibandCollector struct {
|
type InfinibandCollector struct {
|
||||||
@@ -41,10 +37,9 @@ type InfinibandCollector struct {
|
|||||||
config struct {
|
config struct {
|
||||||
ExcludeDevices []string `json:"exclude_devices,omitempty"` // IB device to exclude e.g. mlx5_0
|
ExcludeDevices []string `json:"exclude_devices,omitempty"` // IB device to exclude e.g. mlx5_0
|
||||||
SendAbsoluteValues bool `json:"send_abs_values"` // Send absolut values as read from sys filesystem
|
SendAbsoluteValues bool `json:"send_abs_values"` // Send absolut values as read from sys filesystem
|
||||||
SendTotalValues bool `json:"send_total_values"` // Send computed total values
|
|
||||||
SendDerivedValues bool `json:"send_derived_values"` // Send derived values e.g. rates
|
SendDerivedValues bool `json:"send_derived_values"` // Send derived values e.g. rates
|
||||||
}
|
}
|
||||||
info []InfinibandCollectorInfo
|
info []*InfinibandCollectorInfo
|
||||||
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
|
lastTimestamp time.Time // Store time stamp of last tick to derive bandwidths
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,39 +112,11 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
|
|||||||
|
|
||||||
// Check access to counter files
|
// Check access to counter files
|
||||||
countersDir := filepath.Join(path, "counters")
|
countersDir := filepath.Join(path, "counters")
|
||||||
portCounterFiles := []InfinibandCollectorMetric{
|
portCounterFiles := map[string]InfinibandCollectorMetric{
|
||||||
{
|
"ib_recv": {path: filepath.Join(countersDir, "port_rcv_data"), unit: "bytes", scale: 4},
|
||||||
name: "ib_recv",
|
"ib_xmit": {path: filepath.Join(countersDir, "port_xmit_data"), unit: "bytes", scale: 4},
|
||||||
path: filepath.Join(countersDir, "port_rcv_data"),
|
"ib_recv_pkts": {path: filepath.Join(countersDir, "port_rcv_packets"), unit: "packets", scale: 1},
|
||||||
unit: "bytes",
|
"ib_xmit_pkts": {path: filepath.Join(countersDir, "port_xmit_packets"), unit: "packets", scale: 1},
|
||||||
scale: 4,
|
|
||||||
addToIBTotal: true,
|
|
||||||
lastState: -1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "ib_xmit",
|
|
||||||
path: filepath.Join(countersDir, "port_xmit_data"),
|
|
||||||
unit: "bytes",
|
|
||||||
scale: 4,
|
|
||||||
addToIBTotal: true,
|
|
||||||
lastState: -1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "ib_recv_pkts",
|
|
||||||
path: filepath.Join(countersDir, "port_rcv_packets"),
|
|
||||||
unit: "packets",
|
|
||||||
scale: 1,
|
|
||||||
addToIBTotalPkgs: true,
|
|
||||||
lastState: -1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "ib_xmit_pkts",
|
|
||||||
path: filepath.Join(countersDir, "port_xmit_packets"),
|
|
||||||
unit: "packets",
|
|
||||||
scale: 1,
|
|
||||||
addToIBTotalPkgs: true,
|
|
||||||
lastState: -1,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
for _, counter := range portCounterFiles {
|
for _, counter := range portCounterFiles {
|
||||||
err := unix.Access(counter.path, unix.R_OK)
|
err := unix.Access(counter.path, unix.R_OK)
|
||||||
@@ -158,8 +125,14 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Initialize last state
|
||||||
|
lastState := make(map[string]int64)
|
||||||
|
for counter := range portCounterFiles {
|
||||||
|
lastState[counter] = -1
|
||||||
|
}
|
||||||
|
|
||||||
m.info = append(m.info,
|
m.info = append(m.info,
|
||||||
InfinibandCollectorInfo{
|
&InfinibandCollectorInfo{
|
||||||
LID: LID,
|
LID: LID,
|
||||||
device: device,
|
device: device,
|
||||||
port: port,
|
port: port,
|
||||||
@@ -170,6 +143,7 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
|
|||||||
"port": port,
|
"port": port,
|
||||||
"lid": LID,
|
"lid": LID,
|
||||||
},
|
},
|
||||||
|
lastState: lastState,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -182,7 +156,7 @@ func (m *InfinibandCollector) Init(config json.RawMessage) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Read reads Infiniband counter files below IB_BASEPATH
|
// Read reads Infiniband counter files below IB_BASEPATH
|
||||||
func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
|
|
||||||
// Check if already initialized
|
// Check if already initialized
|
||||||
if !m.init {
|
if !m.init {
|
||||||
@@ -196,12 +170,8 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
|
|||||||
// Save current timestamp
|
// Save current timestamp
|
||||||
m.lastTimestamp = now
|
m.lastTimestamp = now
|
||||||
|
|
||||||
for i := range m.info {
|
for _, info := range m.info {
|
||||||
info := &m.info[i]
|
for counterName, counterDef := range info.portCounterFiles {
|
||||||
|
|
||||||
var ib_total, ib_total_pkts int64
|
|
||||||
for i := range info.portCounterFiles {
|
|
||||||
counterDef := &info.portCounterFiles[i]
|
|
||||||
|
|
||||||
// Read counter file
|
// Read counter file
|
||||||
line, err := os.ReadFile(counterDef.path)
|
line, err := os.ReadFile(counterDef.path)
|
||||||
@@ -218,26 +188,15 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(
|
cclog.ComponentError(
|
||||||
m.name,
|
m.name,
|
||||||
fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterDef.name, data, err))
|
fmt.Sprintf("Read(): Failed to convert Infininiband metrice %s='%s' to int64: %v", counterName, data, err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Scale raw value
|
// Scale raw value
|
||||||
v *= counterDef.scale
|
v *= counterDef.scale
|
||||||
|
|
||||||
// Save current state
|
|
||||||
counterDef.currentState = v
|
|
||||||
|
|
||||||
// Send absolut values
|
// Send absolut values
|
||||||
if m.config.SendAbsoluteValues {
|
if m.config.SendAbsoluteValues {
|
||||||
if y, err :=
|
if y, err := lp.New(counterName, info.tagSet, m.meta, map[string]interface{}{"value": v}, now); err == nil {
|
||||||
lp.NewMessage(
|
|
||||||
counterDef.name,
|
|
||||||
info.tagSet,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": counterDef.currentState,
|
|
||||||
},
|
|
||||||
now); err == nil {
|
|
||||||
y.AddMeta("unit", counterDef.unit)
|
y.AddMeta("unit", counterDef.unit)
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -245,64 +204,18 @@ func (m *InfinibandCollector) Read(interval time.Duration, output chan lp.CCMess
|
|||||||
|
|
||||||
// Send derived values
|
// Send derived values
|
||||||
if m.config.SendDerivedValues {
|
if m.config.SendDerivedValues {
|
||||||
if counterDef.lastState >= 0 {
|
if info.lastState[counterName] >= 0 {
|
||||||
rate := float64((counterDef.currentState - counterDef.lastState)) / timeDiff
|
rate := float64((v - info.lastState[counterName])) / timeDiff
|
||||||
if y, err :=
|
if y, err := lp.New(counterName+"_bw", info.tagSet, m.meta, map[string]interface{}{"value": rate}, now); err == nil {
|
||||||
lp.NewMessage(
|
|
||||||
counterDef.name+"_bw",
|
|
||||||
info.tagSet,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": rate,
|
|
||||||
},
|
|
||||||
now); err == nil {
|
|
||||||
y.AddMeta("unit", counterDef.unit+"/sec")
|
y.AddMeta("unit", counterDef.unit+"/sec")
|
||||||
output <- y
|
output <- y
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
counterDef.lastState = counterDef.currentState
|
// Save current state
|
||||||
}
|
info.lastState[counterName] = v
|
||||||
|
|
||||||
// Sum up total values
|
|
||||||
if m.config.SendTotalValues {
|
|
||||||
switch {
|
|
||||||
case counterDef.addToIBTotal:
|
|
||||||
ib_total += counterDef.currentState
|
|
||||||
case counterDef.addToIBTotalPkgs:
|
|
||||||
ib_total_pkts += counterDef.currentState
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send total values
|
|
||||||
if m.config.SendTotalValues {
|
|
||||||
if y, err :=
|
|
||||||
lp.NewMessage(
|
|
||||||
"ib_total",
|
|
||||||
info.tagSet,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": ib_total,
|
|
||||||
},
|
|
||||||
now); err == nil {
|
|
||||||
y.AddMeta("unit", "bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
|
|
||||||
if y, err :=
|
|
||||||
lp.NewMessage(
|
|
||||||
"ib_total_pkts",
|
|
||||||
info.tagSet,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": ib_total_pkts,
|
|
||||||
},
|
|
||||||
now); err == nil {
|
|
||||||
y.AddMeta("unit", "packets")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -17,16 +17,13 @@ LID file (`/sys/class/infiniband/<dev>/ports/<port>/lid`)
|
|||||||
|
|
||||||
The devices can be filtered with the `exclude_devices` option in the configuration.
|
The devices can be filtered with the `exclude_devices` option in the configuration.
|
||||||
|
|
||||||
For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`. (See: <https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-class-infiniband>)
|
For each found LID the collector reads data through the sysfs files below `/sys/class/infiniband/<device>`.
|
||||||
|
|
||||||
Metrics:
|
Metrics:
|
||||||
|
|
||||||
* `ib_recv`
|
* `ib_recv`
|
||||||
* `ib_xmit`
|
* `ib_xmit`
|
||||||
* `ib_recv_pkts`
|
* `ib_recv_pkts`
|
||||||
* `ib_xmit_pkts`
|
* `ib_xmit_pkts`
|
||||||
* `ib_total = ib_recv + ib_xmit` (if `send_total_values == true`)
|
|
||||||
* `ib_total_pkts = ib_recv_pkts + ib_xmit_pkts` (if `send_total_values == true`)
|
|
||||||
* `ib_recv_bw` (if `send_derived_values == true`)
|
* `ib_recv_bw` (if `send_derived_values == true`)
|
||||||
* `ib_xmit_bw` (if `send_derived_values == true`)
|
* `ib_xmit_bw` (if `send_derived_values == true`)
|
||||||
* `ib_recv_pkts_bw` (if `send_derived_values == true`)
|
* `ib_recv_pkts_bw` (if `send_derived_values == true`)
|
||||||
|
|||||||
@@ -2,24 +2,24 @@ package collectors
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
|
||||||
|
// "log"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"os"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Konstante für den Pfad zu /proc/diskstats
|
|
||||||
const IOSTATFILE = `/proc/diskstats`
|
const IOSTATFILE = `/proc/diskstats`
|
||||||
|
const IOSTAT_SYSFSPATH = `/sys/block`
|
||||||
|
|
||||||
type IOstatCollectorConfig struct {
|
type IOstatCollectorConfig struct {
|
||||||
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
|
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
|
||||||
// Neues Feld zum Ausschließen von Devices per JSON-Konfiguration
|
|
||||||
ExcludeDevices []string `json:"exclude_devices,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type IOstatCollectorEntry struct {
|
type IOstatCollectorEntry struct {
|
||||||
@@ -76,7 +76,7 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
|
|||||||
if len(m.matches) == 0 {
|
if len(m.matches) == 0 {
|
||||||
return errors.New("no metrics to collect")
|
return errors.New("no metrics to collect")
|
||||||
}
|
}
|
||||||
file, err := os.Open(IOSTATFILE)
|
file, err := os.Open(string(IOSTATFILE))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(m.name, err.Error())
|
cclog.ComponentError(m.name, err.Error())
|
||||||
return err
|
return err
|
||||||
@@ -87,24 +87,17 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
|
|||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
line := scanner.Text()
|
line := scanner.Text()
|
||||||
linefields := strings.Fields(line)
|
linefields := strings.Fields(line)
|
||||||
if len(linefields) < 3 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
device := linefields[2]
|
device := linefields[2]
|
||||||
|
|
||||||
if strings.Contains(device, "loop") {
|
if strings.Contains(device, "loop") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if _, skip := stringArrayContains(m.config.ExcludeDevices, device); skip {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
values := make(map[string]int64)
|
values := make(map[string]int64)
|
||||||
for m := range m.matches {
|
for m := range m.matches {
|
||||||
values[m] = 0
|
values[m] = 0
|
||||||
}
|
}
|
||||||
m.devices[device] = IOstatCollectorEntry{
|
m.devices[device] = IOstatCollectorEntry{
|
||||||
tags: map[string]string{
|
tags: map[string]string{
|
||||||
"device": device,
|
"device": linefields[2],
|
||||||
"type": "node",
|
"type": "node",
|
||||||
},
|
},
|
||||||
lastValues: values,
|
lastValues: values,
|
||||||
@@ -114,12 +107,12 @@ func (m *IOstatCollector) Init(config json.RawMessage) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err := os.Open(IOSTATFILE)
|
file, err := os.Open(string(IOSTATFILE))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(m.name, err.Error())
|
cclog.ComponentError(m.name, err.Error())
|
||||||
return
|
return
|
||||||
@@ -133,16 +126,10 @@ func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage)
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
linefields := strings.Fields(line)
|
linefields := strings.Fields(line)
|
||||||
if len(linefields) < 3 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
device := linefields[2]
|
device := linefields[2]
|
||||||
if strings.Contains(device, "loop") {
|
if strings.Contains(device, "loop") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if _, skip := stringArrayContains(m.config.ExcludeDevices, device); skip {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := m.devices[device]; !ok {
|
if _, ok := m.devices[device]; !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -152,7 +139,7 @@ func (m *IOstatCollector) Read(interval time.Duration, output chan lp.CCMessage)
|
|||||||
x, err := strconv.ParseInt(linefields[idx], 0, 64)
|
x, err := strconv.ParseInt(linefields[idx], 0, 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
diff := x - entry.lastValues[name]
|
diff := x - entry.lastValues[name]
|
||||||
y, err := lp.NewMessage(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
|
y, err := lp.New(name, entry.tags, m.meta, map[string]interface{}{"value": int(diff)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,17 +4,12 @@
|
|||||||
```json
|
```json
|
||||||
"iostat": {
|
"iostat": {
|
||||||
"exclude_metrics": [
|
"exclude_metrics": [
|
||||||
"io_read_ms"
|
"read_ms"
|
||||||
],
|
],
|
||||||
"exclude_devices": [
|
|
||||||
"nvme0n1p1",
|
|
||||||
"nvme0n1p2",
|
|
||||||
"md127"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `iostat` collector reads data from `/proc/diskstats` and outputs a handful **node** metrics. If a metric or device is not required, it can be excluded from forwarding it to the sink.
|
The `iostat` collector reads data from `/proc/diskstats` and outputs a handful **node** metrics. If a metric is not required, it can be excluded from forwarding it to the sink.
|
||||||
|
|
||||||
Metrics:
|
Metrics:
|
||||||
* `io_reads`
|
* `io_reads`
|
||||||
|
|||||||
@@ -1,116 +1,86 @@
|
|||||||
package collectors
|
package collectors
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
"log"
|
||||||
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const IPMITOOL_PATH = `ipmitool`
|
||||||
const IPMISENSORS_PATH = `ipmi-sensors`
|
const IPMISENSORS_PATH = `ipmi-sensors`
|
||||||
|
|
||||||
type IpmiCollector struct {
|
type IpmiCollectorConfig struct {
|
||||||
metricCollector
|
|
||||||
config struct {
|
|
||||||
ExcludeDevices []string `json:"exclude_devices"`
|
ExcludeDevices []string `json:"exclude_devices"`
|
||||||
IpmitoolPath string `json:"ipmitool_path"`
|
IpmitoolPath string `json:"ipmitool_path"`
|
||||||
IpmisensorsPath string `json:"ipmisensors_path"`
|
IpmisensorsPath string `json:"ipmisensors_path"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type IpmiCollector struct {
|
||||||
|
metricCollector
|
||||||
|
//tags map[string]string
|
||||||
|
//matches map[string]string
|
||||||
|
config IpmiCollectorConfig
|
||||||
ipmitool string
|
ipmitool string
|
||||||
ipmisensors string
|
ipmisensors string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *IpmiCollector) Init(config json.RawMessage) error {
|
func (m *IpmiCollector) Init(config json.RawMessage) error {
|
||||||
// Check if already initialized
|
|
||||||
if m.init {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
m.name = "IpmiCollector"
|
m.name = "IpmiCollector"
|
||||||
m.setup()
|
m.setup()
|
||||||
m.parallel = true
|
m.parallel = true
|
||||||
m.meta = map[string]string{
|
m.meta = map[string]string{"source": m.name, "group": "IPMI"}
|
||||||
"source": m.name,
|
m.config.IpmitoolPath = string(IPMITOOL_PATH)
|
||||||
"group": "IPMI",
|
m.config.IpmisensorsPath = string(IPMISENSORS_PATH)
|
||||||
}
|
m.ipmitool = ""
|
||||||
// default path to IPMI tools
|
m.ipmisensors = ""
|
||||||
m.config.IpmitoolPath = "ipmitool"
|
|
||||||
m.config.IpmisensorsPath = "ipmi-sensors"
|
|
||||||
if len(config) > 0 {
|
if len(config) > 0 {
|
||||||
err := json.Unmarshal(config, &m.config)
|
err := json.Unmarshal(config, &m.config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Check if executables ipmitool or ipmisensors are found
|
|
||||||
p, err := exec.LookPath(m.config.IpmitoolPath)
|
p, err := exec.LookPath(m.config.IpmitoolPath)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
command := exec.Command(p)
|
|
||||||
err := command.Run()
|
|
||||||
if err != nil {
|
|
||||||
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
|
|
||||||
m.ipmitool = ""
|
|
||||||
} else {
|
|
||||||
m.ipmitool = p
|
m.ipmitool = p
|
||||||
}
|
}
|
||||||
}
|
|
||||||
p, err = exec.LookPath(m.config.IpmisensorsPath)
|
p, err = exec.LookPath(m.config.IpmisensorsPath)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
command := exec.Command(p)
|
|
||||||
err := command.Run()
|
|
||||||
if err != nil {
|
|
||||||
cclog.ComponentError(m.name, fmt.Sprintf("Failed to execute %s: %v", p, err.Error()))
|
|
||||||
m.ipmisensors = ""
|
|
||||||
} else {
|
|
||||||
m.ipmisensors = p
|
m.ipmisensors = p
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 {
|
if len(m.ipmitool) == 0 && len(m.ipmisensors) == 0 {
|
||||||
return errors.New("no usable IPMI reader found")
|
return errors.New("no IPMI reader found")
|
||||||
}
|
}
|
||||||
|
|
||||||
m.init = true
|
m.init = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
|
func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMetric) {
|
||||||
|
|
||||||
// Setup ipmitool command
|
|
||||||
command := exec.Command(cmd, "sensor")
|
command := exec.Command(cmd, "sensor")
|
||||||
stdout, _ := command.StdoutPipe()
|
command.Wait()
|
||||||
errBuf := new(bytes.Buffer)
|
stdout, err := command.Output()
|
||||||
command.Stderr = errBuf
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
// start command
|
|
||||||
if err := command.Start(); err != nil {
|
|
||||||
cclog.ComponentError(
|
|
||||||
m.name,
|
|
||||||
fmt.Sprintf("readIpmiTool(): Failed to start command \"%s\": %v", command.String(), err),
|
|
||||||
)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read command output
|
ll := strings.Split(string(stdout), "\n")
|
||||||
scanner := bufio.NewScanner(stdout)
|
|
||||||
for scanner.Scan() {
|
for _, line := range ll {
|
||||||
lv := strings.Split(scanner.Text(), "|")
|
lv := strings.Split(line, "|")
|
||||||
if len(lv) < 3 {
|
if len(lv) < 3 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
v, err := strconv.ParseFloat(strings.TrimSpace(lv[1]), 64)
|
v, err := strconv.ParseFloat(strings.Trim(lv[1], " "), 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
name := strings.ToLower(strings.Replace(strings.TrimSpace(lv[0]), " ", "_", -1))
|
name := strings.ToLower(strings.Replace(strings.Trim(lv[0], " "), " ", "_", -1))
|
||||||
unit := strings.TrimSpace(lv[2])
|
unit := strings.Trim(lv[2], " ")
|
||||||
if unit == "Volts" {
|
if unit == "Volts" {
|
||||||
unit = "Volts"
|
unit = "Volts"
|
||||||
} else if unit == "degrees C" {
|
} else if unit == "degrees C" {
|
||||||
@@ -121,27 +91,16 @@ func (m *IpmiCollector) readIpmiTool(cmd string, output chan lp.CCMessage) {
|
|||||||
unit = "Watts"
|
unit = "Watts"
|
||||||
}
|
}
|
||||||
|
|
||||||
y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
|
y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", unit)
|
y.AddMeta("unit", unit)
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for command end
|
|
||||||
if err := command.Wait(); err != nil {
|
|
||||||
errMsg, _ := io.ReadAll(errBuf)
|
|
||||||
cclog.ComponentError(
|
|
||||||
m.name,
|
|
||||||
fmt.Sprintf("readIpmiTool(): Failed to wait for the end of command \"%s\": %v\n", command.String(), err),
|
|
||||||
)
|
|
||||||
cclog.ComponentError(m.name, fmt.Sprintf("readIpmiTool(): command stderr: \"%s\"\n", strings.TrimSpace(string(errMsg))))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
|
func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMetric) {
|
||||||
|
|
||||||
command := exec.Command(cmd, "--comma-separated-output", "--sdr-cache-recreate")
|
command := exec.Command(cmd, "--comma-separated-output", "--sdr-cache-recreate")
|
||||||
command.Wait()
|
command.Wait()
|
||||||
@@ -159,7 +118,7 @@ func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
|
|||||||
v, err := strconv.ParseFloat(lv[3], 64)
|
v, err := strconv.ParseFloat(lv[3], 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
name := strings.ToLower(strings.Replace(lv[1], " ", "_", -1))
|
name := strings.ToLower(strings.Replace(lv[1], " ", "_", -1))
|
||||||
y, err := lp.NewMessage(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
|
y, err := lp.New(name, map[string]string{"type": "node"}, m.meta, map[string]interface{}{"value": v}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if len(lv) > 4 {
|
if len(lv) > 4 {
|
||||||
y.AddMeta("unit", lv[4])
|
y.AddMeta("unit", lv[4])
|
||||||
@@ -171,18 +130,18 @@ func (m *IpmiCollector) readIpmiSensors(cmd string, output chan lp.CCMessage) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *IpmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
|
|
||||||
// Check if already initialized
|
|
||||||
if !m.init {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(m.config.IpmitoolPath) > 0 {
|
if len(m.config.IpmitoolPath) > 0 {
|
||||||
|
_, err := os.Stat(m.config.IpmitoolPath)
|
||||||
|
if err == nil {
|
||||||
m.readIpmiTool(m.config.IpmitoolPath, output)
|
m.readIpmiTool(m.config.IpmitoolPath, output)
|
||||||
|
}
|
||||||
} else if len(m.config.IpmisensorsPath) > 0 {
|
} else if len(m.config.IpmisensorsPath) > 0 {
|
||||||
|
_, err := os.Stat(m.config.IpmisensorsPath)
|
||||||
|
if err == nil {
|
||||||
m.readIpmiSensors(m.config.IpmisensorsPath, output)
|
m.readIpmiSensors(m.config.IpmisensorsPath, output)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *IpmiCollector) Close() {
|
func (m *IpmiCollector) Close() {
|
||||||
|
|||||||
@@ -11,3 +11,6 @@
|
|||||||
The `ipmistat` collector reads data from `ipmitool` (`ipmitool sensor`) or `ipmi-sensors` (`ipmi-sensors --sdr-cache-recreate --comma-separated-output`).
|
The `ipmistat` collector reads data from `ipmitool` (`ipmitool sensor`) or `ipmi-sensors` (`ipmi-sensors --sdr-cache-recreate --comma-separated-output`).
|
||||||
|
|
||||||
The metrics depend on the output of the underlying tools but contain temperature, power and energy metrics.
|
The metrics depend on the output of the underlying tools but contain temperature, power and energy metrics.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ import (
|
|||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"os/user"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -24,30 +23,24 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
|
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
|
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
|
||||||
"github.com/NVIDIA/go-nvml/pkg/dl"
|
"github.com/NVIDIA/go-nvml/pkg/dl"
|
||||||
"github.com/fsnotify/fsnotify"
|
|
||||||
"golang.design/x/thread"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
LIKWID_LIB_NAME = "liblikwid.so"
|
LIKWID_LIB_NAME = "liblikwid.so"
|
||||||
LIKWID_LIB_DL_FLAGS = dl.RTLD_LAZY | dl.RTLD_GLOBAL
|
LIKWID_LIB_DL_FLAGS = dl.RTLD_LAZY | dl.RTLD_GLOBAL
|
||||||
LIKWID_DEF_ACCESSMODE = "direct"
|
LIKWID_DEF_ACCESSMODE = "direct"
|
||||||
LIKWID_DEF_LOCKFILE = "/var/run/likwid.lock"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type LikwidCollectorMetricConfig struct {
|
type LikwidCollectorMetricConfig struct {
|
||||||
Name string `json:"name"` // Name of the metric
|
Name string `json:"name"` // Name of the metric
|
||||||
Calc string `json:"calc"` // Calculation for the metric using
|
Calc string `json:"calc"` // Calculation for the metric using
|
||||||
Type string `json:"type"` // Metric type (aka node, socket, hwthread, ...)
|
Type string `json:"type"` // Metric type (aka node, socket, cpu, ...)
|
||||||
Publish bool `json:"publish"`
|
Publish bool `json:"publish"`
|
||||||
SendCoreTotalVal bool `json:"send_core_total_values,omitempty"`
|
|
||||||
SendSocketTotalVal bool `json:"send_socket_total_values,omitempty"`
|
|
||||||
SendNodeTotalVal bool `json:"send_node_total_values,omitempty"`
|
|
||||||
Unit string `json:"unit"` // Unit of metric if any
|
Unit string `json:"unit"` // Unit of metric if any
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -62,7 +55,7 @@ type LikwidEventsetConfig struct {
|
|||||||
eorder []*C.char
|
eorder []*C.char
|
||||||
estr *C.char
|
estr *C.char
|
||||||
go_estr string
|
go_estr string
|
||||||
results map[int]map[string]float64
|
results map[int]map[string]interface{}
|
||||||
metrics map[int]map[string]float64
|
metrics map[int]map[string]float64
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -74,7 +67,6 @@ type LikwidCollectorConfig struct {
|
|||||||
AccessMode string `json:"access_mode,omitempty"`
|
AccessMode string `json:"access_mode,omitempty"`
|
||||||
DaemonPath string `json:"accessdaemon_path,omitempty"`
|
DaemonPath string `json:"accessdaemon_path,omitempty"`
|
||||||
LibraryPath string `json:"liblikwid_path,omitempty"`
|
LibraryPath string `json:"liblikwid_path,omitempty"`
|
||||||
LockfilePath string `json:"lockfile_path,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type LikwidCollector struct {
|
type LikwidCollector struct {
|
||||||
@@ -82,20 +74,15 @@ type LikwidCollector struct {
|
|||||||
cpulist []C.int
|
cpulist []C.int
|
||||||
cpu2tid map[int]int
|
cpu2tid map[int]int
|
||||||
sock2tid map[int]int
|
sock2tid map[int]int
|
||||||
tid2core map[int]int
|
|
||||||
tid2socket map[int]int
|
|
||||||
metrics map[C.int]map[string]int
|
metrics map[C.int]map[string]int
|
||||||
groups []C.int
|
groups []C.int
|
||||||
config LikwidCollectorConfig
|
config LikwidCollectorConfig
|
||||||
|
gmresults map[int]map[string]float64
|
||||||
basefreq float64
|
basefreq float64
|
||||||
running bool
|
running bool
|
||||||
initialized bool
|
initialized bool
|
||||||
needs_reinit bool
|
|
||||||
myuid int
|
|
||||||
lock_err_once bool
|
|
||||||
likwidGroups map[C.int]LikwidEventsetConfig
|
likwidGroups map[C.int]LikwidEventsetConfig
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
measureThread thread.Thread
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type LikwidMetric struct {
|
type LikwidMetric struct {
|
||||||
@@ -105,18 +92,6 @@ type LikwidMetric struct {
|
|||||||
group_idx int
|
group_idx int
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkMetricType(t string) bool {
|
|
||||||
valid := map[string]bool{
|
|
||||||
"node": true,
|
|
||||||
"socket": true,
|
|
||||||
"hwthread": true,
|
|
||||||
"core": true,
|
|
||||||
"memoryDomain": true,
|
|
||||||
}
|
|
||||||
_, ok := valid[t]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func eventsToEventStr(events map[string]string) string {
|
func eventsToEventStr(events map[string]string) string {
|
||||||
elist := make([]string, 0)
|
elist := make([]string, 0)
|
||||||
for k, v := range events {
|
for k, v := range events {
|
||||||
@@ -140,10 +115,10 @@ func genLikwidEventSet(input LikwidCollectorEventsetConfig) LikwidEventsetConfig
|
|||||||
elist = append(elist, c_counter)
|
elist = append(elist, c_counter)
|
||||||
}
|
}
|
||||||
estr := strings.Join(tmplist, ",")
|
estr := strings.Join(tmplist, ",")
|
||||||
res := make(map[int]map[string]float64)
|
res := make(map[int]map[string]interface{})
|
||||||
met := make(map[int]map[string]float64)
|
met := make(map[int]map[string]float64)
|
||||||
for _, i := range topo.CpuList() {
|
for _, i := range topo.CpuList() {
|
||||||
res[i] = make(map[string]float64)
|
res[i] = make(map[string]interface{})
|
||||||
for k := range input.Events {
|
for k := range input.Events {
|
||||||
res[i][k] = 0.0
|
res[i][k] = 0.0
|
||||||
}
|
}
|
||||||
@@ -163,7 +138,7 @@ func genLikwidEventSet(input LikwidCollectorEventsetConfig) LikwidEventsetConfig
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testLikwidMetricFormula(formula string, params []string) bool {
|
func testLikwidMetricFormula(formula string, params []string) bool {
|
||||||
myparams := make(map[string]float64)
|
myparams := make(map[string]interface{})
|
||||||
for _, p := range params {
|
for _, p := range params {
|
||||||
myparams[p] = float64(1.0)
|
myparams[p] = float64(1.0)
|
||||||
}
|
}
|
||||||
@@ -204,12 +179,9 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
|
|||||||
m.name = "LikwidCollector"
|
m.name = "LikwidCollector"
|
||||||
m.parallel = false
|
m.parallel = false
|
||||||
m.initialized = false
|
m.initialized = false
|
||||||
m.needs_reinit = true
|
|
||||||
m.running = false
|
m.running = false
|
||||||
m.myuid = os.Getuid()
|
|
||||||
m.config.AccessMode = LIKWID_DEF_ACCESSMODE
|
m.config.AccessMode = LIKWID_DEF_ACCESSMODE
|
||||||
m.config.LibraryPath = LIKWID_LIB_NAME
|
m.config.LibraryPath = LIKWID_LIB_NAME
|
||||||
m.config.LockfilePath = LIKWID_DEF_LOCKFILE
|
|
||||||
if len(config) > 0 {
|
if len(config) > 0 {
|
||||||
err := json.Unmarshal(config, &m.config)
|
err := json.Unmarshal(config, &m.config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -243,6 +215,13 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
|
|||||||
|
|
||||||
m.likwidGroups = make(map[C.int]LikwidEventsetConfig)
|
m.likwidGroups = make(map[C.int]LikwidEventsetConfig)
|
||||||
|
|
||||||
|
// m.results = make(map[int]map[int]map[string]interface{})
|
||||||
|
// m.mresults = make(map[int]map[int]map[string]float64)
|
||||||
|
m.gmresults = make(map[int]map[string]float64)
|
||||||
|
for _, tid := range m.cpu2tid {
|
||||||
|
m.gmresults[tid] = make(map[string]float64)
|
||||||
|
}
|
||||||
|
|
||||||
// This is for the global metrics computation test
|
// This is for the global metrics computation test
|
||||||
totalMetrics := 0
|
totalMetrics := 0
|
||||||
// Generate parameter list for the metric computing test
|
// Generate parameter list for the metric computing test
|
||||||
@@ -260,16 +239,12 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
|
|||||||
}
|
}
|
||||||
for _, metric := range evset.Metrics {
|
for _, metric := range evset.Metrics {
|
||||||
// Try to evaluate the metric
|
// Try to evaluate the metric
|
||||||
cclog.ComponentDebug(m.name, "Checking", metric.Name)
|
if testLikwidMetricFormula(metric.Calc, params) {
|
||||||
if !checkMetricType(metric.Type) {
|
// Add the computable metric to the parameter list for the global metrics
|
||||||
cclog.ComponentError(m.name, "Metric", metric.Name, "uses invalid type", metric.Type)
|
|
||||||
metric.Calc = ""
|
|
||||||
} else if !testLikwidMetricFormula(metric.Calc, params) {
|
|
||||||
cclog.ComponentError(m.name, "Metric", metric.Name, "cannot be calculated with given counters")
|
|
||||||
metric.Calc = ""
|
|
||||||
} else {
|
|
||||||
globalParams = append(globalParams, metric.Name)
|
globalParams = append(globalParams, metric.Name)
|
||||||
totalMetrics++
|
totalMetrics++
|
||||||
|
} else {
|
||||||
|
metric.Calc = ""
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -279,14 +254,8 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
|
|||||||
}
|
}
|
||||||
for _, metric := range m.config.Metrics {
|
for _, metric := range m.config.Metrics {
|
||||||
// Try to evaluate the global metric
|
// Try to evaluate the global metric
|
||||||
if !checkMetricType(metric.Type) {
|
if !testLikwidMetricFormula(metric.Calc, globalParams) {
|
||||||
cclog.ComponentError(m.name, "Metric", metric.Name, "uses invalid type", metric.Type)
|
cclog.ComponentError(m.name, "Calculation for metric", metric.Name, "failed")
|
||||||
metric.Calc = ""
|
|
||||||
} else if !testLikwidMetricFormula(metric.Calc, globalParams) {
|
|
||||||
cclog.ComponentError(m.name, "Metric", metric.Name, "cannot be calculated with given counters")
|
|
||||||
metric.Calc = ""
|
|
||||||
} else if !checkMetricType(metric.Type) {
|
|
||||||
cclog.ComponentError(m.name, "Metric", metric.Name, "has invalid type")
|
|
||||||
metric.Calc = ""
|
metric.Calc = ""
|
||||||
} else {
|
} else {
|
||||||
totalMetrics++
|
totalMetrics++
|
||||||
@@ -299,273 +268,76 @@ func (m *LikwidCollector) Init(config json.RawMessage) error {
|
|||||||
cclog.ComponentError(m.name, err.Error())
|
cclog.ComponentError(m.name, err.Error())
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ret := C.topology_init()
|
|
||||||
if ret != 0 {
|
|
||||||
err := errors.New("failed to initialize topology module")
|
|
||||||
cclog.ComponentError(m.name, err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.measureThread = thread.New()
|
|
||||||
switch m.config.AccessMode {
|
|
||||||
case "direct":
|
|
||||||
C.HPMmode(0)
|
|
||||||
case "accessdaemon":
|
|
||||||
if len(m.config.DaemonPath) > 0 {
|
|
||||||
p := os.Getenv("PATH")
|
|
||||||
os.Setenv("PATH", m.config.DaemonPath+":"+p)
|
|
||||||
}
|
|
||||||
C.HPMmode(1)
|
|
||||||
retCode := C.HPMinit()
|
|
||||||
if retCode != 0 {
|
|
||||||
err := fmt.Errorf("C.HPMinit() failed with return code %v", retCode)
|
|
||||||
cclog.ComponentError(m.name, err.Error())
|
|
||||||
}
|
|
||||||
for _, c := range m.cpulist {
|
|
||||||
m.measureThread.Call(
|
|
||||||
func() {
|
|
||||||
retCode := C.HPMaddThread(c)
|
|
||||||
if retCode != 0 {
|
|
||||||
err := fmt.Errorf("C.HPMaddThread(%v) failed with return code %v", c, retCode)
|
|
||||||
cclog.ComponentError(m.name, err.Error())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.sock2tid = make(map[int]int)
|
|
||||||
tmp := make([]C.int, 1)
|
|
||||||
for _, sid := range topo.SocketList() {
|
|
||||||
cstr := C.CString(fmt.Sprintf("S%d:0", sid))
|
|
||||||
ret = C.cpustr_to_cpulist(cstr, &tmp[0], 1)
|
|
||||||
if ret > 0 {
|
|
||||||
m.sock2tid[sid] = m.cpu2tid[int(tmp[0])]
|
|
||||||
}
|
|
||||||
C.free(unsafe.Pointer(cstr))
|
|
||||||
}
|
|
||||||
|
|
||||||
cpuData := topo.CpuData()
|
|
||||||
m.tid2core = make(map[int]int, len(cpuData))
|
|
||||||
m.tid2socket = make(map[int]int, len(cpuData))
|
|
||||||
for i := range cpuData {
|
|
||||||
c := &cpuData[i]
|
|
||||||
// Hardware thread ID to core ID mapping
|
|
||||||
if len(c.CoreCPUsList) > 0 {
|
|
||||||
m.tid2core[c.CpuID] = c.CoreCPUsList[0]
|
|
||||||
} else {
|
|
||||||
m.tid2core[c.CpuID] = c.CpuID
|
|
||||||
}
|
|
||||||
// Hardware thead ID to socket ID mapping
|
|
||||||
m.tid2socket[c.CpuID] = c.Socket
|
|
||||||
}
|
|
||||||
|
|
||||||
m.basefreq = getBaseFreq()
|
|
||||||
m.init = true
|
m.init = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// take a measurement for 'interval' seconds of event set index 'group'
|
// take a measurement for 'interval' seconds of event set index 'group'
|
||||||
func (m *LikwidCollector) takeMeasurement(evidx int, evset LikwidEventsetConfig, interval time.Duration) (bool, error) {
|
func (m *LikwidCollector) takeMeasurement(evset LikwidEventsetConfig, interval time.Duration) (bool, error) {
|
||||||
var ret C.int
|
var ret C.int
|
||||||
var gid C.int = -1
|
|
||||||
sigchan := make(chan os.Signal, 1)
|
|
||||||
|
|
||||||
// Watch changes for the lock file ()
|
|
||||||
watcher, err := fsnotify.NewWatcher()
|
|
||||||
if err != nil {
|
|
||||||
cclog.ComponentError(m.name, err.Error())
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
defer watcher.Close()
|
|
||||||
if len(m.config.LockfilePath) > 0 {
|
|
||||||
// Check if the lock file exists
|
|
||||||
info, err := os.Stat(m.config.LockfilePath)
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
// Create the lock file if it does not exist
|
|
||||||
file, createErr := os.Create(m.config.LockfilePath)
|
|
||||||
if createErr != nil {
|
|
||||||
return true, fmt.Errorf("failed to create lock file: %v", createErr)
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
info, err = os.Stat(m.config.LockfilePath) // Recheck the file after creation
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
// Check file ownership
|
|
||||||
uid := info.Sys().(*syscall.Stat_t).Uid
|
|
||||||
if uid != uint32(m.myuid) {
|
|
||||||
usr, err := user.LookupId(fmt.Sprint(uid))
|
|
||||||
if err == nil {
|
|
||||||
err = fmt.Errorf("access to performance counters locked by %s", usr.Username)
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("access to performance counters locked by %d", uid)
|
|
||||||
}
|
|
||||||
// delete error if we already returned the error once.
|
|
||||||
if !m.lock_err_once {
|
|
||||||
m.lock_err_once = true
|
|
||||||
} else {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
// reset lock_err_once
|
|
||||||
m.lock_err_once = false
|
|
||||||
|
|
||||||
// Add the lock file to the watcher
|
|
||||||
err = watcher.Add(m.config.LockfilePath)
|
|
||||||
if err != nil {
|
|
||||||
cclog.ComponentError(m.name, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
defer m.lock.Unlock()
|
if m.initialized {
|
||||||
|
ret = C.perfmon_setupCounters(evset.gid)
|
||||||
// Initialize the performance monitoring feature by creating basic data structures
|
|
||||||
select {
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
ret = -1
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
|
|
||||||
}
|
|
||||||
if ret != 0 {
|
if ret != 0 {
|
||||||
return true, fmt.Errorf("failed to initialize library, error %d", ret)
|
var err error = nil
|
||||||
|
var skip bool = false
|
||||||
|
if ret == -37 {
|
||||||
|
skip = true
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("failed to setup performance group %d", evset.gid)
|
||||||
}
|
}
|
||||||
signal.Notify(sigchan, os.Interrupt)
|
m.lock.Unlock()
|
||||||
signal.Notify(sigchan, syscall.SIGCHLD)
|
return skip, err
|
||||||
|
|
||||||
// Add an event string to LIKWID
|
|
||||||
select {
|
|
||||||
case <-sigchan:
|
|
||||||
gid = -1
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
gid = -1
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
gid = C.perfmon_addEventSet(evset.estr)
|
|
||||||
}
|
}
|
||||||
default:
|
|
||||||
gid = C.perfmon_addEventSet(evset.estr)
|
|
||||||
}
|
|
||||||
if gid < 0 {
|
|
||||||
return true, fmt.Errorf("failed to add events %s, id %d, error %d", evset.go_estr, evidx, gid)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Setup all performance monitoring counters of an eventSet
|
|
||||||
select {
|
|
||||||
case <-sigchan:
|
|
||||||
ret = -1
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
ret = C.perfmon_setupCounters(gid)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
ret = C.perfmon_setupCounters(gid)
|
|
||||||
}
|
|
||||||
if ret != 0 {
|
|
||||||
return true, fmt.Errorf("failed to setup events '%s', error %d", evset.go_estr, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start counters
|
|
||||||
select {
|
|
||||||
case <-sigchan:
|
|
||||||
ret = -1
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
ret = C.perfmon_startCounters()
|
ret = C.perfmon_startCounters()
|
||||||
}
|
|
||||||
default:
|
|
||||||
ret = C.perfmon_startCounters()
|
|
||||||
}
|
|
||||||
if ret != 0 {
|
if ret != 0 {
|
||||||
return true, fmt.Errorf("failed to start events '%s', error %d", evset.go_estr, ret)
|
var err error = nil
|
||||||
|
var skip bool = false
|
||||||
|
if ret == -37 {
|
||||||
|
skip = true
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("failed to setup performance group %d", evset.gid)
|
||||||
}
|
}
|
||||||
select {
|
m.lock.Unlock()
|
||||||
case <-sigchan:
|
return skip, err
|
||||||
ret = -1
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
ret = C.perfmon_readCounters()
|
|
||||||
}
|
}
|
||||||
default:
|
m.running = true
|
||||||
ret = C.perfmon_readCounters()
|
|
||||||
}
|
|
||||||
if ret != 0 {
|
|
||||||
return true, fmt.Errorf("failed to read events '%s', error %d", evset.go_estr, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait
|
|
||||||
time.Sleep(interval)
|
time.Sleep(interval)
|
||||||
|
m.running = false
|
||||||
// Read counters
|
ret = C.perfmon_stopCounters()
|
||||||
select {
|
|
||||||
case <-sigchan:
|
|
||||||
ret = -1
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
ret = C.perfmon_readCounters()
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
ret = C.perfmon_readCounters()
|
|
||||||
}
|
|
||||||
if ret != 0 {
|
if ret != 0 {
|
||||||
return true, fmt.Errorf("failed to read events '%s', error %d", evset.go_estr, ret)
|
var err error = nil
|
||||||
|
var skip bool = false
|
||||||
|
if ret == -37 {
|
||||||
|
skip = true
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("failed to setup performance group %d", evset.gid)
|
||||||
}
|
}
|
||||||
|
m.lock.Unlock()
|
||||||
|
return skip, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.lock.Unlock()
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Store counters
|
// Get all measurement results for an event set, derive the metric values out of the measurement results and send it
|
||||||
|
func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMetric) error {
|
||||||
|
invClock := float64(1.0 / m.basefreq)
|
||||||
|
|
||||||
|
// Go over events and get the results
|
||||||
for eidx, counter := range evset.eorder {
|
for eidx, counter := range evset.eorder {
|
||||||
gctr := C.GoString(counter)
|
gctr := C.GoString(counter)
|
||||||
for _, tid := range m.cpu2tid {
|
for _, tid := range m.cpu2tid {
|
||||||
res := C.perfmon_getLastResult(gid, C.int(eidx), C.int(tid))
|
res := C.perfmon_getLastResult(evset.gid, C.int(eidx), C.int(tid))
|
||||||
fres := float64(res)
|
fres := float64(res)
|
||||||
if m.config.InvalidToZero && (math.IsNaN(fres) || math.IsInf(fres, 0)) {
|
if m.config.InvalidToZero && (math.IsNaN(fres) || math.IsInf(fres, 0)) {
|
||||||
fres = 0.0
|
fres = 0.0
|
||||||
}
|
}
|
||||||
evset.results[tid][gctr] = fres
|
evset.results[tid][gctr] = fres
|
||||||
}
|
evset.results[tid]["time"] = interval.Seconds()
|
||||||
}
|
|
||||||
|
|
||||||
// Store time in seconds the event group was measured the last time
|
|
||||||
for _, tid := range m.cpu2tid {
|
|
||||||
evset.results[tid]["time"] = float64(C.perfmon_getLastTimeOfGroup(gid))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop counters
|
|
||||||
select {
|
|
||||||
case <-sigchan:
|
|
||||||
ret = -1
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
ret = C.perfmon_stopCounters()
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
ret = C.perfmon_stopCounters()
|
|
||||||
}
|
|
||||||
if ret != 0 {
|
|
||||||
return true, fmt.Errorf("failed to stop events '%s', error %d", evset.go_estr, ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deallocates all internal data that is used during performance monitoring
|
|
||||||
signal.Stop(sigchan)
|
|
||||||
select {
|
|
||||||
case e := <-watcher.Events:
|
|
||||||
if e.Op != fsnotify.Chmod {
|
|
||||||
C.perfmon_finalize()
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
C.perfmon_finalize()
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get all measurement results for an event set, derive the metric values out of the measurement results and send it
|
|
||||||
func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interval time.Duration, output chan lp.CCMessage) error {
|
|
||||||
invClock := float64(1.0 / m.basefreq)
|
|
||||||
|
|
||||||
for _, tid := range m.cpu2tid {
|
|
||||||
evset.results[tid]["inverseClock"] = invClock
|
evset.results[tid]["inverseClock"] = invClock
|
||||||
evset.results[tid]["gotime"] = interval.Seconds()
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Go over the event set metrics, derive the value out of the event:counter values and send it
|
// Go over the event set metrics, derive the value out of the event:counter values and send it
|
||||||
@@ -576,9 +348,6 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
|
|||||||
if metric.Type == "socket" {
|
if metric.Type == "socket" {
|
||||||
scopemap = m.sock2tid
|
scopemap = m.sock2tid
|
||||||
}
|
}
|
||||||
// Send all metrics with same time stamp
|
|
||||||
// This function does only computiation, counter measurement is done before
|
|
||||||
now := time.Now()
|
|
||||||
for domain, tid := range scopemap {
|
for domain, tid := range scopemap {
|
||||||
if tid >= 0 && len(metric.Calc) > 0 {
|
if tid >= 0 && len(metric.Calc) > 0 {
|
||||||
value, err := agg.EvalFloat64Condition(metric.Calc, evset.results[tid])
|
value, err := agg.EvalFloat64Condition(metric.Calc, evset.results[tid])
|
||||||
@@ -591,18 +360,10 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
|
|||||||
}
|
}
|
||||||
evset.metrics[tid][metric.Name] = value
|
evset.metrics[tid][metric.Name] = value
|
||||||
// Now we have the result, send it with the proper tags
|
// Now we have the result, send it with the proper tags
|
||||||
if !math.IsNaN(value) && metric.Publish {
|
if !math.IsNaN(value) {
|
||||||
|
if metric.Publish {
|
||||||
fields := map[string]interface{}{"value": value}
|
fields := map[string]interface{}{"value": value}
|
||||||
y, err :=
|
y, err := lp.New(metric.Name, map[string]string{"type": metric.Type}, m.meta, fields, time.Now())
|
||||||
lp.NewMessage(
|
|
||||||
metric.Name,
|
|
||||||
map[string]string{
|
|
||||||
"type": metric.Type,
|
|
||||||
},
|
|
||||||
m.meta,
|
|
||||||
fields,
|
|
||||||
now,
|
|
||||||
)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if metric.Type != "node" {
|
if metric.Type != "node" {
|
||||||
y.AddTag("type-id", fmt.Sprintf("%d", domain))
|
y.AddTag("type-id", fmt.Sprintf("%d", domain))
|
||||||
@@ -615,112 +376,6 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send per core aggregated values
|
|
||||||
if metric.SendCoreTotalVal {
|
|
||||||
totalCoreValues := make(map[int]float64)
|
|
||||||
for _, tid := range scopemap {
|
|
||||||
if tid >= 0 && len(metric.Calc) > 0 {
|
|
||||||
coreID := m.tid2core[tid]
|
|
||||||
value := evset.metrics[tid][metric.Name]
|
|
||||||
if !math.IsNaN(value) && metric.Publish {
|
|
||||||
totalCoreValues[coreID] += value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for coreID, value := range totalCoreValues {
|
|
||||||
y, err :=
|
|
||||||
lp.NewMessage(
|
|
||||||
metric.Name,
|
|
||||||
map[string]string{
|
|
||||||
"type": "core",
|
|
||||||
"type-id": fmt.Sprintf("%d", coreID),
|
|
||||||
},
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": value,
|
|
||||||
},
|
|
||||||
now,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(metric.Unit) > 0 {
|
|
||||||
y.AddMeta("unit", metric.Unit)
|
|
||||||
}
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send per socket aggregated values
|
|
||||||
if metric.SendSocketTotalVal {
|
|
||||||
totalSocketValues := make(map[int]float64)
|
|
||||||
for _, tid := range scopemap {
|
|
||||||
if tid >= 0 && len(metric.Calc) > 0 {
|
|
||||||
socketID := m.tid2socket[tid]
|
|
||||||
value := evset.metrics[tid][metric.Name]
|
|
||||||
if !math.IsNaN(value) && metric.Publish {
|
|
||||||
totalSocketValues[socketID] += value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for socketID, value := range totalSocketValues {
|
|
||||||
y, err :=
|
|
||||||
lp.NewMessage(
|
|
||||||
metric.Name,
|
|
||||||
map[string]string{
|
|
||||||
"type": "socket",
|
|
||||||
"type-id": fmt.Sprintf("%d", socketID),
|
|
||||||
},
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": value,
|
|
||||||
},
|
|
||||||
now,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(metric.Unit) > 0 {
|
|
||||||
y.AddMeta("unit", metric.Unit)
|
|
||||||
}
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send per node aggregated value
|
|
||||||
if metric.SendNodeTotalVal {
|
|
||||||
var totalNodeValue float64 = 0.0
|
|
||||||
for _, tid := range scopemap {
|
|
||||||
if tid >= 0 && len(metric.Calc) > 0 {
|
|
||||||
value := evset.metrics[tid][metric.Name]
|
|
||||||
if !math.IsNaN(value) && metric.Publish {
|
|
||||||
totalNodeValue += value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
y, err :=
|
|
||||||
lp.NewMessage(
|
|
||||||
metric.Name,
|
|
||||||
map[string]string{
|
|
||||||
"type": "node",
|
|
||||||
},
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": totalNodeValue,
|
|
||||||
},
|
|
||||||
now,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(metric.Unit) > 0 {
|
|
||||||
y.AddMeta("unit", metric.Unit)
|
|
||||||
}
|
|
||||||
output <- y
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -728,14 +383,8 @@ func (m *LikwidCollector) calcEventsetMetrics(evset LikwidEventsetConfig, interv
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Go over the global metrics, derive the value out of the event sets' metric values and send it
|
// Go over the global metrics, derive the value out of the event sets' metric values and send it
|
||||||
func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, interval time.Duration, output chan lp.CCMessage) error {
|
func (m *LikwidCollector) calcGlobalMetrics(interval time.Duration, output chan lp.CCMetric) error {
|
||||||
// Send all metrics with same time stamp
|
|
||||||
// This function does only computiation, counter measurement is done before
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
for _, metric := range m.config.Metrics {
|
for _, metric := range m.config.Metrics {
|
||||||
// The metric scope is determined in the Init() function
|
|
||||||
// Get the map scope-id -> tids
|
|
||||||
scopemap := m.cpu2tid
|
scopemap := m.cpu2tid
|
||||||
if metric.Type == "socket" {
|
if metric.Type == "socket" {
|
||||||
scopemap = m.sock2tid
|
scopemap = m.sock2tid
|
||||||
@@ -743,13 +392,12 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
|
|||||||
for domain, tid := range scopemap {
|
for domain, tid := range scopemap {
|
||||||
if tid >= 0 {
|
if tid >= 0 {
|
||||||
// Here we generate parameter list
|
// Here we generate parameter list
|
||||||
params := make(map[string]float64)
|
params := make(map[string]interface{})
|
||||||
for _, evset := range groups {
|
for _, evset := range m.likwidGroups {
|
||||||
for mname, mres := range evset.metrics[tid] {
|
for mname, mres := range evset.metrics[tid] {
|
||||||
params[mname] = mres
|
params[mname] = mres
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
params["gotime"] = interval.Seconds()
|
|
||||||
// Evaluate the metric
|
// Evaluate the metric
|
||||||
value, err := agg.EvalFloat64Condition(metric.Calc, params)
|
value, err := agg.EvalFloat64Condition(metric.Calc, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -759,21 +407,13 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
|
|||||||
if m.config.InvalidToZero && (math.IsNaN(value) || math.IsInf(value, 0)) {
|
if m.config.InvalidToZero && (math.IsNaN(value) || math.IsInf(value, 0)) {
|
||||||
value = 0.0
|
value = 0.0
|
||||||
}
|
}
|
||||||
|
m.gmresults[tid][metric.Name] = value
|
||||||
// Now we have the result, send it with the proper tags
|
// Now we have the result, send it with the proper tags
|
||||||
if !math.IsNaN(value) {
|
if !math.IsNaN(value) {
|
||||||
if metric.Publish {
|
if metric.Publish {
|
||||||
y, err :=
|
tags := map[string]string{"type": metric.Type}
|
||||||
lp.NewMessage(
|
fields := map[string]interface{}{"value": value}
|
||||||
metric.Name,
|
y, err := lp.New(metric.Name, tags, m.meta, fields, time.Now())
|
||||||
map[string]string{
|
|
||||||
"type": metric.Type,
|
|
||||||
},
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": value,
|
|
||||||
},
|
|
||||||
now,
|
|
||||||
)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if metric.Type != "node" {
|
if metric.Type != "node" {
|
||||||
y.AddTag("type-id", fmt.Sprintf("%d", domain))
|
y.AddTag("type-id", fmt.Sprintf("%d", domain))
|
||||||
@@ -791,17 +431,131 @@ func (m *LikwidCollector) calcGlobalMetrics(groups []LikwidEventsetConfig, inter
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LikwidCollector) ReadThread(interval time.Duration, output chan lp.CCMessage) {
|
func (m *LikwidCollector) LateInit() error {
|
||||||
var err error = nil
|
var ret C.int
|
||||||
groups := make([]LikwidEventsetConfig, 0)
|
if m.initialized {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
switch m.config.AccessMode {
|
||||||
|
case "direct":
|
||||||
|
C.HPMmode(0)
|
||||||
|
case "accessdaemon":
|
||||||
|
if len(m.config.DaemonPath) > 0 {
|
||||||
|
p := os.Getenv("PATH")
|
||||||
|
os.Setenv("PATH", m.config.DaemonPath+":"+p)
|
||||||
|
}
|
||||||
|
C.HPMmode(1)
|
||||||
|
}
|
||||||
|
cclog.ComponentDebug(m.name, "initialize LIKWID topology")
|
||||||
|
ret = C.topology_init()
|
||||||
|
if ret != 0 {
|
||||||
|
err := errors.New("failed to initialize LIKWID topology")
|
||||||
|
cclog.ComponentError(m.name, err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
for evidx, evset := range m.config.Eventsets {
|
m.sock2tid = make(map[int]int)
|
||||||
e := genLikwidEventSet(evset)
|
tmp := make([]C.int, 1)
|
||||||
e.internal = evidx
|
for _, sid := range topo.SocketList() {
|
||||||
|
cstr := C.CString(fmt.Sprintf("S%d:0", sid))
|
||||||
|
ret = C.cpustr_to_cpulist(cstr, &tmp[0], 1)
|
||||||
|
if ret > 0 {
|
||||||
|
m.sock2tid[sid] = m.cpu2tid[int(tmp[0])]
|
||||||
|
}
|
||||||
|
C.free(unsafe.Pointer(cstr))
|
||||||
|
}
|
||||||
|
|
||||||
|
m.basefreq = getBaseFreq()
|
||||||
|
cclog.ComponentDebug(m.name, "BaseFreq", m.basefreq)
|
||||||
|
|
||||||
|
cclog.ComponentDebug(m.name, "initialize LIKWID perfmon module")
|
||||||
|
ret = C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
|
||||||
|
if ret != 0 {
|
||||||
|
var err error = nil
|
||||||
|
C.topology_finalize()
|
||||||
|
if ret != -22 {
|
||||||
|
err = errors.New("failed to initialize LIKWID perfmon")
|
||||||
|
cclog.ComponentError(m.name, err.Error())
|
||||||
|
} else {
|
||||||
|
err = errors.New("access to LIKWID perfmon locked")
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// While adding the events, we test the metrics whether they can be computed at all
|
||||||
|
for i, evset := range m.config.Eventsets {
|
||||||
|
var gid C.int
|
||||||
|
if len(evset.Events) > 0 {
|
||||||
skip := false
|
skip := false
|
||||||
|
likwidGroup := genLikwidEventSet(evset)
|
||||||
|
for _, g := range m.likwidGroups {
|
||||||
|
if likwidGroup.go_estr == g.go_estr {
|
||||||
|
skip = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if skip {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Now we add the list of events to likwid
|
||||||
|
gid = C.perfmon_addEventSet(likwidGroup.estr)
|
||||||
|
if gid >= 0 {
|
||||||
|
likwidGroup.gid = gid
|
||||||
|
likwidGroup.internal = i
|
||||||
|
m.likwidGroups[gid] = likwidGroup
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
cclog.ComponentError(m.name, "Invalid Likwid eventset config, no events given")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no event set could be added, shut down LikwidCollector
|
||||||
|
if len(m.likwidGroups) == 0 {
|
||||||
|
C.perfmon_finalize()
|
||||||
|
C.topology_finalize()
|
||||||
|
err := errors.New("no LIKWID performance group initialized")
|
||||||
|
cclog.ComponentError(m.name, err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sigchan := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(sigchan, syscall.SIGCHLD)
|
||||||
|
signal.Notify(sigchan, os.Interrupt)
|
||||||
|
go func() {
|
||||||
|
<-sigchan
|
||||||
|
|
||||||
|
signal.Stop(sigchan)
|
||||||
|
m.initialized = false
|
||||||
|
}()
|
||||||
|
m.initialized = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// main read function taking multiple measurement rounds, each 'interval' seconds long
|
||||||
|
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
|
var skip bool = false
|
||||||
|
var err error
|
||||||
|
if !m.init {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !m.initialized {
|
||||||
|
m.lock.Lock()
|
||||||
|
err = m.LateInit()
|
||||||
|
if err != nil {
|
||||||
|
m.lock.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.initialized = true
|
||||||
|
m.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.initialized && !skip {
|
||||||
|
for _, evset := range m.likwidGroups {
|
||||||
if !skip {
|
if !skip {
|
||||||
// measure event set 'i' for 'interval' seconds
|
// measure event set 'i' for 'interval' seconds
|
||||||
skip, err = m.takeMeasurement(evidx, e, interval)
|
skip, err = m.takeMeasurement(evset, interval)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(m.name, err.Error())
|
cclog.ComponentError(m.name, err.Error())
|
||||||
return
|
return
|
||||||
@@ -810,33 +564,30 @@ func (m *LikwidCollector) ReadThread(interval time.Duration, output chan lp.CCMe
|
|||||||
|
|
||||||
if !skip {
|
if !skip {
|
||||||
// read measurements and derive event set metrics
|
// read measurements and derive event set metrics
|
||||||
m.calcEventsetMetrics(e, interval, output)
|
m.calcEventsetMetrics(evset, interval, output)
|
||||||
groups = append(groups, e)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(groups) > 0 {
|
if !skip {
|
||||||
// calculate global metrics
|
// use the event set metrics to derive the global metrics
|
||||||
m.calcGlobalMetrics(groups, interval, output)
|
m.calcGlobalMetrics(interval, output)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// main read function taking multiple measurement rounds, each 'interval' seconds long
|
|
||||||
func (m *LikwidCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|
||||||
if !m.init {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
m.measureThread.Call(func() {
|
|
||||||
m.ReadThread(interval, output)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LikwidCollector) Close() {
|
func (m *LikwidCollector) Close() {
|
||||||
if m.init {
|
if m.init {
|
||||||
m.init = false
|
m.init = false
|
||||||
|
cclog.ComponentDebug(m.name, "Closing ...")
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
m.measureThread.Terminate()
|
if m.initialized {
|
||||||
|
cclog.ComponentDebug(m.name, "Finalize LIKWID perfmon module")
|
||||||
|
C.perfmon_finalize()
|
||||||
m.initialized = false
|
m.initialized = false
|
||||||
|
}
|
||||||
m.lock.Unlock()
|
m.lock.Unlock()
|
||||||
|
cclog.ComponentDebug(m.name, "Finalize LIKWID topology module")
|
||||||
C.topology_finalize()
|
C.topology_finalize()
|
||||||
|
|
||||||
|
cclog.ComponentDebug(m.name, "Closing done")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,12 +10,11 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
|
|||||||
"liblikwid_path" : "/path/to/liblikwid.so",
|
"liblikwid_path" : "/path/to/liblikwid.so",
|
||||||
"accessdaemon_path" : "/folder/that/contains/likwid-accessD",
|
"accessdaemon_path" : "/folder/that/contains/likwid-accessD",
|
||||||
"access_mode" : "direct or accessdaemon or perf_event",
|
"access_mode" : "direct or accessdaemon or perf_event",
|
||||||
"lockfile_path" : "/var/run/likwid.lock",
|
|
||||||
"eventsets": [
|
"eventsets": [
|
||||||
{
|
{
|
||||||
"events" : {
|
"events" : {
|
||||||
"COUNTER0": "EVENT0",
|
"COUNTER0": "EVENT0",
|
||||||
"COUNTER1": "EVENT1"
|
"COUNTER1": "EVENT1",
|
||||||
},
|
},
|
||||||
"metrics" : [
|
"metrics" : [
|
||||||
{
|
{
|
||||||
@@ -27,7 +26,7 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
]
|
||||||
"globalmetrics" : [
|
"globalmetrics" : [
|
||||||
{
|
{
|
||||||
"name": "global_sum",
|
"name": "global_sum",
|
||||||
@@ -41,39 +40,34 @@ The `likwid` collector is probably the most complicated collector. The LIKWID li
|
|||||||
```
|
```
|
||||||
|
|
||||||
The `likwid` configuration consists of two parts, the `eventsets` and `globalmetrics`:
|
The `likwid` configuration consists of two parts, the `eventsets` and `globalmetrics`:
|
||||||
|
|
||||||
- An event set list itself has two parts, the `events` and a set of derivable `metrics`. Each of the `events` is a `counter:event` pair in LIKWID's syntax. The `metrics` are a list of formulas to derive the metric value from the measurements of the `events`' values. Each metric has a name, the formula, a type and a publish flag. There is an optional `unit` field. Counter names can be used like variables in the formulas, so `PMC0+PMC1` sums the measurements for the both events configured in the counters `PMC0` and `PMC1`. You can optionally use `time` for the measurement time and `inverseClock` for `1.0/baseCpuFrequency`. The type tells the LikwidCollector whether it is a metric for each hardware thread (`cpu`) or each CPU socket (`socket`). You may specify a unit for the metric with `unit`. The last one is the publishing flag. It tells the LikwidCollector whether a metric should be sent to the router or is only used internally to compute a global metric.
|
- An event set list itself has two parts, the `events` and a set of derivable `metrics`. Each of the `events` is a `counter:event` pair in LIKWID's syntax. The `metrics` are a list of formulas to derive the metric value from the measurements of the `events`' values. Each metric has a name, the formula, a type and a publish flag. There is an optional `unit` field. Counter names can be used like variables in the formulas, so `PMC0+PMC1` sums the measurements for the both events configured in the counters `PMC0` and `PMC1`. You can optionally use `time` for the measurement time and `inverseClock` for `1.0/baseCpuFrequency`. The type tells the LikwidCollector whether it is a metric for each hardware thread (`cpu`) or each CPU socket (`socket`). You may specify a unit for the metric with `unit`. The last one is the publishing flag. It tells the LikwidCollector whether a metric should be sent to the router or is only used internally to compute a global metric.
|
||||||
- The `globalmetrics` are metrics which require data from multiple event set measurements to be derived. The inputs are the metrics in the event sets. Similar to the metrics in the event sets, the global metrics are defined by a name, a formula, a type and a publish flag. See event set metrics for details. The only difference is that there is no access to the raw event measurements anymore but only to the metrics. Also `time` and `inverseClock` cannot be used anymore. So, the idea is to derive a metric in the `eventsets` section and reuse it in the `globalmetrics` part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics (`"publish": false`). **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases. Similar to the metrics in the eventset, you can specify a metric unit with the `unit` field.
|
- The `globalmetrics` are metrics which require data from multiple event set measurements to be derived. The inputs are the metrics in the event sets. Similar to the metrics in the event sets, the global metrics are defined by a name, a formula, a scope and a publish flag. See event set metrics for details. The only difference is that there is no access to the raw event measurements anymore but only to the metrics. Also `time` and `inverseClock` cannot be used anymore. So, the idea is to derive a metric in the `eventsets` section and reuse it in the `globalmetrics` part. If you need a metric only for deriving the global metrics, disable forwarding of the event set metrics (`"publish": false`). **Be aware** that the combination might be misleading because the "behavior" of a metric changes over time and the multiple measurements might count different computing phases. Similar to the metrics in the eventset, you can specify a metric unit with the `unit` field.
|
||||||
|
|
||||||
Additional options:
|
Additional options:
|
||||||
|
|
||||||
- `force_overwrite`: Same as setting `LIKWID_FORCE=1`. In case counters are already in-use, LIKWID overwrites their configuration to do its measurements
|
- `force_overwrite`: Same as setting `LIKWID_FORCE=1`. In case counters are already in-use, LIKWID overwrites their configuration to do its measurements
|
||||||
- `invalid_to_zero`: In some cases, the calculations result in `NaN` or `Inf`. With this option, all `NaN` and `Inf` values are replaces with `0.0`. See below in [seperate section](./likwidMetric.md#invalid_to_zero-option)
|
- `invalid_to_zero`: In some cases, the calculations result in `NaN` or `Inf`. With this option, all `NaN` and `Inf` values are replaces with `0.0`. See below in [seperate section](./likwidMetric.md#invalid_to_zero-option)
|
||||||
- `access_mode`: Specify LIKWID access mode: `direct` for direct register access as root user or `accessdaemon`. The access mode `perf_event` is current untested.
|
- `access_mode`: Specify LIKWID access mode: `direct` for direct register access as root user or `accessdaemon`. The access mode `perf_event` is current untested.
|
||||||
- `accessdaemon_path`: Folder of the accessDaemon `likwid-accessD` (like `/usr/local/sbin`)
|
- `accessdaemon_path`: Folder of the accessDaemon `likwid-accessD` (like `/usr/local/sbin`)
|
||||||
- `liblikwid_path`: Location of `liblikwid.so` including file name like `/usr/local/lib/liblikwid.so`
|
- `liblikwid_path`: Location of `liblikwid.so` including file name like `/usr/local/lib/liblikwid.so`
|
||||||
- `lockfile_path`: Location of LIKWID's lock file if multiple tools should access the hardware counters. Default `/var/run/likwid.lock`
|
|
||||||
|
|
||||||
### Available metric types
|
### Available metric scopes
|
||||||
|
|
||||||
Hardware performance counters are scattered all over the system nowadays. A counter coveres a specific part of the system. While there are hardware thread specific counter for CPU cycles, instructions and so on, some others are specific for a whole CPU socket/package. To address that, the LikwidCollector provides the specification of a `type` for each metric.
|
Hardware performance counters are scattered all over the system nowadays. A counter coveres a specific part of the system. While there are hardware thread specific counter for CPU cycles, instructions and so on, some others are specific for a whole CPU socket/package. To address that, the LikwidCollector provides the specification of a `type` for each metric.
|
||||||
|
|
||||||
- `hwthread` : One metric per CPU hardware thread with the tags `"type" : "hwthread"` and `"type-id" : "$hwthread_id"`
|
- `hwthread` : One metric per CPU hardware thread with the tags `"type" : "hwthread"` and `"type-id" : "$hwthread_id"`
|
||||||
- `socket` : One metric per CPU socket/package with the tags `"type" : "socket"` and `"type-id" : "$socket_id"`
|
- `socket` : One metric per CPU socket/package with the tags `"type" : "socket"` and `"type-id" : "$socket_id"`
|
||||||
|
|
||||||
**Note:** You cannot specify `socket` type for a metric that is measured at `hwthread` type, so some kind of expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the type of each counter from the *Architecture* pages and as soon as one counter in a metric is socket-specific, the whole metric is socket-specific.
|
**Note:** You cannot specify `socket` scope for a metric that is measured at `hwthread` scope, so some kind of expert knowledge or lookup work in the [Likwid Wiki](https://github.com/RRZE-HPC/likwid/wiki) is required. Get the scope of each counter from the *Architecture* pages and as soon as one counter in a metric is socket-specific, the whole metric is socket-specific.
|
||||||
|
|
||||||
As a guideline:
|
As a guideline:
|
||||||
|
- All counters `FIXCx`, `PMCy` and `TMAz` have the scope `hwthread`
|
||||||
- All counters `FIXCx`, `PMCy` and `TMAz` have the type `hwthread`
|
- All counters names containing `BOX` have the scope `socket`
|
||||||
- All counters names containing `BOX` have the type `socket`
|
- All `PWRx` counters have scope `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"` has `hwthread` scope
|
||||||
- All `PWRx` counters have type `socket`, except `"PWR1" : "RAPL_CORE_ENERGY"` has `hwthread` type
|
- All `DFCx` counters have scope `socket`
|
||||||
- All `DFCx` counters have type `socket`
|
|
||||||
|
|
||||||
### Help with the configuration
|
### Help with the configuration
|
||||||
|
|
||||||
The configuration for the `likwid` collector is quite complicated. Most users don't use LIKWID with the event:counter notation but rely on the performance groups defined by the LIKWID team for each architecture. In order to help with the `likwid` collector configuration, we included a script `scripts/likwid_perfgroup_to_cc_config.py` that creates the configuration of an `eventset` from a performance group (using a LIKWID installation in `$PATH`):
|
The configuration for the `likwid` collector is quite complicated. Most users don't use LIKWID with the event:counter notation but rely on the performance groups defined by the LIKWID team for each architecture. In order to help with the `likwid` collector configuration, we included a script `scripts/likwid_perfgroup_to_cc_config.py` that creates the configuration of an `eventset` from a performance group (using a LIKWID installation in `$PATH`):
|
||||||
|
|
||||||
```
|
```
|
||||||
$ likwid-perfctr -i
|
$ likwid-perfctr -i
|
||||||
[...]
|
[...]
|
||||||
@@ -99,7 +93,7 @@ $ scripts/likwid_perfgroup_to_cc_config.py ICX MEM_DP
|
|||||||
"name": "Runtime (RDTSC) [s]",
|
"name": "Runtime (RDTSC) [s]",
|
||||||
"publish": true,
|
"publish": true,
|
||||||
"unit": "seconds"
|
"unit": "seconds"
|
||||||
"type": "hwthread"
|
"scope": "hwthread"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"..." : "..."
|
"..." : "..."
|
||||||
@@ -115,31 +109,20 @@ You can copy this JSON and add it to the `eventsets` list. If you specify multip
|
|||||||
LIKWID checks the file `/var/run/likwid.lock` before performing any interfering operations. Who is allowed to access the counters is determined by the owner of the file. If it does not exist, it is created for the current user. So, if you want to temporarly allow counter access to a user (e.g. in a job):
|
LIKWID checks the file `/var/run/likwid.lock` before performing any interfering operations. Who is allowed to access the counters is determined by the owner of the file. If it does not exist, it is created for the current user. So, if you want to temporarly allow counter access to a user (e.g. in a job):
|
||||||
|
|
||||||
Before (SLURM prolog, ...)
|
Before (SLURM prolog, ...)
|
||||||
|
```
|
||||||
```bash
|
$ chown $JOBUSER /var/run/likwid.lock
|
||||||
chown $JOBUSER /var/run/likwid.lock
|
|
||||||
```
|
```
|
||||||
|
|
||||||
After (SLURM epilog, ...)
|
After (SLURM epilog, ...)
|
||||||
|
```
|
||||||
```bash
|
$ chown $CCUSER /var/run/likwid.lock
|
||||||
chown $CCUSER /var/run/likwid.lock
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### `invalid_to_zero` option
|
### `invalid_to_zero` option
|
||||||
|
|
||||||
In some cases LIKWID returns `0.0` for some events that are further used in processing and maybe used as divisor in a calculation. After evaluation of a metric, the result might be `NaN` or `+-Inf`. These resulting metrics are commonly not created and forwarded to the router because the [InfluxDB line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#float) does not support these special floating-point values. If you want to have them sent, this option forces these metric values to be `0.0` instead.
|
In some cases LIKWID returns `0.0` for some events that are further used in processing and maybe used as divisor in a calculation. After evaluation of a metric, the result might be `NaN` or `+-Inf`. These resulting metrics are commonly not created and forwarded to the router because the [InfluxDB line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#float) does not support these special floating-point values. If you want to have them sent, this option forces these metric values to be `0.0` instead.
|
||||||
|
|
||||||
One might think this does not happen often but often used metrics in the world of performance engineering like Instructions-per-Cycle (IPC) or more frequently the actual CPU clock are derived with events like `CPU_CLK_UNHALTED_CORE` (Intel) which do not increment in halted state (as the name implies). In there are different power management systems in a chip which can cause a hardware thread to go in such a state. Moreover, if no cycles are executed by the core, also many other events are not incremented as well (like `INSTR_RETIRED_ANY` for retired instructions and part of IPC).
|
One might think this does not happen often but often used metrics in the world of performance engineering like Instructions-per-Cycle (IPC) or more frequently the actual CPU clock are derived with events like `CPU_CLK_UNHALTED_CORE` (Intel) which do not increment in halted state (as the name implies). In there are different power management systems in a chip which can cause a hardware thread to go in such a state. Moreover, if no cycles are executed by the core, also many other events are not incremented as well (like `INSTR_RETIRED_ANY` for retired instructions and part of IPC).
|
||||||
|
|
||||||
### `lockfile_path` option
|
|
||||||
LIKWID can be configured with a lock file with which the access to the performance monitoring registers can be disabled (only the owner of the lock file is allowed to access the registers). When the `lockfile_path` option is set, the collector subscribes to changes to this file to stop monitoring if the owner of the lock file changes. This feature is useful when users should be able to perform own hardware performance counter measurements through LIKWID or any other tool.
|
|
||||||
|
|
||||||
### `send_*_total values` option
|
|
||||||
|
|
||||||
- `send_core_total_values`: Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per CPU core.
|
|
||||||
- `send_socket_total_values` Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per CPU socket.
|
|
||||||
- `send_node_total_values` Metrics, which are usually collected on a per hardware thread basis, are additionally summed up per node.
|
|
||||||
|
|
||||||
### Example configuration
|
### Example configuration
|
||||||
|
|
||||||
@@ -244,7 +227,6 @@ LIKWID can be configured with a lock file with which the access to the performan
|
|||||||
The `likwid` collector reads hardware performance counters at a **hwthread** and **socket** level. The configuration looks quite complicated but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector made multiple iterations and tried to use the performance groups but it lacked flexibility. The current way of configuration provides most flexibility.
|
The `likwid` collector reads hardware performance counters at a **hwthread** and **socket** level. The configuration looks quite complicated but it is basically copy&paste from [LIKWID's performance groups](https://github.com/RRZE-HPC/likwid/tree/master/groups). The collector made multiple iterations and tried to use the performance groups but it lacked flexibility. The current way of configuration provides most flexibility.
|
||||||
|
|
||||||
The logic is as following: There are multiple eventsets, each consisting of a list of counters+events and a list of metrics. If you compare a common performance group with the example setting above, there is not much difference:
|
The logic is as following: There are multiple eventsets, each consisting of a list of counters+events and a list of metrics. If you compare a common performance group with the example setting above, there is not much difference:
|
||||||
|
|
||||||
```
|
```
|
||||||
EVENTSET -> "events": {
|
EVENTSET -> "events": {
|
||||||
FIXC1 ACTUAL_CPU_CLOCK -> "FIXC1": "ACTUAL_CPU_CLOCK",
|
FIXC1 ACTUAL_CPU_CLOCK -> "FIXC1": "ACTUAL_CPU_CLOCK",
|
||||||
@@ -263,7 +245,7 @@ METRICS -> "metrics": [
|
|||||||
IPC PMC0/PMC1 -> {
|
IPC PMC0/PMC1 -> {
|
||||||
-> "name" : "IPC",
|
-> "name" : "IPC",
|
||||||
-> "calc" : "PMC0/PMC1",
|
-> "calc" : "PMC0/PMC1",
|
||||||
-> "type": "hwthread",
|
-> "scope": "hwthread",
|
||||||
-> "publish": true
|
-> "publish": true
|
||||||
-> }
|
-> }
|
||||||
-> ]
|
-> ]
|
||||||
|
|||||||
@@ -8,16 +8,18 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
//
|
||||||
// LoadavgCollector collects:
|
// LoadavgCollector collects:
|
||||||
// * load average of last 1, 5 & 15 minutes
|
// * load average of last 1, 5 & 15 minutes
|
||||||
// * number of processes currently runnable
|
// * number of processes currently runnable
|
||||||
// * total number of processes in system
|
// * total number of processes in system
|
||||||
//
|
//
|
||||||
// See: https://www.kernel.org/doc/html/latest/filesystems/proc.html
|
// See: https://www.kernel.org/doc/html/latest/filesystems/proc.html
|
||||||
|
//
|
||||||
const LOADAVGFILE = "/proc/loadavg"
|
const LOADAVGFILE = "/proc/loadavg"
|
||||||
|
|
||||||
type LoadavgCollector struct {
|
type LoadavgCollector struct {
|
||||||
@@ -66,15 +68,17 @@ func (m *LoadavgCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
buffer, err := os.ReadFile(LOADAVGFILE)
|
buffer, err := os.ReadFile(LOADAVGFILE)
|
||||||
|
if err != nil {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(
|
cclog.ComponentError(
|
||||||
m.name,
|
m.name,
|
||||||
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
|
fmt.Sprintf("Read(): Failed to read file '%s': %v", LOADAVGFILE, err))
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
@@ -92,7 +96,7 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
if m.load_skips[i] {
|
if m.load_skips[i] {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
|
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -111,7 +115,7 @@ func (m *LoadavgCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
if m.proc_skips[i] {
|
if m.proc_skips[i] {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
|
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": x}, now)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,8 +10,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
const LUSTRE_SYSFS = `/sys/fs/lustre`
|
const LUSTRE_SYSFS = `/sys/fs/lustre`
|
||||||
@@ -101,7 +101,7 @@ func getMetricData(lines []string, prefix string, offset int) (int64, error) {
|
|||||||
// llitedir := filepath.Join(LUSTRE_SYSFS, "llite")
|
// llitedir := filepath.Join(LUSTRE_SYSFS, "llite")
|
||||||
// devdir := filepath.Join(llitedir, device)
|
// devdir := filepath.Join(llitedir, device)
|
||||||
// statsfile := filepath.Join(devdir, "stats")
|
// statsfile := filepath.Join(devdir, "stats")
|
||||||
// buffer, err := os.ReadFile(statsfile)
|
// buffer, err := ioutil.ReadFile(statsfile)
|
||||||
// if err != nil {
|
// if err != nil {
|
||||||
// return make([]string, 0)
|
// return make([]string, 0)
|
||||||
// }
|
// }
|
||||||
@@ -377,7 +377,7 @@ func (m *LustreCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -388,7 +388,7 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
|
|||||||
for _, def := range m.definitions {
|
for _, def := range m.definitions {
|
||||||
var use_x int64
|
var use_x int64
|
||||||
var err error
|
var err error
|
||||||
var y lp.CCMessage
|
var y lp.CCMetric
|
||||||
x, err := getMetricData(data, def.lineprefix, def.lineoffset)
|
x, err := getMetricData(data, def.lineprefix, def.lineoffset)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
use_x = x
|
use_x = x
|
||||||
@@ -399,19 +399,19 @@ func (m *LustreCollector) Read(interval time.Duration, output chan lp.CCMessage)
|
|||||||
switch def.calc {
|
switch def.calc {
|
||||||
case "none":
|
case "none":
|
||||||
value = use_x
|
value = use_x
|
||||||
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
||||||
case "difference":
|
case "difference":
|
||||||
value = use_x - devData[def.name]
|
value = use_x - devData[def.name]
|
||||||
if value.(int64) < 0 {
|
if value.(int64) < 0 {
|
||||||
value = 0
|
value = 0
|
||||||
}
|
}
|
||||||
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
||||||
case "derivative":
|
case "derivative":
|
||||||
value = float64(use_x-devData[def.name]) / tdiff.Seconds()
|
value = float64(use_x-devData[def.name]) / tdiff.Seconds()
|
||||||
if value.(float64) < 0 {
|
if value.(float64) < 0 {
|
||||||
value = 0
|
value = 0
|
||||||
}
|
}
|
||||||
y, err = lp.NewMessage(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
y, err = lp.New(def.name, m.tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("device", device)
|
y.AddTag("device", device)
|
||||||
|
|||||||
@@ -12,8 +12,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
const MEMSTATFILE = "/proc/meminfo"
|
const MEMSTATFILE = "/proc/meminfo"
|
||||||
@@ -159,7 +159,7 @@ func (m *MemstatCollector) Init(config json.RawMessage) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -175,7 +175,7 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
y, err := lp.NewMessage(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
y, err := lp.New(name, tags, m.meta, map[string]interface{}{"value": value}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if len(unit) > 0 {
|
if len(unit) > 0 {
|
||||||
y.AddMeta("unit", unit)
|
y.AddMeta("unit", unit)
|
||||||
@@ -208,7 +208,7 @@ func (m *MemstatCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
y, err := lp.NewMessage("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
|
y, err := lp.New("mem_used", tags, m.meta, map[string]interface{}{"value": memUsed}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if len(unit) > 0 {
|
if len(unit) > 0 {
|
||||||
y.AddMeta("unit", unit)
|
y.AddMeta("unit", unit)
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
type MetricCollector interface {
|
type MetricCollector interface {
|
||||||
@@ -13,7 +13,7 @@ type MetricCollector interface {
|
|||||||
Init(config json.RawMessage) error // Initialize metric collector
|
Init(config json.RawMessage) error // Initialize metric collector
|
||||||
Initialized() bool // Is metric collector initialized?
|
Initialized() bool // Is metric collector initialized?
|
||||||
Parallel() bool
|
Parallel() bool
|
||||||
Read(duration time.Duration, output chan lp.CCMessage) // Read metrics from metric collector
|
Read(duration time.Duration, output chan lp.CCMetric) // Read metrics from metric collector
|
||||||
Close() // Close / finish metric collector
|
Close() // Close / finish metric collector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
const NETSTATFILE = "/proc/net/dev"
|
const NETSTATFILE = "/proc/net/dev"
|
||||||
@@ -19,7 +19,6 @@ type NetstatCollectorConfig struct {
|
|||||||
IncludeDevices []string `json:"include_devices"`
|
IncludeDevices []string `json:"include_devices"`
|
||||||
SendAbsoluteValues bool `json:"send_abs_values"`
|
SendAbsoluteValues bool `json:"send_abs_values"`
|
||||||
SendDerivedValues bool `json:"send_derived_values"`
|
SendDerivedValues bool `json:"send_derived_values"`
|
||||||
InterfaceAliases map[string][]string `json:"interface_aliases,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type NetstatCollectorMetric struct {
|
type NetstatCollectorMetric struct {
|
||||||
@@ -34,27 +33,10 @@ type NetstatCollectorMetric struct {
|
|||||||
type NetstatCollector struct {
|
type NetstatCollector struct {
|
||||||
metricCollector
|
metricCollector
|
||||||
config NetstatCollectorConfig
|
config NetstatCollectorConfig
|
||||||
aliasToCanonical map[string]string
|
|
||||||
matches map[string][]NetstatCollectorMetric
|
matches map[string][]NetstatCollectorMetric
|
||||||
lastTimestamp time.Time
|
lastTimestamp time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *NetstatCollector) buildAliasMapping() {
|
|
||||||
m.aliasToCanonical = make(map[string]string)
|
|
||||||
for canon, aliases := range m.config.InterfaceAliases {
|
|
||||||
for _, alias := range aliases {
|
|
||||||
m.aliasToCanonical[alias] = canon
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getCanonicalName(raw string, aliasToCanonical map[string]string) string {
|
|
||||||
if canon, ok := aliasToCanonical[raw]; ok {
|
|
||||||
return canon
|
|
||||||
}
|
|
||||||
return raw
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *NetstatCollector) Init(config json.RawMessage) error {
|
func (m *NetstatCollector) Init(config json.RawMessage) error {
|
||||||
m.name = "NetstatCollector"
|
m.name = "NetstatCollector"
|
||||||
m.parallel = true
|
m.parallel = true
|
||||||
@@ -95,8 +77,6 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m.buildAliasMapping()
|
|
||||||
|
|
||||||
// Check access to net statistic file
|
// Check access to net statistic file
|
||||||
file, err := os.Open(NETSTATFILE)
|
file, err := os.Open(NETSTATFILE)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -117,20 +97,18 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
|
|||||||
// Split line into fields
|
// Split line into fields
|
||||||
f := strings.Fields(l)
|
f := strings.Fields(l)
|
||||||
|
|
||||||
// Get raw and canonical names
|
// Get net device entry
|
||||||
raw := strings.Trim(f[0], ": ")
|
dev := strings.Trim(f[0], ": ")
|
||||||
canonical := getCanonicalName(raw, m.aliasToCanonical)
|
|
||||||
|
|
||||||
// Check if device is a included device
|
// Check if device is a included device
|
||||||
if _, ok := stringArrayContains(m.config.IncludeDevices, canonical); ok {
|
if _, ok := stringArrayContains(m.config.IncludeDevices, dev); ok {
|
||||||
// Tag will contain original device name (raw).
|
tags := map[string]string{"device": dev, "type": "node"}
|
||||||
tags := map[string]string{"stype": "network", "stype-id": raw, "type": "node"}
|
|
||||||
meta_unit_byte := map[string]string{"source": m.name, "group": "Network", "unit": "bytes"}
|
meta_unit_byte := map[string]string{"source": m.name, "group": "Network", "unit": "bytes"}
|
||||||
meta_unit_byte_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "bytes/sec"}
|
meta_unit_byte_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "bytes/sec"}
|
||||||
meta_unit_pkts := map[string]string{"source": m.name, "group": "Network", "unit": "packets"}
|
meta_unit_pkts := map[string]string{"source": m.name, "group": "Network", "unit": "packets"}
|
||||||
meta_unit_pkts_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "packets/sec"}
|
meta_unit_pkts_per_sec := map[string]string{"source": m.name, "group": "Network", "unit": "packets/sec"}
|
||||||
|
|
||||||
m.matches[canonical] = []NetstatCollectorMetric{
|
m.matches[dev] = []NetstatCollectorMetric{
|
||||||
{
|
{
|
||||||
name: "net_bytes_in",
|
name: "net_bytes_in",
|
||||||
index: fieldReceiveBytes,
|
index: fieldReceiveBytes,
|
||||||
@@ -165,6 +143,7 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(m.matches) == 0 {
|
if len(m.matches) == 0 {
|
||||||
@@ -174,7 +153,7 @@ func (m *NetstatCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -185,7 +164,7 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
// Save current timestamp
|
// Save current timestamp
|
||||||
m.lastTimestamp = now
|
m.lastTimestamp = now
|
||||||
|
|
||||||
file, err := os.Open(NETSTATFILE)
|
file, err := os.Open(string(NETSTATFILE))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(m.name, err.Error())
|
cclog.ComponentError(m.name, err.Error())
|
||||||
return
|
return
|
||||||
@@ -204,12 +183,11 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
// Split line into fields
|
// Split line into fields
|
||||||
f := strings.Fields(l)
|
f := strings.Fields(l)
|
||||||
|
|
||||||
// Get raw and canonical names
|
// Get net device entry
|
||||||
raw := strings.Trim(f[0], ":")
|
dev := strings.Trim(f[0], ":")
|
||||||
canonical := getCanonicalName(raw, m.aliasToCanonical)
|
|
||||||
|
|
||||||
// Check if device is a included device
|
// Check if device is a included device
|
||||||
if devmetrics, ok := m.matches[canonical]; ok {
|
if devmetrics, ok := m.matches[dev]; ok {
|
||||||
for i := range devmetrics {
|
for i := range devmetrics {
|
||||||
metric := &devmetrics[i]
|
metric := &devmetrics[i]
|
||||||
|
|
||||||
@@ -219,14 +197,14 @@ func (m *NetstatCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if m.config.SendAbsoluteValues {
|
if m.config.SendAbsoluteValues {
|
||||||
if y, err := lp.NewMessage(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
|
if y, err := lp.New(metric.name, metric.tags, metric.meta, map[string]interface{}{"value": v}, now); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if m.config.SendDerivedValues {
|
if m.config.SendDerivedValues {
|
||||||
if metric.lastValue >= 0 {
|
if metric.lastValue >= 0 {
|
||||||
rate := float64(v-metric.lastValue) / timeDiff
|
rate := float64(v-metric.lastValue) / timeDiff
|
||||||
if y, err := lp.NewMessage(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
|
if y, err := lp.New(metric.name+"_bw", metric.tags, metric.meta_rates, map[string]interface{}{"value": rate}, now); err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,19 +4,14 @@
|
|||||||
```json
|
```json
|
||||||
"netstat": {
|
"netstat": {
|
||||||
"include_devices": [
|
"include_devices": [
|
||||||
"eth0",
|
"eth0"
|
||||||
"eno1"
|
|
||||||
],
|
],
|
||||||
"send_abs_values": true,
|
"send_abs_values" : true,
|
||||||
"send_derived_values": true,
|
"send_derived_values" : true
|
||||||
"interface_aliases": {
|
|
||||||
"eno1": ["eno1np0", "eno1_alt"],
|
|
||||||
"eth0": ["eth0_alias"]
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful **node** metrics. With the `include_devices` list you can specify which network devices should be measured. **Note**: Most other collectors use an _exclude_ list instead of an include list. Optionally, you can define an interface_aliases mapping. For each canonical device (as listed in include_devices), you may provide an array of aliases that may be reported by the system. When an alias is detected, it is preferred for matching, while the output tag stype-id always shows the actual system-reported name.
|
The `netstat` collector reads data from `/proc/net/dev` and outputs a handful **node** metrics. With the `include_devices` list you can specify which network devices should be measured. **Note**: Most other collectors use an _exclude_ list instead of an include list.
|
||||||
|
|
||||||
Metrics:
|
Metrics:
|
||||||
* `net_bytes_in` (`unit=bytes`)
|
* `net_bytes_in` (`unit=bytes`)
|
||||||
@@ -28,4 +23,5 @@ Metrics:
|
|||||||
* `net_pkts_in_bw` (`unit=packets/sec` if `send_derived_values == true`)
|
* `net_pkts_in_bw` (`unit=packets/sec` if `send_derived_values == true`)
|
||||||
* `net_pkts_out_bw` (`unit=packets/sec` if `send_derived_values == true`)
|
* `net_pkts_out_bw` (`unit=packets/sec` if `send_derived_values == true`)
|
||||||
|
|
||||||
The device name is added as tag `stype=network,stype-id=<device>`.
|
The device name is added as tag `device`.
|
||||||
|
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
// First part contains the code for the general NfsCollector.
|
// First part contains the code for the general NfsCollector.
|
||||||
@@ -118,7 +118,7 @@ func (m *nfsCollector) MainInit(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -140,7 +140,7 @@ func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
value := data.current - data.last
|
value := data.current - data.last
|
||||||
y, err := lp.NewMessage(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("version", m.version)
|
y.AddMeta("version", m.version)
|
||||||
output <- y
|
output <- y
|
||||||
|
|||||||
@@ -1,182 +0,0 @@
|
|||||||
package collectors
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// These are the fields we read from the JSON configuration
|
|
||||||
type NfsIOStatCollectorConfig struct {
|
|
||||||
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
|
|
||||||
ExcludeFilesystem []string `json:"exclude_filesystem,omitempty"`
|
|
||||||
UseServerAddressAsSType bool `json:"use_server_as_stype,omitempty"`
|
|
||||||
SendAbsoluteValues bool `json:"send_abs_values"`
|
|
||||||
SendDerivedValues bool `json:"send_derived_values"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// This contains all variables we need during execution and the variables
|
|
||||||
// defined by metricCollector (name, init, ...)
|
|
||||||
type NfsIOStatCollector struct {
|
|
||||||
metricCollector
|
|
||||||
config NfsIOStatCollectorConfig // the configuration structure
|
|
||||||
meta map[string]string // default meta information
|
|
||||||
tags map[string]string // default tags
|
|
||||||
data map[string]map[string]int64 // data storage for difference calculation
|
|
||||||
key string // which device info should be used as subtype ID? 'server' or 'mntpoint'
|
|
||||||
lastTimestamp time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
var deviceRegex = regexp.MustCompile(`device (?P<server>[^ ]+) mounted on (?P<mntpoint>[^ ]+) with fstype nfs(?P<version>\d*) statvers=[\d\.]+`)
|
|
||||||
var bytesRegex = regexp.MustCompile(`\s+bytes:\s+(?P<nread>[^ ]+) (?P<nwrite>[^ ]+) (?P<dread>[^ ]+) (?P<dwrite>[^ ]+) (?P<nfsread>[^ ]+) (?P<nfswrite>[^ ]+) (?P<pageread>[^ ]+) (?P<pagewrite>[^ ]+)`)
|
|
||||||
|
|
||||||
func resolve_regex_fields(s string, regex *regexp.Regexp) map[string]string {
|
|
||||||
fields := make(map[string]string)
|
|
||||||
groups := regex.SubexpNames()
|
|
||||||
for _, match := range regex.FindAllStringSubmatch(s, -1) {
|
|
||||||
for groupIdx, group := range match {
|
|
||||||
if len(groups[groupIdx]) > 0 {
|
|
||||||
fields[groups[groupIdx]] = group
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *NfsIOStatCollector) readNfsiostats() map[string]map[string]int64 {
|
|
||||||
data := make(map[string]map[string]int64)
|
|
||||||
filename := "/proc/self/mountstats"
|
|
||||||
stats, err := os.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
lines := strings.Split(string(stats), "\n")
|
|
||||||
var current map[string]string = nil
|
|
||||||
for _, l := range lines {
|
|
||||||
// Is this a device line with mount point, remote target and NFS version?
|
|
||||||
dev := resolve_regex_fields(l, deviceRegex)
|
|
||||||
if len(dev) > 0 {
|
|
||||||
if _, ok := stringArrayContains(m.config.ExcludeFilesystem, dev[m.key]); !ok {
|
|
||||||
current = dev
|
|
||||||
if len(current["version"]) == 0 {
|
|
||||||
current["version"] = "3"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(current) > 0 {
|
|
||||||
// Byte line parsing (if found the device for it)
|
|
||||||
bytes := resolve_regex_fields(l, bytesRegex)
|
|
||||||
if len(bytes) > 0 {
|
|
||||||
data[current[m.key]] = make(map[string]int64)
|
|
||||||
for name, sval := range bytes {
|
|
||||||
if _, ok := stringArrayContains(m.config.ExcludeMetrics, name); !ok {
|
|
||||||
val, err := strconv.ParseInt(sval, 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
data[current[m.key]][name] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
current = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *NfsIOStatCollector) Init(config json.RawMessage) error {
|
|
||||||
var err error = nil
|
|
||||||
m.name = "NfsIOStatCollector"
|
|
||||||
m.setup()
|
|
||||||
m.parallel = true
|
|
||||||
m.meta = map[string]string{"source": m.name, "group": "NFS", "unit": "bytes"}
|
|
||||||
m.tags = map[string]string{"type": "node"}
|
|
||||||
m.config.UseServerAddressAsSType = false
|
|
||||||
// Set default configuration
|
|
||||||
m.config.SendAbsoluteValues = true
|
|
||||||
m.config.SendDerivedValues = false
|
|
||||||
if len(config) > 0 {
|
|
||||||
err = json.Unmarshal(config, &m.config)
|
|
||||||
if err != nil {
|
|
||||||
cclog.ComponentError(m.name, "Error reading config:", err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.key = "mntpoint"
|
|
||||||
if m.config.UseServerAddressAsSType {
|
|
||||||
m.key = "server"
|
|
||||||
}
|
|
||||||
m.data = m.readNfsiostats()
|
|
||||||
m.lastTimestamp = time.Now()
|
|
||||||
m.init = true
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *NfsIOStatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|
||||||
now := time.Now()
|
|
||||||
timeDiff := now.Sub(m.lastTimestamp).Seconds()
|
|
||||||
m.lastTimestamp = now
|
|
||||||
|
|
||||||
// Get the current values for all mountpoints
|
|
||||||
newdata := m.readNfsiostats()
|
|
||||||
|
|
||||||
for mntpoint, values := range newdata {
|
|
||||||
// Was the mount point already present in the last iteration
|
|
||||||
if old, ok := m.data[mntpoint]; ok {
|
|
||||||
for name, newVal := range values {
|
|
||||||
if m.config.SendAbsoluteValues {
|
|
||||||
msg, err := lp.NewMessage(fmt.Sprintf("nfsio_%s", name), m.tags, m.meta, map[string]interface{}{"value": newVal}, now)
|
|
||||||
if err == nil {
|
|
||||||
msg.AddTag("stype", "filesystem")
|
|
||||||
msg.AddTag("stype-id", mntpoint)
|
|
||||||
output <- msg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.config.SendDerivedValues {
|
|
||||||
rate := float64(newVal-old[name]) / timeDiff
|
|
||||||
msg, err := lp.NewMessage(fmt.Sprintf("nfsio_%s_bw", name), m.tags, m.meta, map[string]interface{}{"value": rate}, now)
|
|
||||||
if err == nil {
|
|
||||||
if strings.HasPrefix(name, "page") {
|
|
||||||
msg.AddMeta("unit", "4K_pages/s")
|
|
||||||
} else {
|
|
||||||
msg.AddMeta("unit", "bytes/sec")
|
|
||||||
}
|
|
||||||
msg.AddTag("stype", "filesystem")
|
|
||||||
msg.AddTag("stype-id", mntpoint)
|
|
||||||
output <- msg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
old[name] = newVal
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// First time we see this mount point, store all values
|
|
||||||
m.data[mntpoint] = values
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Reset entries that do not exist anymore
|
|
||||||
for mntpoint := range m.data {
|
|
||||||
found := false
|
|
||||||
for new := range newdata {
|
|
||||||
if new == mntpoint {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
m.data[mntpoint] = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *NfsIOStatCollector) Close() {
|
|
||||||
// Unset flag
|
|
||||||
m.init = false
|
|
||||||
}
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
## `nfsiostat` collector
|
|
||||||
|
|
||||||
```json
|
|
||||||
"nfsiostat": {
|
|
||||||
"exclude_metrics": [
|
|
||||||
"oread", "pageread"
|
|
||||||
],
|
|
||||||
"exclude_filesystems": [
|
|
||||||
"/mnt"
|
|
||||||
],
|
|
||||||
"use_server_as_stype": false,
|
|
||||||
"send_abs_values": false,
|
|
||||||
"send_derived_values": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The `nfsiostat` collector reads data from `/proc/self/mountstats` and outputs a handful **node** metrics for each NFS filesystem. If a metric or filesystem is not required, it can be excluded from forwarding it to the sink. **Note:** When excluding metrics, you must provide the base metric name (e.g. pageread) without the nfsio_ prefix. This exclusion applies to both absolute and derived values.
|
|
||||||
|
|
||||||
Metrics:
|
|
||||||
* `nfsio_nread`: Bytes transferred by normal `read()` calls
|
|
||||||
* `nfsio_nwrite`: Bytes transferred by normal `write()` calls
|
|
||||||
* `nfsio_oread`: Bytes transferred by `read()` calls with `O_DIRECT`
|
|
||||||
* `nfsio_owrite`: Bytes transferred by `write()` calls with `O_DIRECT`
|
|
||||||
* `nfsio_pageread`: Pages transferred by `read()` calls
|
|
||||||
* `nfsio_pagewrite`: Pages transferred by `write()` calls
|
|
||||||
* `nfsio_nfsread`: Bytes transferred for reading from the server
|
|
||||||
* `nfsio_nfswrite`: Pages transferred by writing to the server
|
|
||||||
|
|
||||||
For each of these, if derived values are enabled, an additional metric is sent with the `_bw` suffix, which represents the rate:
|
|
||||||
|
|
||||||
* For normal byte metrics: `unit=bytes/sec`
|
|
||||||
* For page metrics: `unit=4K_pages/s`
|
|
||||||
|
|
||||||
The `nfsiostat` collector adds the mountpoint to the tags as `stype=filesystem,stype-id=<mountpoint>`. If the server address should be used instead of the mountpoint, use the `use_server_as_stype` config setting.
|
|
||||||
@@ -10,58 +10,41 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
type NUMAStatsCollectorConfig struct {
|
//
|
||||||
SendAbsoluteValues bool `json:"send_abs_values"`
|
// Numa policy hit/miss statistics
|
||||||
SendDerivedValues bool `json:"send_derived_values"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Non-Uniform Memory Access (NUMA) policy hit/miss statistics
|
|
||||||
//
|
//
|
||||||
// numa_hit:
|
// numa_hit:
|
||||||
//
|
|
||||||
// A process wanted to allocate memory from this node, and succeeded.
|
// A process wanted to allocate memory from this node, and succeeded.
|
||||||
//
|
|
||||||
// numa_miss:
|
// numa_miss:
|
||||||
//
|
|
||||||
// A process wanted to allocate memory from another node,
|
// A process wanted to allocate memory from another node,
|
||||||
// but ended up with memory from this node.
|
// but ended up with memory from this node.
|
||||||
//
|
|
||||||
// numa_foreign:
|
// numa_foreign:
|
||||||
//
|
|
||||||
// A process wanted to allocate on this node,
|
// A process wanted to allocate on this node,
|
||||||
// but ended up with memory from another node.
|
// but ended up with memory from another node.
|
||||||
//
|
|
||||||
// local_node:
|
// local_node:
|
||||||
//
|
|
||||||
// A process ran on this node's CPU,
|
// A process ran on this node's CPU,
|
||||||
// and got memory from this node.
|
// and got memory from this node.
|
||||||
//
|
|
||||||
// other_node:
|
// other_node:
|
||||||
//
|
|
||||||
// A process ran on a different node's CPU
|
// A process ran on a different node's CPU
|
||||||
// and got memory from this node.
|
// and got memory from this node.
|
||||||
//
|
|
||||||
// interleave_hit:
|
// interleave_hit:
|
||||||
//
|
|
||||||
// Interleaving wanted to allocate from this node
|
// Interleaving wanted to allocate from this node
|
||||||
// and succeeded.
|
// and succeeded.
|
||||||
//
|
//
|
||||||
// See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
|
// See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
|
||||||
|
//
|
||||||
type NUMAStatsCollectorTopolgy struct {
|
type NUMAStatsCollectorTopolgy struct {
|
||||||
file string
|
file string
|
||||||
tagSet map[string]string
|
tagSet map[string]string
|
||||||
previousValues map[string]int64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type NUMAStatsCollector struct {
|
type NUMAStatsCollector struct {
|
||||||
metricCollector
|
metricCollector
|
||||||
topology []NUMAStatsCollectorTopolgy
|
topology []NUMAStatsCollectorTopolgy
|
||||||
config NUMAStatsCollectorConfig
|
|
||||||
lastTimestamp time.Time
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
|
func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
|
||||||
@@ -96,42 +79,35 @@ func (m *NUMAStatsCollector) Init(config json.RawMessage) error {
|
|||||||
NUMAStatsCollectorTopolgy{
|
NUMAStatsCollectorTopolgy{
|
||||||
file: file,
|
file: file,
|
||||||
tagSet: map[string]string{"memoryDomain": node},
|
tagSet: map[string]string{"memoryDomain": node},
|
||||||
previousValues: make(map[string]int64),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialized
|
|
||||||
cclog.ComponentDebug(m.name, "initialized", len(m.topology), "NUMA domains")
|
|
||||||
m.init = true
|
m.init = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
timeDiff := now.Sub(m.lastTimestamp).Seconds()
|
|
||||||
m.lastTimestamp = now
|
|
||||||
|
|
||||||
for i := range m.topology {
|
for i := range m.topology {
|
||||||
// Loop for all NUMA domains
|
// Loop for all NUMA domains
|
||||||
t := &m.topology[i]
|
t := &m.topology[i]
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
file, err := os.Open(t.file)
|
file, err := os.Open(t.file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError(
|
cclog.ComponentError(
|
||||||
m.name,
|
m.name,
|
||||||
fmt.Sprintf("Read(): Failed to open file '%s': %v", t.file, err))
|
fmt.Sprintf("Read(): Failed to open file '%s': %v", t.file, err))
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
scanner := bufio.NewScanner(file)
|
scanner := bufio.NewScanner(file)
|
||||||
|
|
||||||
// Read line by line
|
// Read line by line
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
line := scanner.Text()
|
split := strings.Fields(scanner.Text())
|
||||||
split := strings.Fields(line)
|
|
||||||
if len(split) != 2 {
|
if len(split) != 2 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -143,9 +119,7 @@ func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessa
|
|||||||
fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err))
|
fmt.Sprintf("Read(): Failed to convert %s='%s' to int64: %v", key, split[1], err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
y, err := lp.New(
|
||||||
if m.config.SendAbsoluteValues {
|
|
||||||
msg, err := lp.NewMessage(
|
|
||||||
"numastats_"+key,
|
"numastats_"+key,
|
||||||
t.tagSet,
|
t.tagSet,
|
||||||
m.meta,
|
m.meta,
|
||||||
@@ -153,28 +127,10 @@ func (m *NUMAStatsCollector) Read(interval time.Duration, output chan lp.CCMessa
|
|||||||
now,
|
now,
|
||||||
)
|
)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- msg
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.config.SendDerivedValues {
|
|
||||||
prev, ok := t.previousValues[key]
|
|
||||||
if ok {
|
|
||||||
rate := float64(value-prev) / timeDiff
|
|
||||||
msg, err := lp.NewMessage(
|
|
||||||
"numastats_"+key+"_rate",
|
|
||||||
t.tagSet,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{"value": rate},
|
|
||||||
now,
|
|
||||||
)
|
|
||||||
if err == nil {
|
|
||||||
output <- msg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.previousValues[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file.Close()
|
file.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,26 +1,15 @@
|
|||||||
|
|
||||||
## `numastat` collector
|
## `numastat` collector
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"numastats": {
|
"numastat": {}
|
||||||
"send_abs_values" : true,
|
|
||||||
"send_derived_values" : true
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The `numastat` collector reads data from `/sys/devices/system/node/node*/numastat` and outputs a handful **memoryDomain** metrics. See: <https://www.kernel.org/doc/html/latest/admin-guide/numastat.html>
|
The `numastat` collector reads data from `/sys/devices/system/node/node*/numastat` and outputs a handful **memoryDomain** metrics. See: https://www.kernel.org/doc/html/latest/admin-guide/numastat.html
|
||||||
|
|
||||||
Metrics:
|
Metrics:
|
||||||
|
|
||||||
* `numastats_numa_hit`: A process wanted to allocate memory from this node, and succeeded.
|
* `numastats_numa_hit`: A process wanted to allocate memory from this node, and succeeded.
|
||||||
* `numastats_numa_miss`: A process wanted to allocate memory from another node, but ended up with memory from this node.
|
* `numastats_numa_miss`: A process wanted to allocate memory from another node, but ended up with memory from this node.
|
||||||
* `numastats_numa_foreign`: A process wanted to allocate on this node, but ended up with memory from another node.
|
* `numastats_numa_foreign`: A process wanted to allocate on this node, but ended up with memory from another node.
|
||||||
* `numastats_local_node`: A process ran on this node's CPU, and got memory from this node.
|
* `numastats_local_node`: A process ran on this node's CPU, and got memory from this node.
|
||||||
* `numastats_other_node`: A process ran on a different node's CPU, and got memory from this node.
|
* `numastats_other_node`: A process ran on a different node's CPU, and got memory from this node.
|
||||||
* `numastats_interleave_hit`: Interleaving wanted to allocate from this node and succeeded.
|
* `numastats_interleave_hit`: Interleaving wanted to allocate from this node and succeeded.
|
||||||
* `numastats_numa_hit_rate` (if `send_derived_values == true`): Derived rate value per second.
|
|
||||||
* `numastats_numa_miss_rate` (if `send_derived_values == true`): Derived rate value per second.
|
|
||||||
* `numastats_numa_foreign_rate` (if `send_derived_values == true`): Derived rate value per second.
|
|
||||||
* `numastats_local_node_rate` (if `send_derived_values == true`): Derived rate value per second.
|
|
||||||
* `numastats_other_node_rate` (if `send_derived_values == true`): Derived rate value per second.
|
|
||||||
* `numastats_interleave_hit_rate` (if `send_derived_values == true`): Derived rate value per second.
|
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
"github.com/NVIDIA/go-nvml/pkg/nvml"
|
"github.com/NVIDIA/go-nvml/pkg/nvml"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -71,14 +71,6 @@ func (m *NvidiaCollector) Init(config json.RawMessage) error {
|
|||||||
|
|
||||||
// Initialize NVIDIA Management Library (NVML)
|
// Initialize NVIDIA Management Library (NVML)
|
||||||
ret := nvml.Init()
|
ret := nvml.Init()
|
||||||
|
|
||||||
// Error: NVML library not found
|
|
||||||
// (nvml.ErrorString can not be used in this case)
|
|
||||||
if ret == nvml.ERROR_LIBRARY_NOT_FOUND {
|
|
||||||
err = fmt.Errorf("NVML library not found")
|
|
||||||
cclog.ComponentError(m.name, err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ret != nvml.SUCCESS {
|
if ret != nvml.SUCCESS {
|
||||||
err = errors.New(nvml.ErrorString(ret))
|
err = errors.New(nvml.ErrorString(ret))
|
||||||
cclog.ComponentError(m.name, "Unable to initialize NVML", err.Error())
|
cclog.ComponentError(m.name, "Unable to initialize NVML", err.Error())
|
||||||
@@ -206,7 +198,7 @@ func (m *NvidiaCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_fb_mem_total"] || !device.excludeMetrics["nv_fb_mem_used"] || !device.excludeMetrics["nv_fb_mem_reserved"] {
|
if !device.excludeMetrics["nv_fb_mem_total"] || !device.excludeMetrics["nv_fb_mem_used"] || !device.excludeMetrics["nv_fb_mem_reserved"] {
|
||||||
var total uint64
|
var total uint64
|
||||||
var used uint64
|
var used uint64
|
||||||
@@ -222,7 +214,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
|
|
||||||
if !device.excludeMetrics["nv_fb_mem_total"] {
|
if !device.excludeMetrics["nv_fb_mem_total"] {
|
||||||
t := float64(total) / (1024 * 1024)
|
t := float64(total) / (1024 * 1024)
|
||||||
y, err := lp.NewMessage("nv_fb_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_fb_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MByte")
|
y.AddMeta("unit", "MByte")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -231,7 +223,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
|
|
||||||
if !device.excludeMetrics["nv_fb_mem_used"] {
|
if !device.excludeMetrics["nv_fb_mem_used"] {
|
||||||
f := float64(used) / (1024 * 1024)
|
f := float64(used) / (1024 * 1024)
|
||||||
y, err := lp.NewMessage("nv_fb_mem_used", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
|
y, err := lp.New("nv_fb_mem_used", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MByte")
|
y.AddMeta("unit", "MByte")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -240,7 +232,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
|
|
||||||
if v2 && !device.excludeMetrics["nv_fb_mem_reserved"] {
|
if v2 && !device.excludeMetrics["nv_fb_mem_reserved"] {
|
||||||
r := float64(reserved) / (1024 * 1024)
|
r := float64(reserved) / (1024 * 1024)
|
||||||
y, err := lp.NewMessage("nv_fb_mem_reserved", device.tags, device.meta, map[string]interface{}{"value": r}, time.Now())
|
y, err := lp.New("nv_fb_mem_reserved", device.tags, device.meta, map[string]interface{}{"value": r}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MByte")
|
y.AddMeta("unit", "MByte")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -250,7 +242,7 @@ func readMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_bar1_mem_total"] || !device.excludeMetrics["nv_bar1_mem_used"] {
|
if !device.excludeMetrics["nv_bar1_mem_total"] || !device.excludeMetrics["nv_bar1_mem_used"] {
|
||||||
meminfo, ret := nvml.DeviceGetBAR1MemoryInfo(device.device)
|
meminfo, ret := nvml.DeviceGetBAR1MemoryInfo(device.device)
|
||||||
if ret != nvml.SUCCESS {
|
if ret != nvml.SUCCESS {
|
||||||
@@ -259,7 +251,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
|
|||||||
}
|
}
|
||||||
if !device.excludeMetrics["nv_bar1_mem_total"] {
|
if !device.excludeMetrics["nv_bar1_mem_total"] {
|
||||||
t := float64(meminfo.Bar1Total) / (1024 * 1024)
|
t := float64(meminfo.Bar1Total) / (1024 * 1024)
|
||||||
y, err := lp.NewMessage("nv_bar1_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_bar1_mem_total", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MByte")
|
y.AddMeta("unit", "MByte")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -267,7 +259,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
|
|||||||
}
|
}
|
||||||
if !device.excludeMetrics["nv_bar1_mem_used"] {
|
if !device.excludeMetrics["nv_bar1_mem_used"] {
|
||||||
t := float64(meminfo.Bar1Used) / (1024 * 1024)
|
t := float64(meminfo.Bar1Used) / (1024 * 1024)
|
||||||
y, err := lp.NewMessage("nv_bar1_mem_used", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_bar1_mem_used", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MByte")
|
y.AddMeta("unit", "MByte")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -277,7 +269,7 @@ func readBarMemoryInfo(device NvidiaCollectorDevice, output chan lp.CCMessage) e
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
|
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
|
||||||
if ret != nvml.SUCCESS {
|
if ret != nvml.SUCCESS {
|
||||||
err := errors.New(nvml.ErrorString(ret))
|
err := errors.New(nvml.ErrorString(ret))
|
||||||
@@ -301,14 +293,14 @@ func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
util, ret := nvml.DeviceGetUtilizationRates(device.device)
|
util, ret := nvml.DeviceGetUtilizationRates(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
if !device.excludeMetrics["nv_util"] {
|
if !device.excludeMetrics["nv_util"] {
|
||||||
y, err := lp.NewMessage("nv_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
|
y, err := lp.New("nv_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Gpu)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "%")
|
y.AddMeta("unit", "%")
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !device.excludeMetrics["nv_mem_util"] {
|
if !device.excludeMetrics["nv_mem_util"] {
|
||||||
y, err := lp.NewMessage("nv_mem_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
|
y, err := lp.New("nv_mem_util", device.tags, device.meta, map[string]interface{}{"value": float64(util.Memory)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "%")
|
y.AddMeta("unit", "%")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -319,7 +311,7 @@ func readUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readTemp(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_temp"] {
|
if !device.excludeMetrics["nv_temp"] {
|
||||||
// Retrieves the current temperature readings for the device, in degrees C.
|
// Retrieves the current temperature readings for the device, in degrees C.
|
||||||
//
|
//
|
||||||
@@ -328,7 +320,7 @@ func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
// * NVML_TEMPERATURE_COUNT
|
// * NVML_TEMPERATURE_COUNT
|
||||||
temp, ret := nvml.DeviceGetTemperature(device.device, nvml.TEMPERATURE_GPU)
|
temp, ret := nvml.DeviceGetTemperature(device.device, nvml.TEMPERATURE_GPU)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_temp", device.tags, device.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
|
y, err := lp.New("nv_temp", device.tags, device.meta, map[string]interface{}{"value": float64(temp)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "degC")
|
y.AddMeta("unit", "degC")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -338,7 +330,7 @@ func readTemp(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readFan(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_fan"] {
|
if !device.excludeMetrics["nv_fan"] {
|
||||||
// Retrieves the intended operating speed of the device's fan.
|
// Retrieves the intended operating speed of the device's fan.
|
||||||
//
|
//
|
||||||
@@ -351,7 +343,7 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
// This value may exceed 100% in certain cases.
|
// This value may exceed 100% in certain cases.
|
||||||
fan, ret := nvml.DeviceGetFanSpeed(device.device)
|
fan, ret := nvml.DeviceGetFanSpeed(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
|
y, err := lp.New("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "%")
|
y.AddMeta("unit", "%")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -361,14 +353,14 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// func readFans(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
// func readFans(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
// if !device.excludeMetrics["nv_fan"] {
|
// if !device.excludeMetrics["nv_fan"] {
|
||||||
// numFans, ret := nvml.DeviceGetNumFans(device.device)
|
// numFans, ret := nvml.DeviceGetNumFans(device.device)
|
||||||
// if ret == nvml.SUCCESS {
|
// if ret == nvml.SUCCESS {
|
||||||
// for i := 0; i < numFans; i++ {
|
// for i := 0; i < numFans; i++ {
|
||||||
// fan, ret := nvml.DeviceGetFanSpeed_v2(device.device, i)
|
// fan, ret := nvml.DeviceGetFanSpeed_v2(device.device, i)
|
||||||
// if ret == nvml.SUCCESS {
|
// if ret == nvml.SUCCESS {
|
||||||
// y, err := lp.NewMessage("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
|
// y, err := lp.New("nv_fan", device.tags, device.meta, map[string]interface{}{"value": float64(fan)}, time.Now())
|
||||||
// if err == nil {
|
// if err == nil {
|
||||||
// y.AddMeta("unit", "%")
|
// y.AddMeta("unit", "%")
|
||||||
// y.AddTag("stype", "fan")
|
// y.AddTag("stype", "fan")
|
||||||
@@ -382,7 +374,7 @@ func readFan(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
// return nil
|
// return nil
|
||||||
// }
|
// }
|
||||||
|
|
||||||
func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_ecc_mode"] {
|
if !device.excludeMetrics["nv_ecc_mode"] {
|
||||||
// Retrieves the current and pending ECC modes for the device.
|
// Retrieves the current and pending ECC modes for the device.
|
||||||
//
|
//
|
||||||
@@ -393,21 +385,21 @@ func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
// The "pending" ECC mode refers to the target mode following the next reboot.
|
// The "pending" ECC mode refers to the target mode following the next reboot.
|
||||||
_, ecc_pend, ret := nvml.DeviceGetEccMode(device.device)
|
_, ecc_pend, ret := nvml.DeviceGetEccMode(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
var y lp.CCMessage
|
var y lp.CCMetric
|
||||||
var err error
|
var err error
|
||||||
switch ecc_pend {
|
switch ecc_pend {
|
||||||
case nvml.FEATURE_DISABLED:
|
case nvml.FEATURE_DISABLED:
|
||||||
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "OFF"}, time.Now())
|
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "OFF"}, time.Now())
|
||||||
case nvml.FEATURE_ENABLED:
|
case nvml.FEATURE_ENABLED:
|
||||||
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "ON"}, time.Now())
|
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "ON"}, time.Now())
|
||||||
default:
|
default:
|
||||||
y, err = lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
|
y, err = lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "UNKNOWN"}, time.Now())
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
} else if ret == nvml.ERROR_NOT_SUPPORTED {
|
} else if ret == nvml.ERROR_NOT_SUPPORTED {
|
||||||
y, err := lp.NewMessage("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "N/A"}, time.Now())
|
y, err := lp.New("nv_ecc_mode", device.tags, device.meta, map[string]interface{}{"value": "N/A"}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -416,7 +408,7 @@ func readEccMode(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_perf_state"] {
|
if !device.excludeMetrics["nv_perf_state"] {
|
||||||
// Retrieves the current performance state for the device.
|
// Retrieves the current performance state for the device.
|
||||||
//
|
//
|
||||||
@@ -427,7 +419,7 @@ func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
// 32: Unknown performance state.
|
// 32: Unknown performance state.
|
||||||
pState, ret := nvml.DeviceGetPerformanceState(device.device)
|
pState, ret := nvml.DeviceGetPerformanceState(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_perf_state", device.tags, device.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
|
y, err := lp.New("nv_perf_state", device.tags, device.meta, map[string]interface{}{"value": fmt.Sprintf("P%d", int(pState))}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -436,7 +428,7 @@ func readPerfState(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_power_usage"] {
|
if !device.excludeMetrics["nv_power_usage"] {
|
||||||
// Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
|
// Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory)
|
||||||
//
|
//
|
||||||
@@ -450,7 +442,7 @@ func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
if mode == nvml.FEATURE_ENABLED {
|
if mode == nvml.FEATURE_ENABLED {
|
||||||
power, ret := nvml.DeviceGetPowerUsage(device.device)
|
power, ret := nvml.DeviceGetPowerUsage(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_power_usage", device.tags, device.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
|
y, err := lp.New("nv_power_usage", device.tags, device.meta, map[string]interface{}{"value": float64(power) / 1000}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "watts")
|
y.AddMeta("unit", "watts")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -461,7 +453,7 @@ func readPowerUsage(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readClocks(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
// Retrieves the current clock speeds for the device.
|
// Retrieves the current clock speeds for the device.
|
||||||
//
|
//
|
||||||
// Available clock information:
|
// Available clock information:
|
||||||
@@ -471,7 +463,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
if !device.excludeMetrics["nv_graphics_clock"] {
|
if !device.excludeMetrics["nv_graphics_clock"] {
|
||||||
graphicsClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_GRAPHICS)
|
graphicsClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_GRAPHICS)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
|
y, err := lp.New("nv_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(graphicsClock)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -482,7 +474,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
if !device.excludeMetrics["nv_sm_clock"] {
|
if !device.excludeMetrics["nv_sm_clock"] {
|
||||||
smCock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
|
smCock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(smCock)}, time.Now())
|
y, err := lp.New("nv_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(smCock)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -493,7 +485,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
if !device.excludeMetrics["nv_mem_clock"] {
|
if !device.excludeMetrics["nv_mem_clock"] {
|
||||||
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
|
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
|
y, err := lp.New("nv_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -503,7 +495,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
if !device.excludeMetrics["nv_video_clock"] {
|
if !device.excludeMetrics["nv_video_clock"] {
|
||||||
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_VIDEO)
|
memClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_VIDEO)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
|
y, err := lp.New("nv_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(memClock)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -513,7 +505,7 @@ func readClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
// Retrieves the maximum clock speeds for the device.
|
// Retrieves the maximum clock speeds for the device.
|
||||||
//
|
//
|
||||||
// Available clock information:
|
// Available clock information:
|
||||||
@@ -528,7 +520,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
if !device.excludeMetrics["nv_max_graphics_clock"] {
|
if !device.excludeMetrics["nv_max_graphics_clock"] {
|
||||||
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_GRAPHICS)
|
max_gclk, ret := nvml.DeviceGetMaxClockInfo(device.device, nvml.CLOCK_GRAPHICS)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_max_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
|
y, err := lp.New("nv_max_graphics_clock", device.tags, device.meta, map[string]interface{}{"value": float64(max_gclk)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -539,7 +531,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
if !device.excludeMetrics["nv_max_sm_clock"] {
|
if !device.excludeMetrics["nv_max_sm_clock"] {
|
||||||
maxSmClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
|
maxSmClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_SM)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_max_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
|
y, err := lp.New("nv_max_sm_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxSmClock)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -550,7 +542,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
if !device.excludeMetrics["nv_max_mem_clock"] {
|
if !device.excludeMetrics["nv_max_mem_clock"] {
|
||||||
maxMemClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
|
maxMemClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_MEM)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_max_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
|
y, err := lp.New("nv_max_mem_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -561,7 +553,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
if !device.excludeMetrics["nv_max_video_clock"] {
|
if !device.excludeMetrics["nv_max_video_clock"] {
|
||||||
maxMemClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_VIDEO)
|
maxMemClock, ret := nvml.DeviceGetClockInfo(device.device, nvml.CLOCK_VIDEO)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_max_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
|
y, err := lp.New("nv_max_video_clock", device.tags, device.meta, map[string]interface{}{"value": float64(maxMemClock)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "MHz")
|
y.AddMeta("unit", "MHz")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -571,7 +563,7 @@ func readMaxClocks(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_ecc_uncorrected_error"] {
|
if !device.excludeMetrics["nv_ecc_uncorrected_error"] {
|
||||||
// Retrieves the total ECC error counts for the device.
|
// Retrieves the total ECC error counts for the device.
|
||||||
//
|
//
|
||||||
@@ -584,7 +576,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
// i.e. the total set of errors across the entire device.
|
// i.e. the total set of errors across the entire device.
|
||||||
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_UNCORRECTED, nvml.AGGREGATE_ECC)
|
ecc_db, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_UNCORRECTED, nvml.AGGREGATE_ECC)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_ecc_uncorrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
|
y, err := lp.New("nv_ecc_uncorrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_db)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -593,7 +585,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
if !device.excludeMetrics["nv_ecc_corrected_error"] {
|
if !device.excludeMetrics["nv_ecc_corrected_error"] {
|
||||||
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_CORRECTED, nvml.AGGREGATE_ECC)
|
ecc_sb, ret := nvml.DeviceGetTotalEccErrors(device.device, nvml.MEMORY_ERROR_TYPE_CORRECTED, nvml.AGGREGATE_ECC)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_ecc_corrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
|
y, err := lp.New("nv_ecc_corrected_error", device.tags, device.meta, map[string]interface{}{"value": float64(ecc_sb)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -602,7 +594,7 @@ func readEccErrors(device NvidiaCollectorDevice, output chan lp.CCMessage) error
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_power_max_limit"] {
|
if !device.excludeMetrics["nv_power_max_limit"] {
|
||||||
// Retrieves the power management limit associated with this device.
|
// Retrieves the power management limit associated with this device.
|
||||||
//
|
//
|
||||||
@@ -612,7 +604,7 @@ func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
// If the card's total power draw reaches this limit the power management algorithm kicks in.
|
// If the card's total power draw reaches this limit the power management algorithm kicks in.
|
||||||
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device.device)
|
pwr_limit, ret := nvml.DeviceGetPowerManagementLimit(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_power_max_limit", device.tags, device.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
|
y, err := lp.New("nv_power_max_limit", device.tags, device.meta, map[string]interface{}{"value": float64(pwr_limit) / 1000}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "watts")
|
y.AddMeta("unit", "watts")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -622,7 +614,7 @@ func readPowerLimit(device NvidiaCollectorDevice, output chan lp.CCMessage) erro
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
|
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
|
||||||
if ret != nvml.SUCCESS {
|
if ret != nvml.SUCCESS {
|
||||||
err := errors.New(nvml.ErrorString(ret))
|
err := errors.New(nvml.ErrorString(ret))
|
||||||
@@ -639,7 +631,7 @@ func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
|
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
|
||||||
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device.device)
|
enc_util, _, ret := nvml.DeviceGetEncoderUtilization(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_encoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
|
y, err := lp.New("nv_encoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(enc_util)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "%")
|
y.AddMeta("unit", "%")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -649,7 +641,7 @@ func readEncUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
|
isMig, ret := nvml.DeviceIsMigDeviceHandle(device.device)
|
||||||
if ret != nvml.SUCCESS {
|
if ret != nvml.SUCCESS {
|
||||||
err := errors.New(nvml.ErrorString(ret))
|
err := errors.New(nvml.ErrorString(ret))
|
||||||
@@ -666,7 +658,7 @@ func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
|
// Note: On MIG-enabled GPUs, querying encoder utilization is not currently supported.
|
||||||
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device.device)
|
dec_util, _, ret := nvml.DeviceGetDecoderUtilization(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_decoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
|
y, err := lp.New("nv_decoder_util", device.tags, device.meta, map[string]interface{}{"value": float64(dec_util)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "%")
|
y.AddMeta("unit", "%")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -676,7 +668,7 @@ func readDecUtilization(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_remapped_rows_corrected"] ||
|
if !device.excludeMetrics["nv_remapped_rows_corrected"] ||
|
||||||
!device.excludeMetrics["nv_remapped_rows_uncorrected"] ||
|
!device.excludeMetrics["nv_remapped_rows_uncorrected"] ||
|
||||||
!device.excludeMetrics["nv_remapped_rows_pending"] ||
|
!device.excludeMetrics["nv_remapped_rows_pending"] ||
|
||||||
@@ -693,13 +685,13 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
|
|||||||
corrected, uncorrected, pending, failure, ret := nvml.DeviceGetRemappedRows(device.device)
|
corrected, uncorrected, pending, failure, ret := nvml.DeviceGetRemappedRows(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
if !device.excludeMetrics["nv_remapped_rows_corrected"] {
|
if !device.excludeMetrics["nv_remapped_rows_corrected"] {
|
||||||
y, err := lp.NewMessage("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(corrected)}, time.Now())
|
y, err := lp.New("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(corrected)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !device.excludeMetrics["nv_remapped_rows_uncorrected"] {
|
if !device.excludeMetrics["nv_remapped_rows_uncorrected"] {
|
||||||
y, err := lp.NewMessage("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(uncorrected)}, time.Now())
|
y, err := lp.New("nv_remapped_rows_corrected", device.tags, device.meta, map[string]interface{}{"value": float64(uncorrected)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -709,7 +701,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
|
|||||||
if pending {
|
if pending {
|
||||||
p = 1
|
p = 1
|
||||||
}
|
}
|
||||||
y, err := lp.NewMessage("nv_remapped_rows_pending", device.tags, device.meta, map[string]interface{}{"value": p}, time.Now())
|
y, err := lp.New("nv_remapped_rows_pending", device.tags, device.meta, map[string]interface{}{"value": p}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -719,7 +711,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
|
|||||||
if failure {
|
if failure {
|
||||||
f = 1
|
f = 1
|
||||||
}
|
}
|
||||||
y, err := lp.NewMessage("nv_remapped_rows_failure", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
|
y, err := lp.New("nv_remapped_rows_failure", device.tags, device.meta, map[string]interface{}{"value": f}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -729,7 +721,7 @@ func readRemappedRows(device NvidiaCollectorDevice, output chan lp.CCMessage) er
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
if !device.excludeMetrics["nv_compute_processes"] {
|
if !device.excludeMetrics["nv_compute_processes"] {
|
||||||
// Get information about processes with a compute context on a device
|
// Get information about processes with a compute context on a device
|
||||||
//
|
//
|
||||||
@@ -753,7 +745,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
|
|||||||
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
|
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
|
||||||
procList, ret := nvml.DeviceGetComputeRunningProcesses(device.device)
|
procList, ret := nvml.DeviceGetComputeRunningProcesses(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
|
y, err := lp.New("nv_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -782,7 +774,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
|
|||||||
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
|
// Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
|
||||||
procList, ret := nvml.DeviceGetGraphicsRunningProcesses(device.device)
|
procList, ret := nvml.DeviceGetGraphicsRunningProcesses(device.device)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_graphics_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
|
y, err := lp.New("nv_graphics_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -812,7 +804,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
|
|||||||
// // Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
|
// // Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode.
|
||||||
// procList, ret := nvml.DeviceGetMPSComputeRunningProcesses(device.device)
|
// procList, ret := nvml.DeviceGetMPSComputeRunningProcesses(device.device)
|
||||||
// if ret == nvml.SUCCESS {
|
// if ret == nvml.SUCCESS {
|
||||||
// y, err := lp.NewMessage("nv_mps_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
|
// y, err := lp.New("nv_mps_compute_processes", device.tags, device.meta, map[string]interface{}{"value": len(procList)}, time.Now())
|
||||||
// if err == nil {
|
// if err == nil {
|
||||||
// output <- y
|
// output <- y
|
||||||
// }
|
// }
|
||||||
@@ -821,7 +813,7 @@ func readProcessCounts(device NvidiaCollectorDevice, output chan lp.CCMessage) e
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
var violTime nvml.ViolationTime
|
var violTime nvml.ViolationTime
|
||||||
var ret nvml.Return
|
var ret nvml.Return
|
||||||
|
|
||||||
@@ -840,7 +832,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_POWER)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_POWER)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_power", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_power", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -852,7 +844,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_THERMAL)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_THERMAL)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_thermal", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_thermal", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -864,7 +856,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_SYNC_BOOST)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_SYNC_BOOST)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_sync_boost", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_sync_boost", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -876,7 +868,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_BOARD_LIMIT)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_BOARD_LIMIT)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_board_limit", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_board_limit", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -888,7 +880,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_LOW_UTILIZATION)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_LOW_UTILIZATION)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_low_util", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_low_util", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -900,7 +892,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_RELIABILITY)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_RELIABILITY)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_reliability", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_reliability", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -912,7 +904,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_APP_CLOCKS)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_APP_CLOCKS)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_below_app_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_below_app_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -924,7 +916,7 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_BASE_CLOCKS)
|
violTime, ret = nvml.DeviceGetViolationStatus(device.device, nvml.PERF_POLICY_TOTAL_BASE_CLOCKS)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
t := float64(violTime.ViolationTime) * 1e-9
|
t := float64(violTime.ViolationTime) * 1e-9
|
||||||
y, err := lp.NewMessage("nv_violation_below_base_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
y, err := lp.New("nv_violation_below_base_clock", device.tags, device.meta, map[string]interface{}{"value": t}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddMeta("unit", "sec")
|
y.AddMeta("unit", "sec")
|
||||||
output <- y
|
output <- y
|
||||||
@@ -935,18 +927,12 @@ func readViolationStats(device NvidiaCollectorDevice, output chan lp.CCMessage)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) error {
|
func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMetric) error {
|
||||||
// Retrieves the specified error counter value
|
// Retrieves the specified error counter value
|
||||||
// Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available
|
// Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available
|
||||||
//
|
//
|
||||||
// For Pascal &tm; or newer fully supported devices.
|
// For Pascal &tm; or newer fully supported devices.
|
||||||
|
|
||||||
var aggregate_crc_errors uint64 = 0
|
|
||||||
var aggregate_ecc_errors uint64 = 0
|
|
||||||
var aggregate_replay_errors uint64 = 0
|
|
||||||
var aggregate_recovery_errors uint64 = 0
|
|
||||||
var aggregate_crc_flit_errors uint64 = 0
|
|
||||||
|
|
||||||
for i := 0; i < nvml.NVLINK_MAX_LINKS; i++ {
|
for i := 0; i < nvml.NVLINK_MAX_LINKS; i++ {
|
||||||
state, ret := nvml.DeviceGetNvLinkState(device.device, i)
|
state, ret := nvml.DeviceGetNvLinkState(device.device, i)
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
@@ -954,9 +940,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
if !device.excludeMetrics["nv_nvlink_crc_errors"] {
|
if !device.excludeMetrics["nv_nvlink_crc_errors"] {
|
||||||
// Data link receive data CRC error counter
|
// Data link receive data CRC error counter
|
||||||
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_DATA)
|
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_DATA)
|
||||||
aggregate_crc_errors = aggregate_crc_errors + count
|
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_nvlink_crc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
y, err := lp.New("nv_nvlink_crc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("stype", "nvlink")
|
y.AddTag("stype", "nvlink")
|
||||||
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
||||||
@@ -967,9 +952,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
if !device.excludeMetrics["nv_nvlink_ecc_errors"] {
|
if !device.excludeMetrics["nv_nvlink_ecc_errors"] {
|
||||||
// Data link receive data ECC error counter
|
// Data link receive data ECC error counter
|
||||||
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_ECC_DATA)
|
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_ECC_DATA)
|
||||||
aggregate_ecc_errors = aggregate_ecc_errors + count
|
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_nvlink_ecc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
y, err := lp.New("nv_nvlink_ecc_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("stype", "nvlink")
|
y.AddTag("stype", "nvlink")
|
||||||
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
||||||
@@ -980,9 +964,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
if !device.excludeMetrics["nv_nvlink_replay_errors"] {
|
if !device.excludeMetrics["nv_nvlink_replay_errors"] {
|
||||||
// Data link transmit replay error counter
|
// Data link transmit replay error counter
|
||||||
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_REPLAY)
|
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_REPLAY)
|
||||||
aggregate_replay_errors = aggregate_replay_errors + count
|
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_nvlink_replay_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
y, err := lp.New("nv_nvlink_replay_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("stype", "nvlink")
|
y.AddTag("stype", "nvlink")
|
||||||
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
||||||
@@ -993,9 +976,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
if !device.excludeMetrics["nv_nvlink_recovery_errors"] {
|
if !device.excludeMetrics["nv_nvlink_recovery_errors"] {
|
||||||
// Data link transmit recovery error counter
|
// Data link transmit recovery error counter
|
||||||
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_RECOVERY)
|
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_RECOVERY)
|
||||||
aggregate_recovery_errors = aggregate_recovery_errors + count
|
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_nvlink_recovery_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
y, err := lp.New("nv_nvlink_recovery_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("stype", "nvlink")
|
y.AddTag("stype", "nvlink")
|
||||||
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
||||||
@@ -1006,9 +988,8 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
if !device.excludeMetrics["nv_nvlink_crc_flit_errors"] {
|
if !device.excludeMetrics["nv_nvlink_crc_flit_errors"] {
|
||||||
// Data link receive flow control digit CRC error counter
|
// Data link receive flow control digit CRC error counter
|
||||||
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_FLIT)
|
count, ret := nvml.DeviceGetNvLinkErrorCounter(device.device, i, nvml.NVLINK_ERROR_DL_CRC_FLIT)
|
||||||
aggregate_crc_flit_errors = aggregate_crc_flit_errors + count
|
|
||||||
if ret == nvml.SUCCESS {
|
if ret == nvml.SUCCESS {
|
||||||
y, err := lp.NewMessage("nv_nvlink_crc_flit_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
y, err := lp.New("nv_nvlink_crc_flit_errors", device.tags, device.meta, map[string]interface{}{"value": count}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("stype", "nvlink")
|
y.AddTag("stype", "nvlink")
|
||||||
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
||||||
@@ -1019,58 +1000,16 @@ func readNVLinkStats(device NvidiaCollectorDevice, output chan lp.CCMessage) err
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Export aggegated values
|
|
||||||
if !device.excludeMetrics["nv_nvlink_crc_errors"] {
|
|
||||||
// Data link receive data CRC error counter
|
|
||||||
y, err := lp.NewMessage("nv_nvlink_crc_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_crc_errors}, time.Now())
|
|
||||||
if err == nil {
|
|
||||||
y.AddTag("stype", "nvlink")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !device.excludeMetrics["nv_nvlink_ecc_errors"] {
|
|
||||||
// Data link receive data ECC error counter
|
|
||||||
y, err := lp.NewMessage("nv_nvlink_ecc_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_ecc_errors}, time.Now())
|
|
||||||
if err == nil {
|
|
||||||
y.AddTag("stype", "nvlink")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !device.excludeMetrics["nv_nvlink_replay_errors"] {
|
|
||||||
// Data link transmit replay error counter
|
|
||||||
y, err := lp.NewMessage("nv_nvlink_replay_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_replay_errors}, time.Now())
|
|
||||||
if err == nil {
|
|
||||||
y.AddTag("stype", "nvlink")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !device.excludeMetrics["nv_nvlink_recovery_errors"] {
|
|
||||||
// Data link transmit recovery error counter
|
|
||||||
y, err := lp.NewMessage("nv_nvlink_recovery_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_recovery_errors}, time.Now())
|
|
||||||
if err == nil {
|
|
||||||
y.AddTag("stype", "nvlink")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !device.excludeMetrics["nv_nvlink_crc_flit_errors"] {
|
|
||||||
// Data link receive flow control digit CRC error counter
|
|
||||||
y, err := lp.NewMessage("nv_nvlink_crc_flit_errors_sum", device.tags, device.meta, map[string]interface{}{"value": aggregate_crc_flit_errors}, time.Now())
|
|
||||||
if err == nil {
|
|
||||||
y.AddTag("stype", "nvlink")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *NvidiaCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
var err error
|
var err error
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
readAll := func(device NvidiaCollectorDevice, output chan lp.CCMessage) {
|
readAll := func(device NvidiaCollectorDevice, output chan lp.CCMetric) {
|
||||||
name, ret := nvml.DeviceGetName(device.device)
|
name, ret := nvml.DeviceGetName(device.device)
|
||||||
if ret != nvml.SUCCESS {
|
if ret != nvml.SUCCESS {
|
||||||
name = "NoName"
|
name = "NoName"
|
||||||
|
|||||||
@@ -1,262 +0,0 @@
|
|||||||
package collectors
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// running average power limit (RAPL) monitoring attributes for a zone
|
|
||||||
type RAPLZoneInfo struct {
|
|
||||||
// tags describing the RAPL zone:
|
|
||||||
// * zone_name, subzone_name: e.g. psys, dram, core, uncore, package-0
|
|
||||||
// * zone_id: e.g. 0:1 (zone 0 sub zone 1)
|
|
||||||
tags map[string]string
|
|
||||||
energyFilepath string // path to a file containing the zones current energy counter in micro joules
|
|
||||||
energy int64 // current reading of the energy counter in micro joules
|
|
||||||
energyTimestamp time.Time // timestamp when energy counter was read
|
|
||||||
maxEnergyRange int64 // Range of the above energy counter in micro-joules
|
|
||||||
}
|
|
||||||
|
|
||||||
type RAPLCollector struct {
|
|
||||||
metricCollector
|
|
||||||
config struct {
|
|
||||||
// Exclude IDs for RAPL zones, e.g.
|
|
||||||
// * 0 for zone 0
|
|
||||||
// * 0:1 for zone 0 subzone 1
|
|
||||||
ExcludeByID []string `json:"exclude_device_by_id,omitempty"`
|
|
||||||
// Exclude names for RAPL zones, e.g. psys, dram, core, uncore, package-0
|
|
||||||
ExcludeByName []string `json:"exclude_device_by_name,omitempty"`
|
|
||||||
}
|
|
||||||
RAPLZoneInfo []RAPLZoneInfo
|
|
||||||
meta map[string]string // default meta information
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init initializes the running average power limit (RAPL) collector
|
|
||||||
func (m *RAPLCollector) Init(config json.RawMessage) error {
|
|
||||||
|
|
||||||
// Check if already initialized
|
|
||||||
if m.init {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error = nil
|
|
||||||
m.name = "RAPLCollector"
|
|
||||||
m.setup()
|
|
||||||
m.parallel = true
|
|
||||||
m.meta = map[string]string{
|
|
||||||
"source": m.name,
|
|
||||||
"group": "energy",
|
|
||||||
"unit": "Watt",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read in the JSON configuration
|
|
||||||
if len(config) > 0 {
|
|
||||||
err = json.Unmarshal(config, &m.config)
|
|
||||||
if err != nil {
|
|
||||||
cclog.ComponentError(m.name, "Error reading config:", err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configure excluded RAPL zones
|
|
||||||
isIDExcluded := make(map[string]bool)
|
|
||||||
if m.config.ExcludeByID != nil {
|
|
||||||
for _, ID := range m.config.ExcludeByID {
|
|
||||||
isIDExcluded[ID] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
isNameExcluded := make(map[string]bool)
|
|
||||||
if m.config.ExcludeByName != nil {
|
|
||||||
for _, name := range m.config.ExcludeByName {
|
|
||||||
isNameExcluded[name] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// readZoneInfo reads RAPL monitoring attributes for a zone given by zonePath
|
|
||||||
// See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes
|
|
||||||
readZoneInfo := func(zonePath string) (z struct {
|
|
||||||
name string // zones name e.g. psys, dram, core, uncore, package-0
|
|
||||||
energyFilepath string // path to a file containing the zones current energy counter in micro joules
|
|
||||||
energy int64 // current reading of the energy counter in micro joules
|
|
||||||
energyTimestamp time.Time // timestamp when energy counter was read
|
|
||||||
maxEnergyRange int64 // Range of the above energy counter in micro-joules
|
|
||||||
ok bool // Are all information available?
|
|
||||||
}) {
|
|
||||||
// zones name e.g. psys, dram, core, uncore, package-0
|
|
||||||
foundName := false
|
|
||||||
if v, err :=
|
|
||||||
os.ReadFile(
|
|
||||||
filepath.Join(zonePath, "name")); err == nil {
|
|
||||||
foundName = true
|
|
||||||
z.name = strings.TrimSpace(string(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// path to a file containing the zones current energy counter in micro joules
|
|
||||||
z.energyFilepath = filepath.Join(zonePath, "energy_uj")
|
|
||||||
|
|
||||||
// current reading of the energy counter in micro joules
|
|
||||||
foundEnergy := false
|
|
||||||
if v, err := os.ReadFile(z.energyFilepath); err == nil {
|
|
||||||
// timestamp when energy counter was read
|
|
||||||
z.energyTimestamp = time.Now()
|
|
||||||
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
|
|
||||||
foundEnergy = true
|
|
||||||
z.energy = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Range of the above energy counter in micro-joules
|
|
||||||
foundMaxEnergyRange := false
|
|
||||||
if v, err :=
|
|
||||||
os.ReadFile(
|
|
||||||
filepath.Join(zonePath, "max_energy_range_uj")); err == nil {
|
|
||||||
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
|
|
||||||
foundMaxEnergyRange = true
|
|
||||||
z.maxEnergyRange = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Are all information available?
|
|
||||||
z.ok = foundName && foundEnergy && foundMaxEnergyRange
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
powerCapPrefix := "/sys/devices/virtual/powercap"
|
|
||||||
controlType := "intel-rapl"
|
|
||||||
controlTypePath := filepath.Join(powerCapPrefix, controlType)
|
|
||||||
|
|
||||||
// Find all RAPL zones
|
|
||||||
zonePrefix := filepath.Join(controlTypePath, controlType+":")
|
|
||||||
zonesPath, err := filepath.Glob(zonePrefix + "*")
|
|
||||||
if err != nil || zonesPath == nil {
|
|
||||||
return fmt.Errorf("unable to find any zones under %s", controlTypePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, zonePath := range zonesPath {
|
|
||||||
zoneID := strings.TrimPrefix(zonePath, zonePrefix)
|
|
||||||
z := readZoneInfo(zonePath)
|
|
||||||
if z.ok &&
|
|
||||||
!isIDExcluded[zoneID] &&
|
|
||||||
!isNameExcluded[z.name] {
|
|
||||||
|
|
||||||
// Add RAPL monitoring attributes for a zone
|
|
||||||
m.RAPLZoneInfo =
|
|
||||||
append(
|
|
||||||
m.RAPLZoneInfo,
|
|
||||||
RAPLZoneInfo{
|
|
||||||
tags: map[string]string{
|
|
||||||
"id": zoneID,
|
|
||||||
"zone_name": z.name,
|
|
||||||
},
|
|
||||||
energyFilepath: z.energyFilepath,
|
|
||||||
energy: z.energy,
|
|
||||||
energyTimestamp: z.energyTimestamp,
|
|
||||||
maxEnergyRange: z.maxEnergyRange,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// find all sub zones for the given zone
|
|
||||||
subZonePrefix := filepath.Join(zonePath, controlType+":"+zoneID+":")
|
|
||||||
subZonesPath, err := filepath.Glob(subZonePrefix + "*")
|
|
||||||
if err != nil || subZonesPath == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, subZonePath := range subZonesPath {
|
|
||||||
subZoneID := strings.TrimPrefix(subZonePath, subZonePrefix)
|
|
||||||
sz := readZoneInfo(subZonePath)
|
|
||||||
if len(zoneID) > 0 && len(z.name) > 0 &&
|
|
||||||
sz.ok &&
|
|
||||||
!isIDExcluded[zoneID+":"+subZoneID] &&
|
|
||||||
!isNameExcluded[sz.name] {
|
|
||||||
m.RAPLZoneInfo =
|
|
||||||
append(
|
|
||||||
m.RAPLZoneInfo,
|
|
||||||
RAPLZoneInfo{
|
|
||||||
tags: map[string]string{
|
|
||||||
"id": zoneID + ":" + subZoneID,
|
|
||||||
"zone_name": z.name,
|
|
||||||
"sub_zone_name": sz.name,
|
|
||||||
},
|
|
||||||
energyFilepath: sz.energyFilepath,
|
|
||||||
energy: sz.energy,
|
|
||||||
energyTimestamp: sz.energyTimestamp,
|
|
||||||
maxEnergyRange: sz.maxEnergyRange,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.RAPLZoneInfo == nil {
|
|
||||||
return fmt.Errorf("no running average power limit (RAPL) device found in %s", controlTypePath)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialized
|
|
||||||
cclog.ComponentDebug(
|
|
||||||
m.name,
|
|
||||||
"initialized",
|
|
||||||
len(m.RAPLZoneInfo),
|
|
||||||
"zones with running average power limit (RAPL) monitoring attributes")
|
|
||||||
m.init = true
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads running average power limit (RAPL) monitoring attributes for all initialized zones
|
|
||||||
// See: https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes
|
|
||||||
func (m *RAPLCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|
||||||
|
|
||||||
for i := range m.RAPLZoneInfo {
|
|
||||||
p := &m.RAPLZoneInfo[i]
|
|
||||||
|
|
||||||
// Read current value of the energy counter in micro joules
|
|
||||||
if v, err := os.ReadFile(p.energyFilepath); err == nil {
|
|
||||||
energyTimestamp := time.Now()
|
|
||||||
if i, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil {
|
|
||||||
energy := i
|
|
||||||
|
|
||||||
// Compute average power (Δ energy / Δ time)
|
|
||||||
energyDiff := energy - p.energy
|
|
||||||
if energyDiff < 0 {
|
|
||||||
// Handle overflow:
|
|
||||||
// ( p.maxEnergyRange - p.energy ) + energy
|
|
||||||
// = p.maxEnergyRange + ( energy - p.energy )
|
|
||||||
// = p.maxEnergyRange + diffEnergy
|
|
||||||
energyDiff += p.maxEnergyRange
|
|
||||||
}
|
|
||||||
timeDiff := energyTimestamp.Sub(p.energyTimestamp)
|
|
||||||
averagePower := float64(energyDiff) / float64(timeDiff.Microseconds())
|
|
||||||
|
|
||||||
y, err := lp.NewMessage(
|
|
||||||
"rapl_average_power",
|
|
||||||
p.tags,
|
|
||||||
m.meta,
|
|
||||||
map[string]interface{}{"value": averagePower},
|
|
||||||
energyTimestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save current energy counter state
|
|
||||||
p.energy = energy
|
|
||||||
p.energyTimestamp = energyTimestamp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes running average power limit (RAPL) metric collector
|
|
||||||
func (m *RAPLCollector) Close() {
|
|
||||||
// Unset flag
|
|
||||||
m.init = false
|
|
||||||
}
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
## `rapl` collector
|
|
||||||
|
|
||||||
This collector reads running average power limit (RAPL) monitoring attributes to compute average power consumption metrics. See <https://www.kernel.org/doc/html/latest/power/powercap/powercap.html#monitoring-attributes>.
|
|
||||||
|
|
||||||
The Likwid metric collector provides similar functionality.
|
|
||||||
|
|
||||||
```json
|
|
||||||
"rapl": {
|
|
||||||
"exclude_device_by_id": ["0:1", "0:2"],
|
|
||||||
"exclude_device_by_name": ["psys"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Metrics:
|
|
||||||
* `rapl_average_power`: average power consumption in Watt. The average is computed over the entire runtime from the last measurement to the current measurement
|
|
||||||
@@ -6,8 +6,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
"github.com/ClusterCockpit/go-rocm-smi/pkg/rocm_smi"
|
"github.com/ClusterCockpit/go-rocm-smi/pkg/rocm_smi"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -162,7 +162,7 @@ func (m *RocmSmiCollector) Init(config json.RawMessage) error {
|
|||||||
|
|
||||||
// Read collects all metrics belonging to the sample collector
|
// Read collects all metrics belonging to the sample collector
|
||||||
// and sends them through the output channel to the collector manager
|
// and sends them through the output channel to the collector manager
|
||||||
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
// Create a sample metric
|
// Create a sample metric
|
||||||
timestamp := time.Now()
|
timestamp := time.Now()
|
||||||
|
|
||||||
@@ -175,119 +175,119 @@ func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
|
|
||||||
if !dev.excludeMetrics["rocm_gfx_util"] {
|
if !dev.excludeMetrics["rocm_gfx_util"] {
|
||||||
value := metrics.Average_gfx_activity
|
value := metrics.Average_gfx_activity
|
||||||
y, err := lp.NewMessage("rocm_gfx_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_gfx_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_umc_util"] {
|
if !dev.excludeMetrics["rocm_umc_util"] {
|
||||||
value := metrics.Average_umc_activity
|
value := metrics.Average_umc_activity
|
||||||
y, err := lp.NewMessage("rocm_umc_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_umc_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_mm_util"] {
|
if !dev.excludeMetrics["rocm_mm_util"] {
|
||||||
value := metrics.Average_mm_activity
|
value := metrics.Average_mm_activity
|
||||||
y, err := lp.NewMessage("rocm_mm_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_mm_util", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_avg_power"] {
|
if !dev.excludeMetrics["rocm_avg_power"] {
|
||||||
value := metrics.Average_socket_power
|
value := metrics.Average_socket_power
|
||||||
y, err := lp.NewMessage("rocm_avg_power", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_avg_power", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_temp_mem"] {
|
if !dev.excludeMetrics["rocm_temp_mem"] {
|
||||||
value := metrics.Temperature_mem
|
value := metrics.Temperature_mem
|
||||||
y, err := lp.NewMessage("rocm_temp_mem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_temp_mem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_temp_hotspot"] {
|
if !dev.excludeMetrics["rocm_temp_hotspot"] {
|
||||||
value := metrics.Temperature_hotspot
|
value := metrics.Temperature_hotspot
|
||||||
y, err := lp.NewMessage("rocm_temp_hotspot", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_temp_hotspot", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_temp_edge"] {
|
if !dev.excludeMetrics["rocm_temp_edge"] {
|
||||||
value := metrics.Temperature_edge
|
value := metrics.Temperature_edge
|
||||||
y, err := lp.NewMessage("rocm_temp_edge", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_temp_edge", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_temp_vrgfx"] {
|
if !dev.excludeMetrics["rocm_temp_vrgfx"] {
|
||||||
value := metrics.Temperature_vrgfx
|
value := metrics.Temperature_vrgfx
|
||||||
y, err := lp.NewMessage("rocm_temp_vrgfx", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_temp_vrgfx", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_temp_vrsoc"] {
|
if !dev.excludeMetrics["rocm_temp_vrsoc"] {
|
||||||
value := metrics.Temperature_vrsoc
|
value := metrics.Temperature_vrsoc
|
||||||
y, err := lp.NewMessage("rocm_temp_vrsoc", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_temp_vrsoc", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_temp_vrmem"] {
|
if !dev.excludeMetrics["rocm_temp_vrmem"] {
|
||||||
value := metrics.Temperature_vrmem
|
value := metrics.Temperature_vrmem
|
||||||
y, err := lp.NewMessage("rocm_temp_vrmem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_temp_vrmem", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_gfx_clock"] {
|
if !dev.excludeMetrics["rocm_gfx_clock"] {
|
||||||
value := metrics.Average_gfxclk_frequency
|
value := metrics.Average_gfxclk_frequency
|
||||||
y, err := lp.NewMessage("rocm_gfx_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_gfx_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_soc_clock"] {
|
if !dev.excludeMetrics["rocm_soc_clock"] {
|
||||||
value := metrics.Average_socclk_frequency
|
value := metrics.Average_socclk_frequency
|
||||||
y, err := lp.NewMessage("rocm_soc_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_soc_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_u_clock"] {
|
if !dev.excludeMetrics["rocm_u_clock"] {
|
||||||
value := metrics.Average_uclk_frequency
|
value := metrics.Average_uclk_frequency
|
||||||
y, err := lp.NewMessage("rocm_u_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_u_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_v0_clock"] {
|
if !dev.excludeMetrics["rocm_v0_clock"] {
|
||||||
value := metrics.Average_vclk0_frequency
|
value := metrics.Average_vclk0_frequency
|
||||||
y, err := lp.NewMessage("rocm_v0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_v0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_v1_clock"] {
|
if !dev.excludeMetrics["rocm_v1_clock"] {
|
||||||
value := metrics.Average_vclk1_frequency
|
value := metrics.Average_vclk1_frequency
|
||||||
y, err := lp.NewMessage("rocm_v1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_v1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_d0_clock"] {
|
if !dev.excludeMetrics["rocm_d0_clock"] {
|
||||||
value := metrics.Average_dclk0_frequency
|
value := metrics.Average_dclk0_frequency
|
||||||
y, err := lp.NewMessage("rocm_d0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_d0_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !dev.excludeMetrics["rocm_d1_clock"] {
|
if !dev.excludeMetrics["rocm_d1_clock"] {
|
||||||
value := metrics.Average_dclk1_frequency
|
value := metrics.Average_dclk1_frequency
|
||||||
y, err := lp.NewMessage("rocm_d1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_d1_clock", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
@@ -295,7 +295,7 @@ func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMessage
|
|||||||
if !dev.excludeMetrics["rocm_temp_hbm"] {
|
if !dev.excludeMetrics["rocm_temp_hbm"] {
|
||||||
for i := 0; i < rocm_smi.NUM_HBM_INSTANCES; i++ {
|
for i := 0; i < rocm_smi.NUM_HBM_INSTANCES; i++ {
|
||||||
value := metrics.Temperature_hbm[i]
|
value := metrics.Temperature_hbm[i]
|
||||||
y, err := lp.NewMessage("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
y.AddTag("stype", "device")
|
y.AddTag("stype", "device")
|
||||||
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
y.AddTag("stype-id", fmt.Sprintf("%d", i))
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
// These are the fields we read from the JSON configuration
|
// These are the fields we read from the JSON configuration
|
||||||
@@ -17,7 +17,7 @@ type SampleCollectorConfig struct {
|
|||||||
// defined by metricCollector (name, init, ...)
|
// defined by metricCollector (name, init, ...)
|
||||||
type SampleCollector struct {
|
type SampleCollector struct {
|
||||||
metricCollector
|
metricCollector
|
||||||
config SampleCollectorConfig // the configuration structure
|
config SampleTimerCollectorConfig // the configuration structure
|
||||||
meta map[string]string // default meta information
|
meta map[string]string // default meta information
|
||||||
tags map[string]string // default tags
|
tags map[string]string // default tags
|
||||||
}
|
}
|
||||||
@@ -32,18 +32,18 @@ type SampleCollector struct {
|
|||||||
func (m *SampleCollector) Init(config json.RawMessage) error {
|
func (m *SampleCollector) Init(config json.RawMessage) error {
|
||||||
var err error = nil
|
var err error = nil
|
||||||
// Always set the name early in Init() to use it in cclog.Component* functions
|
// Always set the name early in Init() to use it in cclog.Component* functions
|
||||||
m.name = "SampleCollector"
|
m.name = "InternalCollector"
|
||||||
// This is for later use, also call it early
|
// This is for later use, also call it early
|
||||||
m.setup()
|
m.setup()
|
||||||
// Tell whether the collector should be run in parallel with others (reading files, ...)
|
// Tell whether the collector should be run in parallel with others (reading files, ...)
|
||||||
// or it should be run serially, mostly for collectors actually doing measurements
|
// or it should be run serially, mostly for collectors acutally doing measurements
|
||||||
// because they should not measure the execution of the other collectors
|
// because they should not measure the execution of the other collectors
|
||||||
m.parallel = true
|
m.parallel = true
|
||||||
// Define meta information sent with each metric
|
// Define meta information sent with each metric
|
||||||
// (Can also be dynamic or this is the basic set with extension through AddMeta())
|
// (Can also be dynamic or this is the basic set with extension through AddMeta())
|
||||||
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
|
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
|
||||||
// Define tags sent with each metric
|
// Define tags sent with each metric
|
||||||
// The 'type' tag is always needed, it defines the granularity of the metric
|
// The 'type' tag is always needed, it defines the granulatity of the metric
|
||||||
// node -> whole system
|
// node -> whole system
|
||||||
// socket -> CPU socket (requires socket ID as 'type-id' tag)
|
// socket -> CPU socket (requires socket ID as 'type-id' tag)
|
||||||
// die -> CPU die (requires CPU die ID as 'type-id' tag)
|
// die -> CPU die (requires CPU die ID as 'type-id' tag)
|
||||||
@@ -74,7 +74,7 @@ func (m *SampleCollector) Init(config json.RawMessage) error {
|
|||||||
|
|
||||||
// Read collects all metrics belonging to the sample collector
|
// Read collects all metrics belonging to the sample collector
|
||||||
// and sends them through the output channel to the collector manager
|
// and sends them through the output channel to the collector manager
|
||||||
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
// Create a sample metric
|
// Create a sample metric
|
||||||
timestamp := time.Now()
|
timestamp := time.Now()
|
||||||
|
|
||||||
@@ -85,7 +85,7 @@ func (m *SampleCollector) Read(interval time.Duration, output chan lp.CCMessage)
|
|||||||
// stop := readState()
|
// stop := readState()
|
||||||
// value = (stop - start) / interval.Seconds()
|
// value = (stop - start) / interval.Seconds()
|
||||||
|
|
||||||
y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
// Send it to output channel
|
// Send it to output channel
|
||||||
output <- y
|
output <- y
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
// These are the fields we read from the JSON configuration
|
// These are the fields we read from the JSON configuration
|
||||||
@@ -25,7 +25,7 @@ type SampleTimerCollector struct {
|
|||||||
config SampleTimerCollectorConfig // the configuration structure
|
config SampleTimerCollectorConfig // the configuration structure
|
||||||
interval time.Duration // the interval parsed from configuration
|
interval time.Duration // the interval parsed from configuration
|
||||||
ticker *time.Ticker // own timer
|
ticker *time.Ticker // own timer
|
||||||
output chan lp.CCMessage // own internal output channel
|
output chan lp.CCMetric // own internal output channel
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
|
func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
|
||||||
@@ -38,7 +38,7 @@ func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
|
|||||||
// (Can also be dynamic or this is the basic set with extension through AddMeta())
|
// (Can also be dynamic or this is the basic set with extension through AddMeta())
|
||||||
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
|
m.meta = map[string]string{"source": m.name, "group": "SAMPLE"}
|
||||||
// Define tags sent with each metric
|
// Define tags sent with each metric
|
||||||
// The 'type' tag is always needed, it defines the granularity of the metric
|
// The 'type' tag is always needed, it defines the granulatity of the metric
|
||||||
// node -> whole system
|
// node -> whole system
|
||||||
// socket -> CPU socket (requires socket ID as 'type-id' tag)
|
// socket -> CPU socket (requires socket ID as 'type-id' tag)
|
||||||
// cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag)
|
// cpu -> single CPU hardware thread (requires cpu ID as 'type-id' tag)
|
||||||
@@ -60,7 +60,7 @@ func (m *SampleTimerCollector) Init(name string, config json.RawMessage) error {
|
|||||||
|
|
||||||
// Storage for output channel
|
// Storage for output channel
|
||||||
m.output = nil
|
m.output = nil
|
||||||
// Management channel for the timer function.
|
// Mangement channel for the timer function.
|
||||||
m.done = make(chan bool)
|
m.done = make(chan bool)
|
||||||
// Create the own ticker
|
// Create the own ticker
|
||||||
m.ticker = time.NewTicker(m.interval)
|
m.ticker = time.NewTicker(m.interval)
|
||||||
@@ -94,20 +94,20 @@ func (m *SampleTimerCollector) ReadMetrics(timestamp time.Time) {
|
|||||||
|
|
||||||
value := 1.0
|
value := 1.0
|
||||||
|
|
||||||
// If you want to measure something for a specific amount of time, use interval
|
// If you want to measure something for a specific amout of time, use interval
|
||||||
// start := readState()
|
// start := readState()
|
||||||
// time.Sleep(interval)
|
// time.Sleep(interval)
|
||||||
// stop := readState()
|
// stop := readState()
|
||||||
// value = (stop - start) / interval.Seconds()
|
// value = (stop - start) / interval.Seconds()
|
||||||
|
|
||||||
y, err := lp.NewMessage("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
|
y, err := lp.New("sample_metric", m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
|
||||||
if err == nil && m.output != nil {
|
if err == nil && m.output != nil {
|
||||||
// Send it to output channel if we have a valid channel
|
// Send it to output channel if we have a valid channel
|
||||||
m.output <- y
|
m.output <- y
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *SampleTimerCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
// Capture output channel
|
// Capture output channel
|
||||||
m.output = output
|
m.output = output
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,8 +10,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
const SCHEDSTATFILE = `/proc/schedstat`
|
const SCHEDSTATFILE = `/proc/schedstat`
|
||||||
@@ -96,7 +96,7 @@ func (m *SchedstatCollector) Init(config json.RawMessage) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMessage, now time.Time, tsdelta time.Duration) {
|
func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMetric, now time.Time, tsdelta time.Duration) {
|
||||||
running, _ := strconv.ParseInt(linefields[7], 10, 64)
|
running, _ := strconv.ParseInt(linefields[7], 10, 64)
|
||||||
waiting, _ := strconv.ParseInt(linefields[8], 10, 64)
|
waiting, _ := strconv.ParseInt(linefields[8], 10, 64)
|
||||||
diff_running := running - m.olddata[linefields[0]]["running"]
|
diff_running := running - m.olddata[linefields[0]]["running"]
|
||||||
@@ -109,7 +109,7 @@ func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]
|
|||||||
m.olddata[linefields[0]]["waiting"] = waiting
|
m.olddata[linefields[0]]["waiting"] = waiting
|
||||||
value := l_running + l_waiting
|
value := l_running + l_waiting
|
||||||
|
|
||||||
y, err := lp.NewMessage("cpu_load_core", tags, m.meta, map[string]interface{}{"value": value}, now)
|
y, err := lp.New("cpu_load_core", tags, m.meta, map[string]interface{}{"value": value}, now)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
// Send it to output channel
|
// Send it to output channel
|
||||||
output <- y
|
output <- y
|
||||||
@@ -118,7 +118,7 @@ func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]
|
|||||||
|
|
||||||
// Read collects all metrics belonging to the sample collector
|
// Read collects all metrics belonging to the sample collector
|
||||||
// and sends them through the output channel to the collector manager
|
// and sends them through the output channel to the collector manager
|
||||||
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,144 +0,0 @@
|
|||||||
package collectors
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"runtime"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
)
|
|
||||||
|
|
||||||
type SelfCollectorConfig struct {
|
|
||||||
MemStats bool `json:"read_mem_stats"`
|
|
||||||
GoRoutines bool `json:"read_goroutines"`
|
|
||||||
CgoCalls bool `json:"read_cgo_calls"`
|
|
||||||
Rusage bool `json:"read_rusage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type SelfCollector struct {
|
|
||||||
metricCollector
|
|
||||||
config SelfCollectorConfig // the configuration structure
|
|
||||||
meta map[string]string // default meta information
|
|
||||||
tags map[string]string // default tags
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SelfCollector) Init(config json.RawMessage) error {
|
|
||||||
var err error = nil
|
|
||||||
m.name = "SelfCollector"
|
|
||||||
m.setup()
|
|
||||||
m.parallel = true
|
|
||||||
m.meta = map[string]string{"source": m.name, "group": "Self"}
|
|
||||||
m.tags = map[string]string{"type": "node"}
|
|
||||||
if len(config) > 0 {
|
|
||||||
err = json.Unmarshal(config, &m.config)
|
|
||||||
if err != nil {
|
|
||||||
cclog.ComponentError(m.name, "Error reading config:", err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.init = true
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SelfCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|
||||||
timestamp := time.Now()
|
|
||||||
|
|
||||||
if m.config.MemStats {
|
|
||||||
var memstats runtime.MemStats
|
|
||||||
runtime.ReadMemStats(&memstats)
|
|
||||||
|
|
||||||
y, err := lp.NewMessage("total_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.TotalAlloc}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "Bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("heap_alloc", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapAlloc}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "Bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("heap_sys", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapSys}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "Bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("heap_idle", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapIdle}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "Bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("heap_inuse", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapInuse}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "Bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("heap_released", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapReleased}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "Bytes")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("heap_objects", m.tags, m.meta, map[string]interface{}{"value": memstats.HeapObjects}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.config.GoRoutines {
|
|
||||||
y, err := lp.NewMessage("num_goroutines", m.tags, m.meta, map[string]interface{}{"value": runtime.NumGoroutine()}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.config.CgoCalls {
|
|
||||||
y, err := lp.NewMessage("num_cgo_calls", m.tags, m.meta, map[string]interface{}{"value": runtime.NumCgoCall()}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.config.Rusage {
|
|
||||||
var rusage syscall.Rusage
|
|
||||||
err := syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
|
|
||||||
if err == nil {
|
|
||||||
sec, nsec := rusage.Utime.Unix()
|
|
||||||
t := float64(sec) + (float64(nsec) * 1e-9)
|
|
||||||
y, err := lp.NewMessage("rusage_user_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "seconds")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
sec, nsec = rusage.Stime.Unix()
|
|
||||||
t = float64(sec) + (float64(nsec) * 1e-9)
|
|
||||||
y, err = lp.NewMessage("rusage_system_time", m.tags, m.meta, map[string]interface{}{"value": t}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
y.AddMeta("unit", "seconds")
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("rusage_vol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nvcsw}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("rusage_invol_ctx_switch", m.tags, m.meta, map[string]interface{}{"value": rusage.Nivcsw}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("rusage_signals", m.tags, m.meta, map[string]interface{}{"value": rusage.Nsignals}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("rusage_major_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Majflt}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
y, err = lp.NewMessage("rusage_minor_pgfaults", m.tags, m.meta, map[string]interface{}{"value": rusage.Minflt}, timestamp)
|
|
||||||
if err == nil {
|
|
||||||
output <- y
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *SelfCollector) Close() {
|
|
||||||
m.init = false
|
|
||||||
}
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
## `self` collector
|
|
||||||
|
|
||||||
```json
|
|
||||||
"self": {
|
|
||||||
"read_mem_stats" : true,
|
|
||||||
"read_goroutines" : true,
|
|
||||||
"read_cgo_calls" : true,
|
|
||||||
"read_rusage" : true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The `self` collector reads the data from the `runtime` and `syscall` packages, so monitors the execution of the cc-metric-collector itself.
|
|
||||||
|
|
||||||
Metrics:
|
|
||||||
* If `read_mem_stats == true`:
|
|
||||||
* `total_alloc`: The metric reports cumulative bytes allocated for heap objects.
|
|
||||||
* `heap_alloc`: The metric reports bytes of allocated heap objects.
|
|
||||||
* `heap_sys`: The metric reports bytes of heap memory obtained from the OS.
|
|
||||||
* `heap_idle`: The metric reports bytes in idle (unused) spans.
|
|
||||||
* `heap_inuse`: The metric reports bytes in in-use spans.
|
|
||||||
* `heap_released`: The metric reports bytes of physical memory returned to the OS.
|
|
||||||
* `heap_objects`: The metric reports the number of allocated heap objects.
|
|
||||||
* If `read_goroutines == true`:
|
|
||||||
* `num_goroutines`: The metric reports the number of goroutines that currently exist.
|
|
||||||
* If `read_cgo_calls == true`:
|
|
||||||
* `num_cgo_calls`: The metric reports the number of cgo calls made by the current process.
|
|
||||||
* If `read_rusage == true`:
|
|
||||||
* `rusage_user_time`: The metric reports the amount of time that this process has been scheduled in user mode.
|
|
||||||
* `rusage_system_time`: The metric reports the amount of time that this process has been scheduled in kernel mode.
|
|
||||||
* `rusage_vol_ctx_switch`: The metric reports the amount of voluntary context switches.
|
|
||||||
* `rusage_invol_ctx_switch`: The metric reports the amount of involuntary context switches.
|
|
||||||
* `rusage_signals`: The metric reports the number of signals received.
|
|
||||||
* `rusage_major_pgfaults`: The metric reports the number of major faults the process has made which have required loading a memory page from disk.
|
|
||||||
* `rusage_minor_pgfaults`: The metric reports the number of minor faults the process has made which have not required loading a memory page from disk.
|
|
||||||
220
collectors/smartmonMetric.go
Normal file
220
collectors/smartmonMetric.go
Normal file
@@ -0,0 +1,220 @@
|
|||||||
|
package collectors
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"os/exec"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SmartMonCollectorConfig struct {
|
||||||
|
UseSudo bool `json:"use_sudo"`
|
||||||
|
ExcludeDevices []string `json:"exclude_devices"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartMonCollector struct {
|
||||||
|
metricCollector
|
||||||
|
config SmartMonCollectorConfig // the configuration structure
|
||||||
|
meta map[string]string // default meta information
|
||||||
|
tags map[string]string // default tags
|
||||||
|
devices []string // smartmon devices
|
||||||
|
sudoCmd string // Full path to 'sudo' command
|
||||||
|
smartCtlCmd string // Full path to 'smartctl' command
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SmartMonCollector) getSmartmonDevices() error {
|
||||||
|
var command *exec.Cmd
|
||||||
|
var scan struct {
|
||||||
|
Devices []struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
} `json:"devices"`
|
||||||
|
}
|
||||||
|
m.devices = make([]string, 0)
|
||||||
|
if m.config.UseSudo {
|
||||||
|
command = exec.Command(m.sudoCmd, m.smartCtlCmd, "--scan", "-j")
|
||||||
|
} else {
|
||||||
|
command = exec.Command(m.smartCtlCmd, "--scan", "-j")
|
||||||
|
}
|
||||||
|
command.Wait()
|
||||||
|
stdout, err := command.Output()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(stdout, &scan)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, d := range scan.Devices {
|
||||||
|
if len(d.Name) > 0 {
|
||||||
|
m.devices = append(m.devices, d.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SmartMonCollector) Init(config json.RawMessage) error {
|
||||||
|
var err error = nil
|
||||||
|
m.name = "SmartMonCollector"
|
||||||
|
m.setup()
|
||||||
|
m.parallel = true
|
||||||
|
m.meta = map[string]string{"source": m.name, "group": "Disk"}
|
||||||
|
m.tags = map[string]string{"type": "node", "stype": "disk"}
|
||||||
|
// Read in the JSON configuration
|
||||||
|
if len(config) > 0 {
|
||||||
|
err = json.Unmarshal(config, &m.config)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(m.name, "Error reading config:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m.config.UseSudo {
|
||||||
|
p, err := exec.LookPath("sudo")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.sudoCmd = p
|
||||||
|
}
|
||||||
|
p, err := exec.LookPath("smartctl")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.smartCtlCmd = p
|
||||||
|
err = m.getSmartmonDevices()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
m.init = true
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartMonData struct {
|
||||||
|
SerialNumber string `json:"serial_number"`
|
||||||
|
UserCapacity struct {
|
||||||
|
Blocks int `json:"blocks"`
|
||||||
|
Bytes int `json:"bytes"`
|
||||||
|
} `json:"user_capacity"`
|
||||||
|
HealthLog struct {
|
||||||
|
Temperature int `json:"temperature"`
|
||||||
|
PercentageUsed int `json:"percentage_used"`
|
||||||
|
AvailableSpare int `json:"available_spare"`
|
||||||
|
DataUnitsRead int `json:"data_units_read"`
|
||||||
|
DataUnitsWrite int `json:"data_units_written"`
|
||||||
|
HostReads int `json:"host_reads"`
|
||||||
|
HostWrites int `json:"host_writes"`
|
||||||
|
PowerCycles int `json:"power_cycles"`
|
||||||
|
PowerOnHours int `json:"power_on_hours"`
|
||||||
|
UnsafeShutdowns int `json:"unsafe_shutdowns"`
|
||||||
|
MediaErrors int `json:"media_errors"`
|
||||||
|
NumErrorLogEntries int `json:"num_err_log_entries"`
|
||||||
|
WarnTempTime int `json:"warning_temp_time"`
|
||||||
|
CriticalTempTime int `json:"critical_comp_time"`
|
||||||
|
} `json:"nvme_smart_health_information_log"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SmartMonCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
|
timestamp := time.Now()
|
||||||
|
for _, d := range m.devices {
|
||||||
|
var command *exec.Cmd
|
||||||
|
var data SmartMonData
|
||||||
|
if m.config.UseSudo {
|
||||||
|
command = exec.Command(m.sudoCmd, m.smartCtlCmd, "-j", "-a", d)
|
||||||
|
} else {
|
||||||
|
command = exec.Command(m.smartCtlCmd, "-j", "-a", d)
|
||||||
|
}
|
||||||
|
command.Wait()
|
||||||
|
stdout, err := command.Output()
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(m.name, "cannot read data for device", d)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(stdout, &data)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(m.name, "cannot unmarshal data for device", d)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
y, err := lp.New("smartmon_temp", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.Temperature}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
y.AddMeta("unit", "degC")
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_percent_used", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.PercentageUsed}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
y.AddMeta("unit", "percent")
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_avail_spare", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.AvailableSpare}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
y.AddMeta("unit", "percent")
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_data_units_read", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.DataUnitsRead}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_data_units_write", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.DataUnitsWrite}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_host_reads", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.HostReads}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_host_writes", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.HostWrites}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_power_cycles", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.PowerCycles}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_power_on", m.tags, m.meta, map[string]interface{}{"value": int64(data.HealthLog.PowerOnHours) * 3600}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
y.AddMeta("unit", "seconds")
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_unsafe_shutdowns", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.UnsafeShutdowns}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_media_errors", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.MediaErrors}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_errlog_entries", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.NumErrorLogEntries}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_warn_temp_time", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.WarnTempTime}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
y, err = lp.New("smartmon_crit_temp_time", m.tags, m.meta, map[string]interface{}{"value": data.HealthLog.CriticalTempTime}, timestamp)
|
||||||
|
if err == nil {
|
||||||
|
y.AddTag("stype-id", d)
|
||||||
|
output <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SmartMonCollector) Close() {
|
||||||
|
m.init = false
|
||||||
|
}
|
||||||
29
collectors/smartmonMetric.md
Normal file
29
collectors/smartmonMetric.md
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
## `smartmon` collector
|
||||||
|
|
||||||
|
```json
|
||||||
|
"smartmon": {
|
||||||
|
"use_sudo" : true,
|
||||||
|
"exclude_devices": [
|
||||||
|
"/dev/sda",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The `smartmon` collector reads the data from the command `smartctl`. It retrieves S.M.A.R.T data from disks
|
||||||
|
|
||||||
|
Metrics:
|
||||||
|
* `smartmon_temp`: Temperature of the device (`unit=degC`)
|
||||||
|
* `smartmon_avail_spare`: Amount of spare left (`unit=percent`)
|
||||||
|
* `smartmon_percent_used`: Percentage of the device is used (`unit=percent`)
|
||||||
|
* `smartmon_data_units_read`: Read data units
|
||||||
|
* `smartmon_data_units_write`: Written data units
|
||||||
|
* `smartmon_host_reads`: Read operations
|
||||||
|
* `smartmon_host_writes`: Write operations
|
||||||
|
* `smartmon_power_cycles`: Number of power cycles
|
||||||
|
* `smartmon_power_on`: Seconds the device is powered on (`unit=seconds`)
|
||||||
|
* `smartmon_unsafe_shutdowns`: Count of unsafe shutdowns
|
||||||
|
* `smartmon_media_errors`: Media errors of the device
|
||||||
|
* `smartmon_errlog_entries`: Error log entries
|
||||||
|
* `smartmon_warn_temp_time`: Time above the warning temperature threshold
|
||||||
|
* `smartmon_crit_temp_time`: Time above the critical temperature threshold
|
||||||
|
|
||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
// See: https://www.kernel.org/doc/html/latest/hwmon/sysfs-interface.html
|
// See: https://www.kernel.org/doc/html/latest/hwmon/sysfs-interface.html
|
||||||
@@ -171,7 +171,7 @@ func (m *TempCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
|
|
||||||
for _, sensor := range m.sensors {
|
for _, sensor := range m.sensors {
|
||||||
// Read sensor file
|
// Read sensor file
|
||||||
@@ -190,7 +190,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
x /= 1000
|
x /= 1000
|
||||||
y, err := lp.NewMessage(
|
y, err := lp.New(
|
||||||
sensor.metricName,
|
sensor.metricName,
|
||||||
sensor.tags,
|
sensor.tags,
|
||||||
m.meta,
|
m.meta,
|
||||||
@@ -203,7 +203,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
|
|
||||||
// max temperature
|
// max temperature
|
||||||
if m.config.ReportMaxTemp && sensor.maxTemp != 0 {
|
if m.config.ReportMaxTemp && sensor.maxTemp != 0 {
|
||||||
y, err := lp.NewMessage(
|
y, err := lp.New(
|
||||||
sensor.maxTempName,
|
sensor.maxTempName,
|
||||||
sensor.tags,
|
sensor.tags,
|
||||||
m.meta,
|
m.meta,
|
||||||
@@ -217,7 +217,7 @@ func (m *TempCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
|||||||
|
|
||||||
// critical temperature
|
// critical temperature
|
||||||
if m.config.ReportCriticalTemp && sensor.critTemp != 0 {
|
if m.config.ReportCriticalTemp && sensor.critTemp != 0 {
|
||||||
y, err := lp.NewMessage(
|
y, err := lp.New(
|
||||||
sensor.critTempName,
|
sensor.critTempName,
|
||||||
sensor.tags,
|
sensor.tags,
|
||||||
m.meta,
|
m.meta,
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
)
|
)
|
||||||
|
|
||||||
const MAX_NUM_PROCS = 10
|
const MAX_NUM_PROCS = 10
|
||||||
@@ -53,7 +53,7 @@ func (m *TopProcsCollector) Init(config json.RawMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessage) {
|
func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
|
||||||
if !m.init {
|
if !m.init {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -68,7 +68,7 @@ func (m *TopProcsCollector) Read(interval time.Duration, output chan lp.CCMessag
|
|||||||
lines := strings.Split(string(stdout), "\n")
|
lines := strings.Split(string(stdout), "\n")
|
||||||
for i := 1; i < m.config.Num_procs+1; i++ {
|
for i := 1; i < m.config.Num_procs+1; i++ {
|
||||||
name := fmt.Sprintf("topproc%d", i)
|
name := fmt.Sprintf("topproc%d", i)
|
||||||
y, err := lp.NewMessage(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
|
y, err := lp.New(name, m.tags, m.meta, map[string]interface{}{"value": string(lines[i])}, time.Now())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
output <- y
|
output <- y
|
||||||
}
|
}
|
||||||
|
|||||||
10
config.json
10
config.json
@@ -1,10 +1,8 @@
|
|||||||
{
|
{
|
||||||
"sinks-file": "./sinks.json",
|
"sinks": "./sinks.json",
|
||||||
"collectors-file" : "./collectors.json",
|
"collectors" : "./collectors.json",
|
||||||
"receivers-file" : "./receivers.json",
|
"receivers" : "./receivers.json",
|
||||||
"router-file" : "./router.json",
|
"router" : "./router.json",
|
||||||
"main" : {
|
|
||||||
"interval": "10s",
|
"interval": "10s",
|
||||||
"duration": "1s"
|
"duration": "1s"
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -37,9 +37,7 @@ $ install --mode 644 \
|
|||||||
$ systemctl enable cc-metric-collector
|
$ systemctl enable cc-metric-collector
|
||||||
```
|
```
|
||||||
|
|
||||||
## Packaging
|
## RPM
|
||||||
|
|
||||||
### RPM
|
|
||||||
|
|
||||||
In order to get a RPM packages for cc-metric-collector, just use:
|
In order to get a RPM packages for cc-metric-collector, just use:
|
||||||
|
|
||||||
@@ -49,7 +47,7 @@ $ make RPM
|
|||||||
|
|
||||||
It uses the RPM SPEC file `scripts/cc-metric-collector.spec` and requires the RPM tools (`rpm` and `rpmspec`) and `git`.
|
It uses the RPM SPEC file `scripts/cc-metric-collector.spec` and requires the RPM tools (`rpm` and `rpmspec`) and `git`.
|
||||||
|
|
||||||
### DEB
|
## DEB
|
||||||
|
|
||||||
In order to get very simple Debian packages for cc-metric-collector, just use:
|
In order to get very simple Debian packages for cc-metric-collector, just use:
|
||||||
|
|
||||||
@@ -60,15 +58,3 @@ $ make DEB
|
|||||||
It uses the DEB control file `scripts/cc-metric-collector.control` and requires `dpkg-deb`, `awk`, `sed` and `git`. It creates only a binary deb package.
|
It uses the DEB control file `scripts/cc-metric-collector.control` and requires `dpkg-deb`, `awk`, `sed` and `git`. It creates only a binary deb package.
|
||||||
|
|
||||||
_This option is not well tested and therefore experimental_
|
_This option is not well tested and therefore experimental_
|
||||||
|
|
||||||
### Customizing RPMs or DEB packages
|
|
||||||
|
|
||||||
If you want to customize the RPMs or DEB packages for your local system, use the following workflow.
|
|
||||||
|
|
||||||
- (if there is already a fork in the private account, delete it and wait until Github realizes the deletion)
|
|
||||||
- Fork the cc-metric-collector repository (if Github hasn't realized it, it creates a fork named cc-metric-collector2)
|
|
||||||
- Go to private cc-metric-collector repository and enable Github Actions
|
|
||||||
- Do changes to the scripts, code, ... Commit and push your changes.
|
|
||||||
- Tag the new commit with `v0.x.y-<myversion>` (`git tag v0.x.y-<myversion>`)
|
|
||||||
- Push tags to repository (`git push --tags`)
|
|
||||||
- Wait until the Release action finishes. It creates fresh RPMs and DEBs in your private repository on the Releases page.
|
|
||||||
|
|||||||
@@ -12,8 +12,8 @@ The global file contains the paths to the other four files and some global optio
|
|||||||
"collectors" : "collectors.json",
|
"collectors" : "collectors.json",
|
||||||
"receivers" : "receivers.json",
|
"receivers" : "receivers.json",
|
||||||
"router" : "router.json",
|
"router" : "router.json",
|
||||||
"interval": "10s",
|
"interval": 10,
|
||||||
"duration": "1s"
|
"duration": 1
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
62
go.mod
62
go.mod
@@ -1,48 +1,40 @@
|
|||||||
module github.com/ClusterCockpit/cc-metric-collector
|
module github.com/ClusterCockpit/cc-metric-collector
|
||||||
|
|
||||||
go 1.23.4
|
go 1.18
|
||||||
|
|
||||||
toolchain go1.23.7
|
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/ClusterCockpit/cc-lib v0.1.1
|
github.com/ClusterCockpit/cc-units v0.3.0
|
||||||
github.com/ClusterCockpit/go-rocm-smi v0.3.0
|
github.com/ClusterCockpit/go-rocm-smi v0.3.0
|
||||||
github.com/NVIDIA/go-nvml v0.12.0-2
|
github.com/NVIDIA/go-nvml v0.11.6-0
|
||||||
github.com/PaesslerAG/gval v1.2.2
|
github.com/PaesslerAG/gval v1.2.0
|
||||||
github.com/fsnotify/fsnotify v1.7.0
|
github.com/gorilla/mux v1.8.0
|
||||||
github.com/gorilla/mux v1.8.1
|
github.com/influxdata/influxdb-client-go/v2 v2.9.1
|
||||||
github.com/influxdata/influxdb-client-go/v2 v2.14.0
|
|
||||||
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf
|
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf
|
||||||
github.com/influxdata/line-protocol/v2 v2.2.1
|
github.com/nats-io/nats.go v1.16.0
|
||||||
github.com/nats-io/nats.go v1.39.0
|
github.com/prometheus/client_golang v1.12.2
|
||||||
github.com/prometheus/client_golang v1.20.5
|
github.com/stmcginnis/gofish v0.13.0
|
||||||
github.com/stmcginnis/gofish v0.15.0
|
github.com/tklauser/go-sysconf v0.3.10
|
||||||
github.com/tklauser/go-sysconf v0.3.13
|
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e
|
||||||
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1
|
|
||||||
golang.org/x/exp v0.0.0-20250215185904-eff6e970281f
|
|
||||||
golang.org/x/sys v0.30.0
|
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/ClusterCockpit/cc-backend v1.4.2 // indirect
|
|
||||||
github.com/ClusterCockpit/cc-units v0.4.0 // indirect
|
|
||||||
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
|
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||||
github.com/expr-lang/expr v1.17.0 // indirect
|
github.com/deepmap/oapi-codegen v1.11.0 // indirect
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
github.com/golang/protobuf v1.5.2 // indirect
|
||||||
github.com/klauspost/compress v1.17.9 // indirect
|
github.com/google/uuid v1.3.0 // indirect
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||||
github.com/nats-io/nkeys v0.4.9 // indirect
|
github.com/nats-io/nats-server/v2 v2.8.4 // indirect
|
||||||
|
github.com/nats-io/nkeys v0.3.0 // indirect
|
||||||
github.com/nats-io/nuid v1.0.1 // indirect
|
github.com/nats-io/nuid v1.0.1 // indirect
|
||||||
github.com/oapi-codegen/runtime v1.1.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/prometheus/client_model v0.6.1 // indirect
|
github.com/prometheus/client_model v0.2.0 // indirect
|
||||||
github.com/prometheus/common v0.55.0 // indirect
|
github.com/prometheus/common v0.37.0 // indirect
|
||||||
github.com/prometheus/procfs v0.15.1 // indirect
|
github.com/prometheus/procfs v0.7.3 // indirect
|
||||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
|
|
||||||
github.com/shopspring/decimal v1.3.1 // indirect
|
github.com/shopspring/decimal v1.3.1 // indirect
|
||||||
github.com/tklauser/numcpus v0.7.0 // indirect
|
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||||
golang.org/x/crypto v0.35.0 // indirect
|
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
|
||||||
golang.org/x/net v0.36.0 // indirect
|
golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect
|
||||||
google.golang.org/protobuf v1.35.2 // indirect
|
google.golang.org/protobuf v1.28.0 // indirect
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
695
go.sum
695
go.sum
@@ -1,139 +1,624 @@
|
|||||||
github.com/ClusterCockpit/cc-backend v1.4.2 h1:kTOzqkh9N0564N9nqQThnSs7TAfg8RLgvSm00e5HtIc=
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
github.com/ClusterCockpit/cc-backend v1.4.2/go.mod h1:g8TNHXe4AXej26snu2//jO3mUF980elT93iV/k11O/c=
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
github.com/ClusterCockpit/cc-lib v0.1.0-beta.1 h1:dz9j0g2cod8+SMDjuoIY6ISpiHHeekhX6yQaeiwiwJw=
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
github.com/ClusterCockpit/cc-lib v0.1.0-beta.1/go.mod h1:kXMskla1i5ZSfXW0vVRIHgGeXMU5zu2PzYOYnUaOr80=
|
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||||
github.com/ClusterCockpit/cc-lib v0.1.1 h1:AXZWYUzgTaE/WdxLNSWPR7FJoA5WlzvYZxw4gIw3gNw=
|
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
github.com/ClusterCockpit/cc-lib v0.1.1/go.mod h1:SHKcWW/+kN+pcofAtHJFxvmx1FV0VIJuQv5PuT0HDcc=
|
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||||
github.com/ClusterCockpit/cc-units v0.4.0 h1:zP5DOu99GmErW0tCDf0gcLrlWt42RQ9dpoONEOh4cI0=
|
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||||
github.com/ClusterCockpit/cc-units v0.4.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
|
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||||
|
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||||
|
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||||
|
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||||
|
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||||
|
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||||
|
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||||
|
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||||
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
|
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||||
|
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||||
|
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||||
|
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||||
|
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||||
|
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||||
|
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||||
|
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||||
|
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||||
|
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||||
|
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||||
|
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||||
|
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||||
|
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||||
|
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||||
|
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
|
github.com/ClusterCockpit/cc-units v0.3.0 h1:JEKgEyvN4GABheKIReW2siDXgpYf2zf4STXV2ip418Y=
|
||||||
|
github.com/ClusterCockpit/cc-units v0.3.0/go.mod h1:3S3PAhAayS3pbgcT4q9Vn9VJw22Op51X0YimtG77zBw=
|
||||||
github.com/ClusterCockpit/go-rocm-smi v0.3.0 h1:1qZnSpG7/NyLtc7AjqnUL9Jb8xtqG1nMVgp69rJfaR8=
|
github.com/ClusterCockpit/go-rocm-smi v0.3.0 h1:1qZnSpG7/NyLtc7AjqnUL9Jb8xtqG1nMVgp69rJfaR8=
|
||||||
github.com/ClusterCockpit/go-rocm-smi v0.3.0/go.mod h1:+I3UMeX3OlizXDf1WpGD43W4KGZZGVSGmny6rTeOnWA=
|
github.com/ClusterCockpit/go-rocm-smi v0.3.0/go.mod h1:+I3UMeX3OlizXDf1WpGD43W4KGZZGVSGmny6rTeOnWA=
|
||||||
|
github.com/NVIDIA/go-nvml v0.11.6-0 h1:tugQzmaX84Y/6+03wZ/MAgcpfSKDkvkAWeuxFNLHmxY=
|
||||||
github.com/NVIDIA/go-nvml v0.11.6-0/go.mod h1:hy7HYeQy335x6nEss0Ne3PYqleRa6Ct+VKD9RQ4nyFs=
|
github.com/NVIDIA/go-nvml v0.11.6-0/go.mod h1:hy7HYeQy335x6nEss0Ne3PYqleRa6Ct+VKD9RQ4nyFs=
|
||||||
github.com/NVIDIA/go-nvml v0.12.0-2 h1:Sg239yy7jmopu/cuvYauoMj9fOpcGMngxVxxS1EBXeY=
|
github.com/PaesslerAG/gval v1.2.0 h1:DA7PsxmtzlUU4bYxV35MKp9KDDVWcrJJRhlaCohMhsM=
|
||||||
github.com/NVIDIA/go-nvml v0.12.0-2/go.mod h1:7ruy85eOM73muOc/I37euONSwEyFqZsv5ED9AogD4G0=
|
github.com/PaesslerAG/gval v1.2.0/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac=
|
||||||
github.com/PaesslerAG/gval v1.2.2 h1:Y7iBzhgE09IGTt5QgGQ2IdaYYYOU134YGHBThD+wm9E=
|
|
||||||
github.com/PaesslerAG/gval v1.2.2/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac=
|
|
||||||
github.com/PaesslerAG/jsonpath v0.1.0 h1:gADYeifvlqK3R3i2cR5B4DGgxLXIPb3TRTH1mGi0jPI=
|
github.com/PaesslerAG/jsonpath v0.1.0 h1:gADYeifvlqK3R3i2cR5B4DGgxLXIPb3TRTH1mGi0jPI=
|
||||||
github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8=
|
github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8=
|
||||||
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||||
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||||
|
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||||
|
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
|
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
|
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||||
github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
|
||||||
github.com/expr-lang/expr v1.17.0 h1:+vpszOyzKLQXC9VF+wA8cVA0tlA984/Wabc/1hF9Whg=
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||||
github.com/expr-lang/expr v1.17.0/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
github.com/deepmap/oapi-codegen v1.11.0 h1:f/X2NdIkaBKsSdpeuwLnY/vDI0AtPUrmB5LMgc7YD+A=
|
||||||
github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
github.com/deepmap/oapi-codegen v1.11.0/go.mod h1:k+ujhoQGxmQYBZBbxhOZNZf4j08qv5mC+OH+fFTnKxM=
|
||||||
github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
github.com/getkin/kin-openapi v0.94.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac1BUPam4aw6Q=
|
||||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||||
|
github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
|
||||||
|
github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||||
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
|
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
|
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||||
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
|
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||||
|
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||||
|
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||||
|
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||||
|
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||||
|
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||||
|
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||||
|
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||||
|
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||||
|
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
|
||||||
|
github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||||
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
|
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||||
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||||
|
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
|
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||||
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
|
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
|
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
|
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
|
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||||
|
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
|
github.com/influxdata/influxdb-client-go/v2 v2.9.1 h1:5kbH226fmmiV0MMTs7a8L7/ECCKdJWBi1QZNNv4/TkI=
|
||||||
|
github.com/influxdata/influxdb-client-go/v2 v2.9.1/go.mod h1:x7Jo5UHHl+w8wu8UnGiNobDDHygojXwJX4mx7rXGKMk=
|
||||||
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
|
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
|
||||||
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||||
github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98=
|
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||||
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig=
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY=
|
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY=
|
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE=
|
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM=
|
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||||
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
|
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
|
||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||||
github.com/nats-io/nats.go v1.39.0 h1:2/yg2JQjiYYKLwDuBzV0FbB2sIV+eFNkEevlRi4n9lI=
|
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
|
||||||
github.com/nats-io/nats.go v1.39.0/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
|
github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
|
||||||
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
|
github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
|
||||||
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
|
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
|
||||||
|
github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
|
||||||
|
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
|
||||||
|
github.com/lestrrat-go/jwx v1.2.24/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
|
||||||
|
github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
|
||||||
|
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
|
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
|
github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
|
||||||
|
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||||
|
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||||
|
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||||
|
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I=
|
||||||
|
github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4=
|
||||||
|
github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4=
|
||||||
|
github.com/nats-io/nats.go v1.16.0 h1:zvLE7fGBQYW6MWaFaRdsgm9qT39PJDQoju+DS8KsO1g=
|
||||||
|
github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
|
||||||
|
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
|
||||||
|
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
|
||||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||||
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
|
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||||
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4=
|
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
|
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
|
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||||
|
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||||
|
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||||
|
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||||
|
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
|
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
|
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
|
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||||
|
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
|
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||||
|
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||||
|
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
github.com/stmcginnis/gofish v0.15.0 h1:8TG41+lvJk/0Nf8CIIYErxbMlQUy80W0JFRZP3Ld82A=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/stmcginnis/gofish v0.15.0/go.mod h1:BLDSFTp8pDlf/xDbLZa+F7f7eW0E/CHCboggsu8CznI=
|
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||||
|
github.com/stmcginnis/gofish v0.13.0 h1:qq6q3yNt9vw7ZuJxiw87hq9+BdPLsuRQBwl+XoZSz60=
|
||||||
|
github.com/stmcginnis/gofish v0.13.0/go.mod h1:BLDSFTp8pDlf/xDbLZa+F7f7eW0E/CHCboggsu8CznI=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
|
||||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||||
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
|
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||||
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
|
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
||||||
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
|
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||||
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
|
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||||
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1 h1:P7S/GeHBAFEZIYp0ePPs2kHXoazz8q2KsyxHyQVGCJg=
|
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||||
golang.design/x/thread v0.0.0-20210122121316-335e9adffdf1/go.mod h1:9CWpnTUmlQkfdpdutA1nNf4iE5lAVt3QZOu0Z6hahBE=
|
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
|
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
golang.org/x/exp v0.0.0-20250215185904-eff6e970281f h1:oFMYAjX0867ZD2jcNiLBrI9BdpmEkvPyi5YrBGXbamg=
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
golang.org/x/exp v0.0.0-20250215185904-eff6e970281f/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
|
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/sys v0.0.0-20210122093101-04d7465088b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
|
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
|
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
|
golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
|
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
|
||||||
|
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||||
|
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||||
|
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||||
|
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.0.0-20220513224357-95641704303c/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0=
|
||||||
|
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew=
|
||||||
|
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w=
|
||||||
|
golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||||
|
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
|
||||||
|
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||||
|
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||||
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||||
|
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
|
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||||
|
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||||
|
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||||
|
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
|
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||||
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
|
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||||
|
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
|
|||||||
@@ -9,9 +9,9 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
|
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
|
||||||
|
|
||||||
"github.com/PaesslerAG/gval"
|
"github.com/PaesslerAG/gval"
|
||||||
@@ -31,14 +31,14 @@ type metricAggregator struct {
|
|||||||
functions []*MetricAggregatorIntervalConfig
|
functions []*MetricAggregatorIntervalConfig
|
||||||
constants map[string]interface{}
|
constants map[string]interface{}
|
||||||
language gval.Language
|
language gval.Language
|
||||||
output chan lp.CCMessage
|
output chan lp.CCMetric
|
||||||
}
|
}
|
||||||
|
|
||||||
type MetricAggregator interface {
|
type MetricAggregator interface {
|
||||||
AddAggregation(name, function, condition string, tags, meta map[string]string) error
|
AddAggregation(name, function, condition string, tags, meta map[string]string) error
|
||||||
DeleteAggregation(name string) error
|
DeleteAggregation(name string) error
|
||||||
Init(output chan lp.CCMessage) error
|
Init(output chan lp.CCMetric) error
|
||||||
Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage)
|
Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric)
|
||||||
}
|
}
|
||||||
|
|
||||||
var metricCacheLanguage = gval.NewLanguage(
|
var metricCacheLanguage = gval.NewLanguage(
|
||||||
@@ -74,7 +74,7 @@ var evaluables = struct {
|
|||||||
mapping: make(map[string]gval.Evaluable),
|
mapping: make(map[string]gval.Evaluable),
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *metricAggregator) Init(output chan lp.CCMessage) error {
|
func (c *metricAggregator) Init(output chan lp.CCMetric) error {
|
||||||
c.output = output
|
c.output = output
|
||||||
c.functions = make([]*MetricAggregatorIntervalConfig, 0)
|
c.functions = make([]*MetricAggregatorIntervalConfig, 0)
|
||||||
c.constants = make(map[string]interface{})
|
c.constants = make(map[string]interface{})
|
||||||
@@ -112,7 +112,7 @@ func (c *metricAggregator) Init(output chan lp.CCMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMessage) {
|
func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics []lp.CCMetric) {
|
||||||
vars := make(map[string]interface{})
|
vars := make(map[string]interface{})
|
||||||
for k, v := range c.constants {
|
for k, v := range c.constants {
|
||||||
vars[k] = v
|
vars[k] = v
|
||||||
@@ -121,13 +121,8 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
|
|||||||
vars["endtime"] = endtime
|
vars["endtime"] = endtime
|
||||||
for _, f := range c.functions {
|
for _, f := range c.functions {
|
||||||
cclog.ComponentDebug("MetricCache", "COLLECT", f.Name, "COND", f.Condition)
|
cclog.ComponentDebug("MetricCache", "COLLECT", f.Name, "COND", f.Condition)
|
||||||
var valuesFloat64 []float64
|
values := make([]float64, 0)
|
||||||
var valuesFloat32 []float32
|
matches := make([]lp.CCMetric, 0)
|
||||||
var valuesInt []int
|
|
||||||
var valuesInt32 []int32
|
|
||||||
var valuesInt64 []int64
|
|
||||||
var valuesBool []bool
|
|
||||||
matches := make([]lp.CCMessage, 0)
|
|
||||||
for _, m := range metrics {
|
for _, m := range metrics {
|
||||||
vars["metric"] = m
|
vars["metric"] = m
|
||||||
//value, err := gval.Evaluate(f.Condition, vars, c.language)
|
//value, err := gval.Evaluate(f.Condition, vars, c.language)
|
||||||
@@ -141,17 +136,17 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
|
|||||||
if valid {
|
if valid {
|
||||||
switch x := v.(type) {
|
switch x := v.(type) {
|
||||||
case float64:
|
case float64:
|
||||||
valuesFloat64 = append(valuesFloat64, x)
|
values = append(values, x)
|
||||||
case float32:
|
case float32:
|
||||||
valuesFloat32 = append(valuesFloat32, x)
|
|
||||||
case int:
|
case int:
|
||||||
valuesInt = append(valuesInt, x)
|
|
||||||
case int32:
|
|
||||||
valuesInt32 = append(valuesInt32, x)
|
|
||||||
case int64:
|
case int64:
|
||||||
valuesInt64 = append(valuesInt64, x)
|
values = append(values, float64(x))
|
||||||
case bool:
|
case bool:
|
||||||
valuesBool = append(valuesBool, x)
|
if x {
|
||||||
|
values = append(values, float64(1.0))
|
||||||
|
} else {
|
||||||
|
values = append(values, float64(0.0))
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
cclog.ComponentError("MetricCache", "COLLECT ADD VALUE", v, "FAILED")
|
cclog.ComponentError("MetricCache", "COLLECT ADD VALUE", v, "FAILED")
|
||||||
}
|
}
|
||||||
@@ -160,63 +155,17 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
delete(vars, "metric")
|
delete(vars, "metric")
|
||||||
|
cclog.ComponentDebug("MetricCache", "EVALUATE", f.Name, "METRICS", len(values), "CALC", f.Function)
|
||||||
// Check, that only values of one type were collected
|
vars["values"] = values
|
||||||
countValueTypes := 0
|
|
||||||
if len(valuesFloat64) > 0 {
|
|
||||||
countValueTypes += 1
|
|
||||||
}
|
|
||||||
if len(valuesFloat32) > 0 {
|
|
||||||
countValueTypes += 1
|
|
||||||
}
|
|
||||||
if len(valuesInt) > 0 {
|
|
||||||
countValueTypes += 1
|
|
||||||
}
|
|
||||||
if len(valuesInt32) > 0 {
|
|
||||||
countValueTypes += 1
|
|
||||||
}
|
|
||||||
if len(valuesInt64) > 0 {
|
|
||||||
countValueTypes += 1
|
|
||||||
}
|
|
||||||
if len(valuesBool) > 0 {
|
|
||||||
countValueTypes += 1
|
|
||||||
}
|
|
||||||
if countValueTypes > 1 {
|
|
||||||
cclog.ComponentError("MetricCache", "Collected values of different types")
|
|
||||||
}
|
|
||||||
|
|
||||||
var len_values int
|
|
||||||
switch {
|
|
||||||
case len(valuesFloat64) > 0:
|
|
||||||
vars["values"] = valuesFloat64
|
|
||||||
len_values = len(valuesFloat64)
|
|
||||||
case len(valuesFloat32) > 0:
|
|
||||||
vars["values"] = valuesFloat32
|
|
||||||
len_values = len(valuesFloat32)
|
|
||||||
case len(valuesInt) > 0:
|
|
||||||
vars["values"] = valuesInt
|
|
||||||
len_values = len(valuesInt)
|
|
||||||
case len(valuesInt32) > 0:
|
|
||||||
vars["values"] = valuesInt32
|
|
||||||
len_values = len(valuesInt32)
|
|
||||||
case len(valuesInt64) > 0:
|
|
||||||
vars["values"] = valuesInt64
|
|
||||||
len_values = len(valuesInt64)
|
|
||||||
case len(valuesBool) > 0:
|
|
||||||
vars["values"] = valuesBool
|
|
||||||
len_values = len(valuesBool)
|
|
||||||
}
|
|
||||||
cclog.ComponentDebug("MetricCache", "EVALUATE", f.Name, "METRICS", len_values, "CALC", f.Function)
|
|
||||||
|
|
||||||
vars["metrics"] = matches
|
vars["metrics"] = matches
|
||||||
if len_values > 0 {
|
if len(values) > 0 {
|
||||||
value, err := gval.Evaluate(f.Function, vars, c.language)
|
value, err := gval.Evaluate(f.Function, vars, c.language)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError("MetricCache", "EVALUATE", f.Name, "METRICS", len_values, "CALC", f.Function, ":", err.Error())
|
cclog.ComponentError("MetricCache", "EVALUATE", f.Name, "METRICS", len(values), "CALC", f.Function, ":", err.Error())
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
copy_tags := func(tags map[string]string, metrics []lp.CCMessage) map[string]string {
|
copy_tags := func(tags map[string]string, metrics []lp.CCMetric) map[string]string {
|
||||||
out := make(map[string]string)
|
out := make(map[string]string)
|
||||||
for key, value := range tags {
|
for key, value := range tags {
|
||||||
switch value {
|
switch value {
|
||||||
@@ -233,7 +182,7 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
|
|||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
copy_meta := func(meta map[string]string, metrics []lp.CCMessage) map[string]string {
|
copy_meta := func(meta map[string]string, metrics []lp.CCMetric) map[string]string {
|
||||||
out := make(map[string]string)
|
out := make(map[string]string)
|
||||||
for key, value := range meta {
|
for key, value := range meta {
|
||||||
switch value {
|
switch value {
|
||||||
@@ -253,18 +202,18 @@ func (c *metricAggregator) Eval(starttime time.Time, endtime time.Time, metrics
|
|||||||
tags := copy_tags(f.Tags, matches)
|
tags := copy_tags(f.Tags, matches)
|
||||||
meta := copy_meta(f.Meta, matches)
|
meta := copy_meta(f.Meta, matches)
|
||||||
|
|
||||||
var m lp.CCMessage
|
var m lp.CCMetric
|
||||||
switch t := value.(type) {
|
switch t := value.(type) {
|
||||||
case float64:
|
case float64:
|
||||||
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
||||||
case float32:
|
case float32:
|
||||||
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
||||||
case int:
|
case int:
|
||||||
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
||||||
case int64:
|
case int64:
|
||||||
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
||||||
case string:
|
case string:
|
||||||
m, err = lp.NewMessage(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
m, err = lp.New(f.Name, tags, meta, map[string]interface{}{"value": t}, starttime)
|
||||||
default:
|
default:
|
||||||
cclog.ComponentError("MetricCache", "Gval returned invalid type", t, "skipping metric", f.Name)
|
cclog.ComponentError("MetricCache", "Gval returned invalid type", t, "skipping metric", f.Name)
|
||||||
}
|
}
|
||||||
@@ -367,7 +316,7 @@ func EvalBoolCondition(condition string, params map[string]interface{}) (bool, e
|
|||||||
return value, err
|
return value, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func EvalFloat64Condition(condition string, params map[string]float64) (float64, error) {
|
func EvalFloat64Condition(condition string, params map[string]interface{}) (float64, error) {
|
||||||
evaluables.mutex.Lock()
|
evaluables.mutex.Lock()
|
||||||
evaluable, ok := evaluables.mapping[condition]
|
evaluable, ok := evaluables.mapping[condition]
|
||||||
evaluables.mutex.Unlock()
|
evaluables.mutex.Unlock()
|
||||||
@@ -389,7 +338,7 @@ func EvalFloat64Condition(condition string, params map[string]float64) (float64,
|
|||||||
return value, err
|
return value, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAggregator(output chan lp.CCMessage) (MetricAggregator, error) {
|
func NewAggregator(output chan lp.CCMetric) (MetricAggregator, error) {
|
||||||
a := new(metricAggregator)
|
a := new(metricAggregator)
|
||||||
err := a.Init(output)
|
err := a.Init(output)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -3,11 +3,12 @@ package metricAggregator
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"golang.org/x/exp/slices"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
|
||||||
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
|
topo "github.com/ClusterCockpit/cc-metric-collector/pkg/ccTopology"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -15,155 +16,149 @@ import (
|
|||||||
* Arithmetic functions on value arrays
|
* Arithmetic functions on value arrays
|
||||||
*/
|
*/
|
||||||
|
|
||||||
func sumAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
|
// Sum up values
|
||||||
if len(values) == 0 {
|
func sumfunc(args ...interface{}) (interface{}, error) {
|
||||||
return 0.0, errors.New("sum function requires at least one argument")
|
s := 0.0
|
||||||
|
values, ok := args[0].([]float64)
|
||||||
|
if ok {
|
||||||
|
cclog.ComponentDebug("MetricCache", "SUM FUNC START")
|
||||||
|
for _, x := range values {
|
||||||
|
s += x
|
||||||
}
|
}
|
||||||
var sum T
|
cclog.ComponentDebug("MetricCache", "SUM FUNC END", s)
|
||||||
for _, value := range values {
|
} else {
|
||||||
sum += value
|
cclog.ComponentDebug("MetricCache", "SUM FUNC CAST FAILED")
|
||||||
}
|
}
|
||||||
return sum, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sum up values
|
// Get the minimum value
|
||||||
func sumfunc(args interface{}) (interface{}, error) {
|
func minfunc(args ...interface{}) (interface{}, error) {
|
||||||
|
var err error = nil
|
||||||
var err error
|
switch values := args[0].(type) {
|
||||||
switch values := args.(type) {
|
|
||||||
case []float64:
|
case []float64:
|
||||||
return sumAnyType(values)
|
var s float64 = math.MaxFloat64
|
||||||
|
for _, x := range values {
|
||||||
|
if x < s {
|
||||||
|
s = x
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
case []float32:
|
case []float32:
|
||||||
return sumAnyType(values)
|
var s float32 = math.MaxFloat32
|
||||||
|
for _, x := range values {
|
||||||
|
if x < s {
|
||||||
|
s = x
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
case []int:
|
case []int:
|
||||||
return sumAnyType(values)
|
var s int = int(math.MaxInt32)
|
||||||
|
for _, x := range values {
|
||||||
|
if x < s {
|
||||||
|
s = x
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
case []int64:
|
case []int64:
|
||||||
return sumAnyType(values)
|
var s int64 = math.MaxInt64
|
||||||
|
for _, x := range values {
|
||||||
|
if x < s {
|
||||||
|
s = x
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
case []int32:
|
case []int32:
|
||||||
return sumAnyType(values)
|
var s int32 = math.MaxInt32
|
||||||
|
for _, x := range values {
|
||||||
|
if x < s {
|
||||||
|
s = x
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
default:
|
default:
|
||||||
err = errors.New("function 'sum' only on list of values (float64, float32, int, int32, int64)")
|
err = errors.New("function 'min' only on list of values (float64, float32, int, int32, int64)")
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0.0, err
|
return 0.0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func minAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return 0.0, errors.New("min function requires at least one argument")
|
|
||||||
}
|
|
||||||
return slices.Min(values), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the minimum value
|
|
||||||
func minfunc(args interface{}) (interface{}, error) {
|
|
||||||
switch values := args.(type) {
|
|
||||||
case []float64:
|
|
||||||
return minAnyType(values)
|
|
||||||
case []float32:
|
|
||||||
return minAnyType(values)
|
|
||||||
case []int:
|
|
||||||
return minAnyType(values)
|
|
||||||
case []int64:
|
|
||||||
return minAnyType(values)
|
|
||||||
case []int32:
|
|
||||||
return minAnyType(values)
|
|
||||||
default:
|
|
||||||
return 0.0, errors.New("function 'min' only on list of values (float64, float32, int, int32, int64)")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func avgAnyType[T float64 | float32 | int | int32 | int64](values []T) (float64, error) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return 0.0, errors.New("average function requires at least one argument")
|
|
||||||
}
|
|
||||||
sum, err := sumAnyType[T](values)
|
|
||||||
return float64(sum) / float64(len(values)), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the average or mean value
|
// Get the average or mean value
|
||||||
func avgfunc(args interface{}) (interface{}, error) {
|
func avgfunc(args ...interface{}) (interface{}, error) {
|
||||||
switch values := args.(type) {
|
switch values := args[0].(type) {
|
||||||
case []float64:
|
case []float64:
|
||||||
return avgAnyType(values)
|
var s float64 = 0
|
||||||
|
for _, x := range values {
|
||||||
|
s += x
|
||||||
|
}
|
||||||
|
return s / float64(len(values)), nil
|
||||||
case []float32:
|
case []float32:
|
||||||
return avgAnyType(values)
|
var s float32 = 0
|
||||||
|
for _, x := range values {
|
||||||
|
s += x
|
||||||
|
}
|
||||||
|
return s / float32(len(values)), nil
|
||||||
case []int:
|
case []int:
|
||||||
return avgAnyType(values)
|
var s int = 0
|
||||||
|
for _, x := range values {
|
||||||
|
s += x
|
||||||
|
}
|
||||||
|
return s / len(values), nil
|
||||||
case []int64:
|
case []int64:
|
||||||
return avgAnyType(values)
|
var s int64 = 0
|
||||||
case []int32:
|
for _, x := range values {
|
||||||
return avgAnyType(values)
|
s += x
|
||||||
default:
|
|
||||||
return 0.0, errors.New("function 'average' only on list of values (float64, float32, int, int32, int64)")
|
|
||||||
}
|
}
|
||||||
}
|
return s / int64(len(values)), nil
|
||||||
|
|
||||||
func maxAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return 0.0, errors.New("max function requires at least one argument")
|
|
||||||
}
|
}
|
||||||
return slices.Max(values), nil
|
return 0.0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the maximum value
|
// Get the maximum value
|
||||||
func maxfunc(args interface{}) (interface{}, error) {
|
func maxfunc(args ...interface{}) (interface{}, error) {
|
||||||
switch values := args.(type) {
|
s := 0.0
|
||||||
case []float64:
|
values, ok := args[0].([]float64)
|
||||||
return maxAnyType(values)
|
if ok {
|
||||||
case []float32:
|
for _, x := range values {
|
||||||
return maxAnyType(values)
|
if x > s {
|
||||||
case []int:
|
s = x
|
||||||
return maxAnyType(values)
|
|
||||||
case []int64:
|
|
||||||
return maxAnyType(values)
|
|
||||||
case []int32:
|
|
||||||
return maxAnyType(values)
|
|
||||||
default:
|
|
||||||
return 0.0, errors.New("function 'max' only on list of values (float64, float32, int, int32, int64)")
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func medianAnyType[T float64 | float32 | int | int32 | int64](values []T) (T, error) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return 0.0, errors.New("median function requires at least one argument")
|
|
||||||
}
|
}
|
||||||
slices.Sort(values)
|
|
||||||
var median T
|
|
||||||
if midPoint := len(values) % 2; midPoint == 0 {
|
|
||||||
median = (values[midPoint-1] + values[midPoint]) / 2
|
|
||||||
} else {
|
|
||||||
median = values[midPoint]
|
|
||||||
}
|
}
|
||||||
return median, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the median value
|
// Get the median value
|
||||||
func medianfunc(args interface{}) (interface{}, error) {
|
func medianfunc(args ...interface{}) (interface{}, error) {
|
||||||
switch values := args.(type) {
|
switch values := args[0].(type) {
|
||||||
case []float64:
|
case []float64:
|
||||||
return medianAnyType(values)
|
sort.Float64s(values)
|
||||||
case []float32:
|
return values[len(values)/2], nil
|
||||||
return medianAnyType(values)
|
// case []float32:
|
||||||
|
// sort.Float64s(values)
|
||||||
|
// return values[len(values)/2], nil
|
||||||
case []int:
|
case []int:
|
||||||
return medianAnyType(values)
|
sort.Ints(values)
|
||||||
case []int64:
|
return values[len(values)/2], nil
|
||||||
return medianAnyType(values)
|
|
||||||
case []int32:
|
// case []int64:
|
||||||
return medianAnyType(values)
|
// sort.Ints(values)
|
||||||
default:
|
// return values[len(values)/2], nil
|
||||||
return 0.0, errors.New("function 'median' only on list of values (float64, float32, int, int32, int64)")
|
// case []int32:
|
||||||
|
// sort.Ints(values)
|
||||||
|
// return values[len(values)/2], nil
|
||||||
}
|
}
|
||||||
|
return 0.0, errors.New("function 'median()' only on lists of type float64 and int")
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Get number of values in list. Returns always an int
|
* Get number of values in list. Returns always an int
|
||||||
*/
|
*/
|
||||||
|
|
||||||
func lenfunc(args interface{}) (interface{}, error) {
|
func lenfunc(args ...interface{}) (interface{}, error) {
|
||||||
var err error = nil
|
var err error = nil
|
||||||
var length int = 0
|
var length int = 0
|
||||||
switch values := args.(type) {
|
switch values := args[0].(type) {
|
||||||
case []float64:
|
case []float64:
|
||||||
length = len(values)
|
length = len(values)
|
||||||
case []float32:
|
case []float32:
|
||||||
@@ -248,8 +243,8 @@ func matchfunc(args ...interface{}) (interface{}, error) {
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
// for a given cpuid, it returns the core id
|
// for a given cpuid, it returns the core id
|
||||||
func getCpuCoreFunc(args interface{}) (interface{}, error) {
|
func getCpuCoreFunc(args ...interface{}) (interface{}, error) {
|
||||||
switch cpuid := args.(type) {
|
switch cpuid := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
return topo.GetHwthreadCore(cpuid), nil
|
return topo.GetHwthreadCore(cpuid), nil
|
||||||
}
|
}
|
||||||
@@ -257,8 +252,8 @@ func getCpuCoreFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// for a given cpuid, it returns the socket id
|
// for a given cpuid, it returns the socket id
|
||||||
func getCpuSocketFunc(args interface{}) (interface{}, error) {
|
func getCpuSocketFunc(args ...interface{}) (interface{}, error) {
|
||||||
switch cpuid := args.(type) {
|
switch cpuid := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
return topo.GetHwthreadSocket(cpuid), nil
|
return topo.GetHwthreadSocket(cpuid), nil
|
||||||
}
|
}
|
||||||
@@ -266,8 +261,8 @@ func getCpuSocketFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// for a given cpuid, it returns the id of the NUMA node
|
// for a given cpuid, it returns the id of the NUMA node
|
||||||
func getCpuNumaDomainFunc(args interface{}) (interface{}, error) {
|
func getCpuNumaDomainFunc(args ...interface{}) (interface{}, error) {
|
||||||
switch cpuid := args.(type) {
|
switch cpuid := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
return topo.GetHwthreadNumaDomain(cpuid), nil
|
return topo.GetHwthreadNumaDomain(cpuid), nil
|
||||||
}
|
}
|
||||||
@@ -275,8 +270,8 @@ func getCpuNumaDomainFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// for a given cpuid, it returns the id of the CPU die
|
// for a given cpuid, it returns the id of the CPU die
|
||||||
func getCpuDieFunc(args interface{}) (interface{}, error) {
|
func getCpuDieFunc(args ...interface{}) (interface{}, error) {
|
||||||
switch cpuid := args.(type) {
|
switch cpuid := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
return topo.GetHwthreadDie(cpuid), nil
|
return topo.GetHwthreadDie(cpuid), nil
|
||||||
}
|
}
|
||||||
@@ -284,13 +279,13 @@ func getCpuDieFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// for a given core id, it returns the list of cpuids
|
// for a given core id, it returns the list of cpuids
|
||||||
func getCpuListOfCoreFunc(args interface{}) (interface{}, error) {
|
func getCpuListOfCoreFunc(args ...interface{}) (interface{}, error) {
|
||||||
cpulist := make([]int, 0)
|
cpulist := make([]int, 0)
|
||||||
switch in := args.(type) {
|
switch in := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
for _, c := range topo.CpuData() {
|
for _, c := range topo.CpuData() {
|
||||||
if c.Core == in {
|
if c.Core == in {
|
||||||
cpulist = append(cpulist, c.CpuID)
|
cpulist = append(cpulist, c.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -298,13 +293,13 @@ func getCpuListOfCoreFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// for a given socket id, it returns the list of cpuids
|
// for a given socket id, it returns the list of cpuids
|
||||||
func getCpuListOfSocketFunc(args interface{}) (interface{}, error) {
|
func getCpuListOfSocketFunc(args ...interface{}) (interface{}, error) {
|
||||||
cpulist := make([]int, 0)
|
cpulist := make([]int, 0)
|
||||||
switch in := args.(type) {
|
switch in := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
for _, c := range topo.CpuData() {
|
for _, c := range topo.CpuData() {
|
||||||
if c.Socket == in {
|
if c.Socket == in {
|
||||||
cpulist = append(cpulist, c.CpuID)
|
cpulist = append(cpulist, c.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -312,13 +307,13 @@ func getCpuListOfSocketFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// for a given id of a NUMA domain, it returns the list of cpuids
|
// for a given id of a NUMA domain, it returns the list of cpuids
|
||||||
func getCpuListOfNumaDomainFunc(args interface{}) (interface{}, error) {
|
func getCpuListOfNumaDomainFunc(args ...interface{}) (interface{}, error) {
|
||||||
cpulist := make([]int, 0)
|
cpulist := make([]int, 0)
|
||||||
switch in := args.(type) {
|
switch in := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
for _, c := range topo.CpuData() {
|
for _, c := range topo.CpuData() {
|
||||||
if c.NumaDomain == in {
|
if c.Numadomain == in {
|
||||||
cpulist = append(cpulist, c.CpuID)
|
cpulist = append(cpulist, c.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -326,13 +321,13 @@ func getCpuListOfNumaDomainFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// for a given CPU die id, it returns the list of cpuids
|
// for a given CPU die id, it returns the list of cpuids
|
||||||
func getCpuListOfDieFunc(args interface{}) (interface{}, error) {
|
func getCpuListOfDieFunc(args ...interface{}) (interface{}, error) {
|
||||||
cpulist := make([]int, 0)
|
cpulist := make([]int, 0)
|
||||||
switch in := args.(type) {
|
switch in := args[0].(type) {
|
||||||
case int:
|
case int:
|
||||||
for _, c := range topo.CpuData() {
|
for _, c := range topo.CpuData() {
|
||||||
if c.Die == in {
|
if c.Die == in {
|
||||||
cpulist = append(cpulist, c.CpuID)
|
cpulist = append(cpulist, c.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -340,7 +335,7 @@ func getCpuListOfDieFunc(args interface{}) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// wrapper function to get a list of all cpuids of the node
|
// wrapper function to get a list of all cpuids of the node
|
||||||
func getCpuListOfNode() (interface{}, error) {
|
func getCpuListOfNode(args ...interface{}) (interface{}, error) {
|
||||||
return topo.HwthreadList(), nil
|
return topo.HwthreadList(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,21 +1,15 @@
|
|||||||
# CC Metric Router
|
# CC Metric Router
|
||||||
|
|
||||||
The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMessages](https://pkg.go.dev/github.com/ClusterCockpit/cc-energy-manager@v0.0.0-20240919152819-92a17f2da4f7/pkg/cc-message.
|
The CCMetric router sits in between the collectors and the sinks and can be used to add and remove tags to/from traversing [CCMetrics](../ccMetric/README.md).
|
||||||
|
|
||||||
|
|
||||||
# Configuration
|
# Configuration
|
||||||
|
|
||||||
**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) with option `process_messages`.
|
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"num_cache_intervals" : 1,
|
"num_cache_intervals" : 1,
|
||||||
"interval_timestamp" : true,
|
"interval_timestamp" : true,
|
||||||
"hostname_tag" : "hostname",
|
"hostname_tag" : "hostname",
|
||||||
"max_forward" : 50,
|
"max_forward" : 50,
|
||||||
"process_messages": {
|
|
||||||
"see": "pkg/messageProcessor/README.md"
|
|
||||||
},
|
|
||||||
"add_tags" : [
|
"add_tags" : [
|
||||||
{
|
{
|
||||||
"key" : "cluster",
|
"key" : "cluster",
|
||||||
@@ -69,8 +63,6 @@ The CCMetric router sits in between the collectors and the sinks and can be used
|
|||||||
|
|
||||||
There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.
|
There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.
|
||||||
|
|
||||||
**Note**: Use the [message processor configuration](../../pkg/messageProcessor/README.md) (option `process_messages`) instead of `add_tags`, `delete_tags`, `drop_metrics`, `drop_metrics_if`, `rename_metrics`, `normalize_units` and `change_unit_prefix`. These options are deprecated and will be removed in future versions. Until then, they are added to the message processor.
|
|
||||||
|
|
||||||
# Processing order in the router
|
# Processing order in the router
|
||||||
|
|
||||||
- Add the `hostname_tag` tag (if sent by collectors or cache)
|
- Add the `hostname_tag` tag (if sent by collectors or cache)
|
||||||
@@ -104,8 +96,6 @@ Every time the router receives a metric through any of the channels, it tries to
|
|||||||
|
|
||||||
# The `rename_metrics` option
|
# The `rename_metrics` option
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, execuables and libraries, they might change from system to system (or installation to installtion, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`
|
In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, execuables and libraries, they might change from system to system (or installation to installtion, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`
|
||||||
|
|
||||||
```json
|
```json
|
||||||
@@ -117,8 +107,6 @@ In the ClusterCockpit world we specified a set of standard metrics. Since some c
|
|||||||
|
|
||||||
# Conditional manipulation of tags (`add_tags` and `del_tags`)
|
# Conditional manipulation of tags (`add_tags` and `del_tags`)
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
Common config format:
|
Common config format:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
@@ -130,8 +118,6 @@ Common config format:
|
|||||||
|
|
||||||
## The `del_tags` option
|
## The `del_tags` option
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.
|
The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.
|
||||||
|
|
||||||
If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case.
|
If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case.
|
||||||
@@ -143,8 +129,6 @@ Never delete tags:
|
|||||||
|
|
||||||
## The `add_tags` option
|
## The `add_tags` option
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.
|
In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.
|
||||||
|
|
||||||
If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.
|
If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.
|
||||||
@@ -186,8 +170,6 @@ In some cases, you want to drop a metric and don't get it forwarded to the sinks
|
|||||||
|
|
||||||
## The `drop_metrics` section
|
## The `drop_metrics` section
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
The argument is a list of metric names. No futher checks are performed, only a comparison of the metric name
|
The argument is a list of metric names. No futher checks are performed, only a comparison of the metric name
|
||||||
|
|
||||||
```json
|
```json
|
||||||
@@ -203,8 +185,6 @@ The example drops all metrics with the name `drop_metric_1` and `drop_metric_2`.
|
|||||||
|
|
||||||
## The `drop_metrics_if` section
|
## The `drop_metrics_if` section
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
This option takes a list of evaluable conditions and performs them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).
|
This option takes a list of evaluable conditions and performs them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).
|
||||||
|
|
||||||
```json
|
```json
|
||||||
@@ -220,22 +200,15 @@ The first line is comparable with the example in `drop_metrics`, it drops all me
|
|||||||
# Manipulating the metric units
|
# Manipulating the metric units
|
||||||
|
|
||||||
## The `normalize_units` option
|
## The `normalize_units` option
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
|
|
||||||
The cc-metric-collector tries to read the data from the system as it is reported. If available, it tries to read the metric unit from the system as well (e.g. from `/proc/meminfo`). The problem is that, depending on the source, the metric units are named differently. Just think about `byte`, `Byte`, `B`, `bytes`, ...
|
The cc-metric-collector tries to read the data from the system as it is reported. If available, it tries to read the metric unit from the system as well (e.g. from `/proc/meminfo`). The problem is that, depending on the source, the metric units are named differently. Just think about `byte`, `Byte`, `B`, `bytes`, ...
|
||||||
The [cc-units](https://github.com/ClusterCockpit/cc-units) package provides us a normalization option to use the same metric unit name for all metrics. It this option is set to true, all `unit` meta tags are normalized.
|
The [cc-units](https://github.com/ClusterCockpit/cc-units) package provides us a normalization option to use the same metric unit name for all metrics. It this option is set to true, all `unit` meta tags are normalized.
|
||||||
|
|
||||||
## The `change_unit_prefix` section
|
## The `change_unit_prefix` section
|
||||||
|
|
||||||
__deprecated__
|
|
||||||
|
|
||||||
It is often the case that metrics are reported by the system using a rather outdated unit prefix (like `/proc/meminfo` still uses kByte despite current memory sizes are in the GByte range). If you want to change the prefix of a unit, you can do that with the help of [cc-units](https://github.com/ClusterCockpit/cc-units). The setting works on the metric name and requires the new prefix for the metric. The cc-units package determines the scaling factor.
|
It is often the case that metrics are reported by the system using a rather outdated unit prefix (like `/proc/meminfo` still uses kByte despite current memory sizes are in the GByte range). If you want to change the prefix of a unit, you can do that with the help of [cc-units](https://github.com/ClusterCockpit/cc-units). The setting works on the metric name and requires the new prefix for the metric. The cc-units package determines the scaling factor.
|
||||||
|
|
||||||
# Aggregate metric values of the current interval with the `interval_aggregates` option
|
# Aggregate metric values of the current interval with the `interval_aggregates` option
|
||||||
|
|
||||||
**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0 and is **experimental**
|
**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0
|
||||||
|
|
||||||
In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent in the next interval but have the timestamp of the previous interval beginning.
|
In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent in the next interval but have the timestamp of the previous interval beginning.
|
||||||
|
|
||||||
|
|||||||
@@ -4,10 +4,10 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
|
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -16,7 +16,7 @@ type metricCachePeriod struct {
|
|||||||
stopstamp time.Time
|
stopstamp time.Time
|
||||||
numMetrics int
|
numMetrics int
|
||||||
sizeMetrics int
|
sizeMetrics int
|
||||||
metrics []lp.CCMessage
|
metrics []lp.CCMetric
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metric cache data structure
|
// Metric cache data structure
|
||||||
@@ -29,21 +29,21 @@ type metricCache struct {
|
|||||||
ticker mct.MultiChanTicker
|
ticker mct.MultiChanTicker
|
||||||
tickchan chan time.Time
|
tickchan chan time.Time
|
||||||
done chan bool
|
done chan bool
|
||||||
output chan lp.CCMessage
|
output chan lp.CCMetric
|
||||||
aggEngine agg.MetricAggregator
|
aggEngine agg.MetricAggregator
|
||||||
}
|
}
|
||||||
|
|
||||||
type MetricCache interface {
|
type MetricCache interface {
|
||||||
Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
|
Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
|
||||||
Start()
|
Start()
|
||||||
Add(metric lp.CCMessage)
|
Add(metric lp.CCMetric)
|
||||||
GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage)
|
GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric)
|
||||||
AddAggregation(name, function, condition string, tags, meta map[string]string) error
|
AddAggregation(name, function, condition string, tags, meta map[string]string) error
|
||||||
DeleteAggregation(name string) error
|
DeleteAggregation(name string) error
|
||||||
Close()
|
Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
|
func (c *metricCache) Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
|
||||||
var err error = nil
|
var err error = nil
|
||||||
c.done = make(chan bool)
|
c.done = make(chan bool)
|
||||||
c.wg = wg
|
c.wg = wg
|
||||||
@@ -55,7 +55,7 @@ func (c *metricCache) Init(output chan lp.CCMessage, ticker mct.MultiChanTicker,
|
|||||||
p := new(metricCachePeriod)
|
p := new(metricCachePeriod)
|
||||||
p.numMetrics = 0
|
p.numMetrics = 0
|
||||||
p.sizeMetrics = 0
|
p.sizeMetrics = 0
|
||||||
p.metrics = make([]lp.CCMessage, 0)
|
p.metrics = make([]lp.CCMetric, 0)
|
||||||
c.intervals = append(c.intervals, p)
|
c.intervals = append(c.intervals, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -124,7 +124,7 @@ func (c *metricCache) Start() {
|
|||||||
// Add a metric to the cache. The interval is defined by the global timer (rotate() in Start())
|
// Add a metric to the cache. The interval is defined by the global timer (rotate() in Start())
|
||||||
// The intervals list is used as round-robin buffer and the metric list grows dynamically and
|
// The intervals list is used as round-robin buffer and the metric list grows dynamically and
|
||||||
// to avoid reallocations
|
// to avoid reallocations
|
||||||
func (c *metricCache) Add(metric lp.CCMessage) {
|
func (c *metricCache) Add(metric lp.CCMetric) {
|
||||||
if c.curPeriod >= 0 && c.curPeriod < c.numPeriods {
|
if c.curPeriod >= 0 && c.curPeriod < c.numPeriods {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
p := c.intervals[c.curPeriod]
|
p := c.intervals[c.curPeriod]
|
||||||
@@ -153,10 +153,10 @@ func (c *metricCache) DeleteAggregation(name string) error {
|
|||||||
// Get all metrics of a interval. The index is the difference to the current interval, so index=0
|
// Get all metrics of a interval. The index is the difference to the current interval, so index=0
|
||||||
// is the current one, index=1 the last interval and so on. Returns and empty array if a wrong index
|
// is the current one, index=1 the last interval and so on. Returns and empty array if a wrong index
|
||||||
// is given (negative index, index larger than configured number of total intervals, ...)
|
// is given (negative index, index larger than configured number of total intervals, ...)
|
||||||
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage) {
|
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric) {
|
||||||
var start time.Time = time.Now()
|
var start time.Time = time.Now()
|
||||||
var stop time.Time = time.Now()
|
var stop time.Time = time.Now()
|
||||||
var metrics []lp.CCMessage
|
var metrics []lp.CCMetric
|
||||||
if index >= 0 && index < c.numPeriods {
|
if index >= 0 && index < c.numPeriods {
|
||||||
pindex := c.curPeriod - index
|
pindex := c.curPeriod - index
|
||||||
if pindex < 0 {
|
if pindex < 0 {
|
||||||
@@ -168,10 +168,10 @@ func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMessage
|
|||||||
metrics = c.intervals[pindex].metrics
|
metrics = c.intervals[pindex].metrics
|
||||||
//return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
|
//return c.intervals[pindex].startstamp, c.intervals[pindex].stopstamp, c.intervals[pindex].metrics
|
||||||
} else {
|
} else {
|
||||||
metrics = make([]lp.CCMessage, 0)
|
metrics = make([]lp.CCMetric, 0)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
metrics = make([]lp.CCMessage, 0)
|
metrics = make([]lp.CCMetric, 0)
|
||||||
}
|
}
|
||||||
return start, stop, metrics
|
return start, stop, metrics
|
||||||
}
|
}
|
||||||
@@ -182,7 +182,7 @@ func (c *metricCache) Close() {
|
|||||||
c.done <- true
|
c.done <- true
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewCache(output chan lp.CCMessage, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
|
func NewCache(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
|
||||||
c := new(metricCache)
|
c := new(metricCache)
|
||||||
err := c.Init(output, ticker, wg, numPeriods)
|
err := c.Init(output, ticker, wg, numPeriods)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -2,18 +2,17 @@ package metricRouter
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
|
||||||
lp "github.com/ClusterCockpit/cc-lib/ccMessage"
|
|
||||||
mp "github.com/ClusterCockpit/cc-lib/messageProcessor"
|
|
||||||
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
|
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
|
||||||
|
units "github.com/ClusterCockpit/cc-units"
|
||||||
)
|
)
|
||||||
|
|
||||||
const ROUTER_MAX_FORWARD = 50
|
const ROUTER_MAX_FORWARD = 50
|
||||||
@@ -39,17 +38,16 @@ type metricRouterConfig struct {
|
|||||||
MaxForward int `json:"max_forward"` // Number of maximal forwarded metrics at one select
|
MaxForward int `json:"max_forward"` // Number of maximal forwarded metrics at one select
|
||||||
NormalizeUnits bool `json:"normalize_units"` // Check unit meta flag and normalize it using cc-units
|
NormalizeUnits bool `json:"normalize_units"` // Check unit meta flag and normalize it using cc-units
|
||||||
ChangeUnitPrefix map[string]string `json:"change_unit_prefix"` // Add prefix that should be applied to the metrics
|
ChangeUnitPrefix map[string]string `json:"change_unit_prefix"` // Add prefix that should be applied to the metrics
|
||||||
// dropMetrics map[string]bool // Internal map for O(1) lookup
|
dropMetrics map[string]bool // Internal map for O(1) lookup
|
||||||
MessageProcessor json.RawMessage `json:"process_messages,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metric router data structure
|
// Metric router data structure
|
||||||
type metricRouter struct {
|
type metricRouter struct {
|
||||||
hostname string // Hostname used in tags
|
hostname string // Hostname used in tags
|
||||||
coll_input chan lp.CCMessage // Input channel from CollectorManager
|
coll_input chan lp.CCMetric // Input channel from CollectorManager
|
||||||
recv_input chan lp.CCMessage // Input channel from ReceiveManager
|
recv_input chan lp.CCMetric // Input channel from ReceiveManager
|
||||||
cache_input chan lp.CCMessage // Input channel from MetricCache
|
cache_input chan lp.CCMetric // Input channel from MetricCache
|
||||||
outputs []chan lp.CCMessage // List of all output channels
|
outputs []chan lp.CCMetric // List of all output channels
|
||||||
done chan bool // channel to finish / stop metric router
|
done chan bool // channel to finish / stop metric router
|
||||||
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
|
wg *sync.WaitGroup // wait group for all goroutines in cc-metric-collector
|
||||||
timestamp time.Time // timestamp periodically updated by ticker each interval
|
timestamp time.Time // timestamp periodically updated by ticker each interval
|
||||||
@@ -58,15 +56,14 @@ type metricRouter struct {
|
|||||||
cache MetricCache // pointer to MetricCache
|
cache MetricCache // pointer to MetricCache
|
||||||
cachewg sync.WaitGroup // wait group for MetricCache
|
cachewg sync.WaitGroup // wait group for MetricCache
|
||||||
maxForward int // number of metrics to forward maximally in one iteration
|
maxForward int // number of metrics to forward maximally in one iteration
|
||||||
mp mp.MessageProcessor
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// MetricRouter access functions
|
// MetricRouter access functions
|
||||||
type MetricRouter interface {
|
type MetricRouter interface {
|
||||||
Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfig json.RawMessage) error
|
Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error
|
||||||
AddCollectorInput(input chan lp.CCMessage)
|
AddCollectorInput(input chan lp.CCMetric)
|
||||||
AddReceiverInput(input chan lp.CCMessage)
|
AddReceiverInput(input chan lp.CCMetric)
|
||||||
AddOutput(output chan lp.CCMessage)
|
AddOutput(output chan lp.CCMetric)
|
||||||
Start()
|
Start()
|
||||||
Close()
|
Close()
|
||||||
}
|
}
|
||||||
@@ -77,10 +74,10 @@ type MetricRouter interface {
|
|||||||
// * wait group synchronization (from variable wg)
|
// * wait group synchronization (from variable wg)
|
||||||
// * ticker (from variable ticker)
|
// * ticker (from variable ticker)
|
||||||
// * configuration (read from config file in variable routerConfigFile)
|
// * configuration (read from config file in variable routerConfigFile)
|
||||||
func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfig json.RawMessage) error {
|
func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) error {
|
||||||
r.outputs = make([]chan lp.CCMessage, 0)
|
r.outputs = make([]chan lp.CCMetric, 0)
|
||||||
r.done = make(chan bool)
|
r.done = make(chan bool)
|
||||||
r.cache_input = make(chan lp.CCMessage)
|
r.cache_input = make(chan lp.CCMetric)
|
||||||
r.wg = wg
|
r.wg = wg
|
||||||
r.ticker = ticker
|
r.ticker = ticker
|
||||||
r.config.MaxForward = ROUTER_MAX_FORWARD
|
r.config.MaxForward = ROUTER_MAX_FORWARD
|
||||||
@@ -95,7 +92,15 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
|
|||||||
// Drop domain part of host name
|
// Drop domain part of host name
|
||||||
r.hostname = strings.SplitN(hostname, `.`, 2)[0]
|
r.hostname = strings.SplitN(hostname, `.`, 2)[0]
|
||||||
|
|
||||||
err = json.Unmarshal(routerConfig, &r.config)
|
// Read metric router config file
|
||||||
|
configFile, err := os.Open(routerConfigFile)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError("MetricRouter", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer configFile.Close()
|
||||||
|
jsonParser := json.NewDecoder(configFile)
|
||||||
|
err = jsonParser.Decode(&r.config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclog.ComponentError("MetricRouter", err.Error())
|
cclog.ComponentError("MetricRouter", err.Error())
|
||||||
return err
|
return err
|
||||||
@@ -114,56 +119,14 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
|
|||||||
r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
|
r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
p, err := mp.NewMessageProcessor()
|
r.config.dropMetrics = make(map[string]bool)
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("initialization of message processor failed: %v", err.Error())
|
|
||||||
}
|
|
||||||
r.mp = p
|
|
||||||
|
|
||||||
if len(r.config.MessageProcessor) > 0 {
|
|
||||||
err = r.mp.FromConfigJSON(r.config.MessageProcessor)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed parsing JSON for message processor: %v", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, mname := range r.config.DropMetrics {
|
for _, mname := range r.config.DropMetrics {
|
||||||
r.mp.AddDropMessagesByName(mname)
|
r.config.dropMetrics[mname] = true
|
||||||
}
|
}
|
||||||
for _, cond := range r.config.DropMetricsIf {
|
|
||||||
r.mp.AddDropMessagesByCondition(cond)
|
|
||||||
}
|
|
||||||
for _, data := range r.config.AddTags {
|
|
||||||
cond := data.Condition
|
|
||||||
if cond == "*" {
|
|
||||||
cond = "true"
|
|
||||||
}
|
|
||||||
r.mp.AddAddTagsByCondition(cond, data.Key, data.Value)
|
|
||||||
}
|
|
||||||
for _, data := range r.config.DelTags {
|
|
||||||
cond := data.Condition
|
|
||||||
if cond == "*" {
|
|
||||||
cond = "true"
|
|
||||||
}
|
|
||||||
r.mp.AddDeleteTagsByCondition(cond, data.Key, data.Value)
|
|
||||||
}
|
|
||||||
for oldname, newname := range r.config.RenameMetrics {
|
|
||||||
r.mp.AddRenameMetricByName(oldname, newname)
|
|
||||||
}
|
|
||||||
for metricName, prefix := range r.config.ChangeUnitPrefix {
|
|
||||||
r.mp.AddChangeUnitPrefix(fmt.Sprintf("name == '%s'", metricName), prefix)
|
|
||||||
}
|
|
||||||
r.mp.SetNormalizeUnits(r.config.NormalizeUnits)
|
|
||||||
|
|
||||||
r.mp.AddAddTagsByCondition("true", r.config.HostnameTagName, r.hostname)
|
|
||||||
|
|
||||||
// r.config.dropMetrics = make(map[string]bool)
|
|
||||||
// for _, mname := range r.config.DropMetrics {
|
|
||||||
// r.config.dropMetrics[mname] = true
|
|
||||||
// }
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getParamMap(point lp.CCMessage) map[string]interface{} {
|
func getParamMap(point lp.CCMetric) map[string]interface{} {
|
||||||
params := make(map[string]interface{})
|
params := make(map[string]interface{})
|
||||||
params["metric"] = point
|
params["metric"] = point
|
||||||
params["name"] = point.Name()
|
params["name"] = point.Name()
|
||||||
@@ -181,7 +144,7 @@ func getParamMap(point lp.CCMessage) map[string]interface{} {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DoAddTags adds a tag when condition is fullfiled
|
// DoAddTags adds a tag when condition is fullfiled
|
||||||
func (r *metricRouter) DoAddTags(point lp.CCMessage) {
|
func (r *metricRouter) DoAddTags(point lp.CCMetric) {
|
||||||
var conditionMatches bool
|
var conditionMatches bool
|
||||||
for _, m := range r.config.AddTags {
|
for _, m := range r.config.AddTags {
|
||||||
if m.Condition == "*" {
|
if m.Condition == "*" {
|
||||||
@@ -203,81 +166,81 @@ func (r *metricRouter) DoAddTags(point lp.CCMessage) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DoDelTags removes a tag when condition is fullfiled
|
// DoDelTags removes a tag when condition is fullfiled
|
||||||
// func (r *metricRouter) DoDelTags(point lp.CCMessage) {
|
func (r *metricRouter) DoDelTags(point lp.CCMetric) {
|
||||||
// var conditionMatches bool
|
var conditionMatches bool
|
||||||
// for _, m := range r.config.DelTags {
|
for _, m := range r.config.DelTags {
|
||||||
// if m.Condition == "*" {
|
if m.Condition == "*" {
|
||||||
// // Condition is always matched
|
// Condition is always matched
|
||||||
// conditionMatches = true
|
conditionMatches = true
|
||||||
// } else {
|
} else {
|
||||||
// // Evaluate condition
|
// Evaluate condition
|
||||||
// var err error
|
var err error
|
||||||
// conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
|
conditionMatches, err = agg.EvalBoolCondition(m.Condition, getParamMap(point))
|
||||||
// if err != nil {
|
if err != nil {
|
||||||
// cclog.ComponentError("MetricRouter", err.Error())
|
cclog.ComponentError("MetricRouter", err.Error())
|
||||||
// conditionMatches = false
|
conditionMatches = false
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
// if conditionMatches {
|
if conditionMatches {
|
||||||
// point.RemoveTag(m.Key)
|
point.RemoveTag(m.Key)
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
|
|
||||||
// Conditional test whether a metric should be dropped
|
// Conditional test whether a metric should be dropped
|
||||||
// func (r *metricRouter) dropMetric(point lp.CCMessage) bool {
|
func (r *metricRouter) dropMetric(point lp.CCMetric) bool {
|
||||||
// // Simple drop check
|
// Simple drop check
|
||||||
// if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
|
if conditionMatches, ok := r.config.dropMetrics[point.Name()]; ok {
|
||||||
// return conditionMatches
|
return conditionMatches
|
||||||
// }
|
}
|
||||||
|
|
||||||
// // Checking the dropping conditions
|
// Checking the dropping conditions
|
||||||
// for _, m := range r.config.DropMetricsIf {
|
for _, m := range r.config.DropMetricsIf {
|
||||||
// conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
|
conditionMatches, err := agg.EvalBoolCondition(m, getParamMap(point))
|
||||||
// if err != nil {
|
if err != nil {
|
||||||
// cclog.ComponentError("MetricRouter", err.Error())
|
cclog.ComponentError("MetricRouter", err.Error())
|
||||||
// conditionMatches = false
|
conditionMatches = false
|
||||||
// }
|
}
|
||||||
// if conditionMatches {
|
if conditionMatches {
|
||||||
// return conditionMatches
|
return conditionMatches
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
|
|
||||||
// // No dropping condition met
|
// No dropping condition met
|
||||||
// return false
|
return false
|
||||||
// }
|
}
|
||||||
|
|
||||||
// func (r *metricRouter) prepareUnit(point lp.CCMessage) bool {
|
func (r *metricRouter) prepareUnit(point lp.CCMetric) bool {
|
||||||
// if r.config.NormalizeUnits {
|
if r.config.NormalizeUnits {
|
||||||
// if in_unit, ok := point.GetMeta("unit"); ok {
|
if in_unit, ok := point.GetMeta("unit"); ok {
|
||||||
// u := units.NewUnit(in_unit)
|
u := units.NewUnit(in_unit)
|
||||||
// if u.Valid() {
|
if u.Valid() {
|
||||||
// point.AddMeta("unit", u.Short())
|
point.AddMeta("unit", u.Short())
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
// if newP, ok := r.config.ChangeUnitPrefix[point.Name()]; ok {
|
if newP, ok := r.config.ChangeUnitPrefix[point.Name()]; ok {
|
||||||
|
|
||||||
// newPrefix := units.NewPrefix(newP)
|
newPrefix := units.NewPrefix(newP)
|
||||||
|
|
||||||
// if in_unit, ok := point.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
|
if in_unit, ok := point.GetMeta("unit"); ok && newPrefix != units.InvalidPrefix {
|
||||||
// u := units.NewUnit(in_unit)
|
u := units.NewUnit(in_unit)
|
||||||
// if u.Valid() {
|
if u.Valid() {
|
||||||
// cclog.ComponentDebug("MetricRouter", "Change prefix to", newP, "for metric", point.Name())
|
cclog.ComponentDebug("MetricRouter", "Change prefix to", newP, "for metric", point.Name())
|
||||||
// conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
|
conv, out_unit := units.GetUnitPrefixFactor(u, newPrefix)
|
||||||
// if conv != nil && out_unit.Valid() {
|
if conv != nil && out_unit.Valid() {
|
||||||
// if val, ok := point.GetField("value"); ok {
|
if val, ok := point.GetField("value"); ok {
|
||||||
// point.AddField("value", conv(val))
|
point.AddField("value", conv(val))
|
||||||
// point.AddMeta("unit", out_unit.Short())
|
point.AddMeta("unit", out_unit.Short())
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
|
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
|
|
||||||
// return true
|
return true
|
||||||
// }
|
}
|
||||||
|
|
||||||
// Start starts the metric router
|
// Start starts the metric router
|
||||||
func (r *metricRouter) Start() {
|
func (r *metricRouter) Start() {
|
||||||
@@ -296,75 +259,59 @@ func (r *metricRouter) Start() {
|
|||||||
|
|
||||||
// Forward takes a received metric, adds or deletes tags
|
// Forward takes a received metric, adds or deletes tags
|
||||||
// and forwards it to the output channels
|
// and forwards it to the output channels
|
||||||
// forward := func(point lp.CCMessage) {
|
forward := func(point lp.CCMetric) {
|
||||||
// cclog.ComponentDebug("MetricRouter", "FORWARD", point)
|
cclog.ComponentDebug("MetricRouter", "FORWARD", point)
|
||||||
// r.DoAddTags(point)
|
r.DoAddTags(point)
|
||||||
// r.DoDelTags(point)
|
r.DoDelTags(point)
|
||||||
// name := point.Name()
|
name := point.Name()
|
||||||
// if new, ok := r.config.RenameMetrics[name]; ok {
|
if new, ok := r.config.RenameMetrics[name]; ok {
|
||||||
// point.SetName(new)
|
point.SetName(new)
|
||||||
// point.AddMeta("oldname", name)
|
point.AddMeta("oldname", name)
|
||||||
// r.DoAddTags(point)
|
r.DoAddTags(point)
|
||||||
// r.DoDelTags(point)
|
r.DoDelTags(point)
|
||||||
// }
|
}
|
||||||
|
|
||||||
// r.prepareUnit(point)
|
r.prepareUnit(point)
|
||||||
|
|
||||||
// for _, o := range r.outputs {
|
for _, o := range r.outputs {
|
||||||
// o <- point
|
o <- point
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
|
|
||||||
// Foward message received from collector channel
|
// Foward message received from collector channel
|
||||||
coll_forward := func(p lp.CCMessage) {
|
coll_forward := func(p lp.CCMetric) {
|
||||||
// receive from metric collector
|
// receive from metric collector
|
||||||
//p.AddTag(r.config.HostnameTagName, r.hostname)
|
p.AddTag(r.config.HostnameTagName, r.hostname)
|
||||||
if r.config.IntervalStamp {
|
if r.config.IntervalStamp {
|
||||||
p.SetTime(r.timestamp)
|
p.SetTime(r.timestamp)
|
||||||
}
|
}
|
||||||
m, err := r.mp.ProcessMessage(p)
|
if !r.dropMetric(p) {
|
||||||
if err == nil && m != nil {
|
forward(p)
|
||||||
for _, o := range r.outputs {
|
|
||||||
o <- m
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
// if !r.dropMetric(p) {
|
|
||||||
// for _, o := range r.outputs {
|
|
||||||
// o <- point
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// even if the metric is dropped, it is stored in the cache for
|
// even if the metric is dropped, it is stored in the cache for
|
||||||
// aggregations
|
// aggregations
|
||||||
if r.config.NumCacheIntervals > 0 {
|
if r.config.NumCacheIntervals > 0 {
|
||||||
r.cache.Add(m)
|
r.cache.Add(p)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Forward message received from receivers channel
|
// Forward message received from receivers channel
|
||||||
recv_forward := func(p lp.CCMessage) {
|
recv_forward := func(p lp.CCMetric) {
|
||||||
// receive from receive manager
|
// receive from receive manager
|
||||||
if r.config.IntervalStamp {
|
if r.config.IntervalStamp {
|
||||||
p.SetTime(r.timestamp)
|
p.SetTime(r.timestamp)
|
||||||
}
|
}
|
||||||
m, err := r.mp.ProcessMessage(p)
|
if !r.dropMetric(p) {
|
||||||
if err == nil && m != nil {
|
forward(p)
|
||||||
for _, o := range r.outputs {
|
|
||||||
o <- m
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// if !r.dropMetric(p) {
|
|
||||||
// forward(p)
|
|
||||||
// }
|
|
||||||
}
|
|
||||||
|
|
||||||
// Forward message received from cache channel
|
// Forward message received from cache channel
|
||||||
cache_forward := func(p lp.CCMessage) {
|
cache_forward := func(p lp.CCMetric) {
|
||||||
// receive from metric collector
|
// receive from metric collector
|
||||||
m, err := r.mp.ProcessMessage(p)
|
if !r.dropMetric(p) {
|
||||||
if err == nil && m != nil {
|
p.AddTag(r.config.HostnameTagName, r.hostname)
|
||||||
for _, o := range r.outputs {
|
forward(p)
|
||||||
o <- m
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -411,17 +358,17 @@ func (r *metricRouter) Start() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// AddCollectorInput adds a channel between metric collector and metric router
|
// AddCollectorInput adds a channel between metric collector and metric router
|
||||||
func (r *metricRouter) AddCollectorInput(input chan lp.CCMessage) {
|
func (r *metricRouter) AddCollectorInput(input chan lp.CCMetric) {
|
||||||
r.coll_input = input
|
r.coll_input = input
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddReceiverInput adds a channel between metric receiver and metric router
|
// AddReceiverInput adds a channel between metric receiver and metric router
|
||||||
func (r *metricRouter) AddReceiverInput(input chan lp.CCMessage) {
|
func (r *metricRouter) AddReceiverInput(input chan lp.CCMetric) {
|
||||||
r.recv_input = input
|
r.recv_input = input
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddOutput adds a output channel to the metric router
|
// AddOutput adds a output channel to the metric router
|
||||||
func (r *metricRouter) AddOutput(output chan lp.CCMessage) {
|
func (r *metricRouter) AddOutput(output chan lp.CCMetric) {
|
||||||
r.outputs = append(r.outputs, output)
|
r.outputs = append(r.outputs, output)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -441,9 +388,9 @@ func (r *metricRouter) Close() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new initialized metric router
|
// New creates a new initialized metric router
|
||||||
func New(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfig json.RawMessage) (MetricRouter, error) {
|
func New(ticker mct.MultiChanTicker, wg *sync.WaitGroup, routerConfigFile string) (MetricRouter, error) {
|
||||||
r := new(metricRouter)
|
r := new(metricRouter)
|
||||||
err := r.Init(ticker, wg, routerConfig)
|
err := r.Init(ticker, wg, routerConfigFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
113
pkg/ccLogger/cclogger.go
Normal file
113
pkg/ccLogger/cclogger.go
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
package cclogger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
globalDebug = false
|
||||||
|
stdout = os.Stdout
|
||||||
|
stderr = os.Stderr
|
||||||
|
debugLog *log.Logger = nil
|
||||||
|
infoLog *log.Logger = nil
|
||||||
|
errorLog *log.Logger = nil
|
||||||
|
warnLog *log.Logger = nil
|
||||||
|
defaultLog *log.Logger = nil
|
||||||
|
)
|
||||||
|
|
||||||
|
func initLogger() {
|
||||||
|
if debugLog == nil {
|
||||||
|
debugLog = log.New(stderr, "DEBUG ", log.LstdFlags)
|
||||||
|
}
|
||||||
|
if infoLog == nil {
|
||||||
|
infoLog = log.New(stdout, "INFO ", log.LstdFlags)
|
||||||
|
}
|
||||||
|
if errorLog == nil {
|
||||||
|
errorLog = log.New(stderr, "ERROR ", log.LstdFlags)
|
||||||
|
}
|
||||||
|
if warnLog == nil {
|
||||||
|
warnLog = log.New(stderr, "WARN ", log.LstdFlags)
|
||||||
|
}
|
||||||
|
if defaultLog == nil {
|
||||||
|
defaultLog = log.New(stdout, "", log.LstdFlags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Print(e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
defaultLog.Print(e...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ComponentPrint(component string, e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
defaultLog.Print(fmt.Sprintf("[%s] ", component), e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Info(e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
infoLog.Print(e...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ComponentInfo(component string, e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
infoLog.Print(fmt.Sprintf("[%s] ", component), e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Debug(e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
if globalDebug {
|
||||||
|
debugLog.Print(e...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ComponentDebug(component string, e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
if globalDebug && debugLog != nil {
|
||||||
|
//CCComponentPrint(debugLog, component, e)
|
||||||
|
debugLog.Print(fmt.Sprintf("[%s] ", component), e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Error(e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
_, fn, line, _ := runtime.Caller(1)
|
||||||
|
errorLog.Print(fmt.Sprintf("[%s:%d] ", fn, line), e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ComponentError(component string, e ...interface{}) {
|
||||||
|
initLogger()
|
||||||
|
_, fn, line, _ := runtime.Caller(1)
|
||||||
|
errorLog.Print(fmt.Sprintf("[%s|%s:%d] ", component, fn, line), e)
|
||||||
|
}
|
||||||
|
|
||||||
|
func SetDebug() {
|
||||||
|
globalDebug = true
|
||||||
|
initLogger()
|
||||||
|
}
|
||||||
|
|
||||||
|
func SetOutput(filename string) {
|
||||||
|
if filename == "stderr" {
|
||||||
|
if stderr != os.Stderr && stderr != os.Stdout {
|
||||||
|
stderr.Close()
|
||||||
|
}
|
||||||
|
stderr = os.Stderr
|
||||||
|
} else if filename == "stdout" {
|
||||||
|
if stderr != os.Stderr && stderr != os.Stdout {
|
||||||
|
stderr.Close()
|
||||||
|
}
|
||||||
|
stderr = os.Stdout
|
||||||
|
} else {
|
||||||
|
file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
|
||||||
|
if err == nil {
|
||||||
|
defer file.Close()
|
||||||
|
stderr = file
|
||||||
|
}
|
||||||
|
}
|
||||||
|
debugLog = nil
|
||||||
|
errorLog = nil
|
||||||
|
warnLog = nil
|
||||||
|
initLogger()
|
||||||
|
}
|
||||||
57
pkg/ccMetric/README.md
Normal file
57
pkg/ccMetric/README.md
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# ClusterCockpit metrics
|
||||||
|
|
||||||
|
As described in the [ClusterCockpit specifications](https://github.com/ClusterCockpit/cc-specifications), the whole ClusterCockpit stack uses metrics in the InfluxDB line protocol format. This is also the input and output format for the ClusterCockpit Metric Collector but internally it uses an extended format while processing, named CCMetric.
|
||||||
|
|
||||||
|
It is basically a copy of the [InfluxDB line protocol](https://github.com/influxdata/line-protocol) `MutableMetric` interface with one extension. Besides the tags and fields, it contains a list of meta information (re-using the `Tag` structure of the original protocol):
|
||||||
|
|
||||||
|
```golang
|
||||||
|
type ccMetric struct {
|
||||||
|
name string // Measurement name
|
||||||
|
meta map[string]string // map of meta data tags
|
||||||
|
tags map[string]string // map of of tags
|
||||||
|
fields map[string]interface{} // map of of fields
|
||||||
|
tm time.Time // timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
type CCMetric interface {
|
||||||
|
ToPoint(metaAsTags map[string]bool) *write.Point // Generate influxDB point for data type ccMetric
|
||||||
|
ToLineProtocol(metaAsTags map[string]bool) string // Generate influxDB line protocol for data type ccMetric
|
||||||
|
String() string // Return line-protocol like string
|
||||||
|
|
||||||
|
Name() string // Get metric name
|
||||||
|
SetName(name string) // Set metric name
|
||||||
|
|
||||||
|
Time() time.Time // Get timestamp
|
||||||
|
SetTime(t time.Time) // Set timestamp
|
||||||
|
|
||||||
|
Tags() map[string]string // Map of tags
|
||||||
|
AddTag(key, value string) // Add a tag
|
||||||
|
GetTag(key string) (value string, ok bool) // Get a tag by its key
|
||||||
|
HasTag(key string) (ok bool) // Check if a tag key is present
|
||||||
|
RemoveTag(key string) // Remove a tag by its key
|
||||||
|
|
||||||
|
Meta() map[string]string // Map of meta data tags
|
||||||
|
AddMeta(key, value string) // Add a meta data tag
|
||||||
|
GetMeta(key string) (value string, ok bool) // Get a meta data tab addressed by its key
|
||||||
|
HasMeta(key string) (ok bool) // Check if a meta data key is present
|
||||||
|
RemoveMeta(key string) // Remove a meta data tag by its key
|
||||||
|
|
||||||
|
Fields() map[string]interface{} // Map of fields
|
||||||
|
AddField(key string, value interface{}) // Add a field
|
||||||
|
GetField(key string) (value interface{}, ok bool) // Get a field addressed by its key
|
||||||
|
HasField(key string) (ok bool) // Check if a field key is present
|
||||||
|
RemoveField(key string) // Remove a field addressed by its key
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(name string, tags map[string]string, meta map[string]string, fields map[string]interface{}, tm time.Time) (CCMetric, error)
|
||||||
|
func FromMetric(other CCMetric) CCMetric
|
||||||
|
func FromInfluxMetric(other lp.Metric) CCMetric
|
||||||
|
```
|
||||||
|
|
||||||
|
The `CCMetric` interface provides the same functions as the `MutableMetric` like `{Add, Get, Remove, Has}{Tag, Field}` and additionally provides `{Add, Get, Remove, Has}Meta`.
|
||||||
|
|
||||||
|
The InfluxDB protocol creates a new metric with `influx.New(name, tags, fields, time)` while CCMetric uses `ccMetric.New(name, tags, meta, fields, time)` where `tags` and `meta` are both of type `map[string]string`.
|
||||||
|
|
||||||
|
You can copy a CCMetric with `FromMetric(other CCMetric) CCMetric`. If you get an `influx.Metric` from a function, like the line protocol parser, you can use `FromInfluxMetric(other influx.Metric) CCMetric` to get a CCMetric out of it (see `NatsReceiver` for an example).
|
||||||
|
|
||||||
|
Although the [cc-specifications](https://github.com/ClusterCockpit/cc-specifications/blob/master/interfaces/lineprotocol/README.md) defines that there is only a `value` field for the metric value, the CCMetric still can have multiple values similar to the InfluxDB line protocol.
|
||||||
368
pkg/ccMetric/ccMetric.go
Normal file
368
pkg/ccMetric/ccMetric.go
Normal file
@@ -0,0 +1,368 @@
|
|||||||
|
package ccmetric
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
|
write "github.com/influxdata/influxdb-client-go/v2/api/write"
|
||||||
|
lp "github.com/influxdata/line-protocol" // MIT license
|
||||||
|
)
|
||||||
|
|
||||||
|
// Most functions are derived from github.com/influxdata/line-protocol/metric.go
|
||||||
|
// The metric type is extended with an extra meta information list re-using the Tag
|
||||||
|
// type.
|
||||||
|
//
|
||||||
|
// See: https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/
|
||||||
|
type ccMetric struct {
|
||||||
|
name string // Measurement name
|
||||||
|
meta map[string]string // map of meta data tags
|
||||||
|
tags map[string]string // map of of tags
|
||||||
|
fields map[string]interface{} // map of of fields
|
||||||
|
tm time.Time // timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
// ccMetric access functions
|
||||||
|
type CCMetric interface {
|
||||||
|
ToPoint(metaAsTags map[string]bool) *write.Point // Generate influxDB point for data type ccMetric
|
||||||
|
ToLineProtocol(metaAsTags map[string]bool) string // Generate influxDB line protocol for data type ccMetric
|
||||||
|
|
||||||
|
Name() string // Get metric name
|
||||||
|
SetName(name string) // Set metric name
|
||||||
|
|
||||||
|
Time() time.Time // Get timestamp
|
||||||
|
SetTime(t time.Time) // Set timestamp
|
||||||
|
|
||||||
|
Tags() map[string]string // Map of tags
|
||||||
|
AddTag(key, value string) // Add a tag
|
||||||
|
GetTag(key string) (value string, ok bool) // Get a tag by its key
|
||||||
|
HasTag(key string) (ok bool) // Check if a tag key is present
|
||||||
|
RemoveTag(key string) // Remove a tag by its key
|
||||||
|
|
||||||
|
Meta() map[string]string // Map of meta data tags
|
||||||
|
AddMeta(key, value string) // Add a meta data tag
|
||||||
|
GetMeta(key string) (value string, ok bool) // Get a meta data tab addressed by its key
|
||||||
|
HasMeta(key string) (ok bool) // Check if a meta data key is present
|
||||||
|
RemoveMeta(key string) // Remove a meta data tag by its key
|
||||||
|
|
||||||
|
Fields() map[string]interface{} // Map of fields
|
||||||
|
AddField(key string, value interface{}) // Add a field
|
||||||
|
GetField(key string) (value interface{}, ok bool) // Get a field addressed by its key
|
||||||
|
HasField(key string) (ok bool) // Check if a field key is present
|
||||||
|
RemoveField(key string) // Remove a field addressed by its key
|
||||||
|
String() string // Return line-protocol like string
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the stringer interface for data type ccMetric
|
||||||
|
func (m *ccMetric) String() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"Name: %s, Tags: %+v, Meta: %+v, fields: %+v, Timestamp: %d",
|
||||||
|
m.name, m.tags, m.meta, m.fields, m.tm.UnixNano(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToLineProtocol generates influxDB line protocol for data type ccMetric
|
||||||
|
func (m *ccMetric) ToPoint(metaAsTags map[string]bool) (p *write.Point) {
|
||||||
|
p = influxdb2.NewPoint(m.name, m.tags, m.fields, m.tm)
|
||||||
|
for key, ok1 := range metaAsTags {
|
||||||
|
if val, ok2 := m.GetMeta(key); ok1 && ok2 {
|
||||||
|
p.AddTag(key, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToLineProtocol generates influxDB line protocol for data type ccMetric
|
||||||
|
func (m *ccMetric) ToLineProtocol(metaAsTags map[string]bool) string {
|
||||||
|
|
||||||
|
return write.PointToLineProtocol(
|
||||||
|
m.ToPoint(metaAsTags),
|
||||||
|
time.Nanosecond,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the measurement name
|
||||||
|
func (m *ccMetric) Name() string {
|
||||||
|
return m.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetName sets the measurement name
|
||||||
|
func (m *ccMetric) SetName(name string) {
|
||||||
|
m.name = name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Time returns timestamp
|
||||||
|
func (m *ccMetric) Time() time.Time {
|
||||||
|
return m.tm
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTime sets the timestamp
|
||||||
|
func (m *ccMetric) SetTime(t time.Time) {
|
||||||
|
m.tm = t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tags returns the the list of tags as key-value-mapping
|
||||||
|
func (m *ccMetric) Tags() map[string]string {
|
||||||
|
return m.tags
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddTag adds a tag (consisting of key and value) to the map of tags
|
||||||
|
func (m *ccMetric) AddTag(key, value string) {
|
||||||
|
m.tags[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTag returns the tag with tag's key equal to <key>
|
||||||
|
func (m *ccMetric) GetTag(key string) (string, bool) {
|
||||||
|
value, ok := m.tags[key]
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasTag checks if a tag with key equal to <key> is present in the list of tags
|
||||||
|
func (m *ccMetric) HasTag(key string) bool {
|
||||||
|
_, ok := m.tags[key]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveTag removes the tag with tag's key equal to <key>
|
||||||
|
func (m *ccMetric) RemoveTag(key string) {
|
||||||
|
delete(m.tags, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Meta returns the meta data tags as key-value mapping
|
||||||
|
func (m *ccMetric) Meta() map[string]string {
|
||||||
|
return m.meta
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMeta adds a meta data tag (consisting of key and value) to the map of meta data tags
|
||||||
|
func (m *ccMetric) AddMeta(key, value string) {
|
||||||
|
m.meta[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMeta returns the meta data tag with meta data's key equal to <key>
|
||||||
|
func (m *ccMetric) GetMeta(key string) (string, bool) {
|
||||||
|
value, ok := m.meta[key]
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasMeta checks if a meta data tag with meta data's key equal to <key> is present in the map of meta data tags
|
||||||
|
func (m *ccMetric) HasMeta(key string) bool {
|
||||||
|
_, ok := m.meta[key]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveMeta removes the meta data tag with tag's key equal to <key>
|
||||||
|
func (m *ccMetric) RemoveMeta(key string) {
|
||||||
|
delete(m.meta, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fields returns the list of fields as key-value-mapping
|
||||||
|
func (m *ccMetric) Fields() map[string]interface{} {
|
||||||
|
return m.fields
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddField adds a field (consisting of key and value) to the map of fields
|
||||||
|
func (m *ccMetric) AddField(key string, value interface{}) {
|
||||||
|
m.fields[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetField returns the field with field's key equal to <key>
|
||||||
|
func (m *ccMetric) GetField(key string) (interface{}, bool) {
|
||||||
|
v, ok := m.fields[key]
|
||||||
|
return v, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasField checks if a field with field's key equal to <key> is present in the map of fields
|
||||||
|
func (m *ccMetric) HasField(key string) bool {
|
||||||
|
_, ok := m.fields[key]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveField removes the field with field's key equal to <key>
|
||||||
|
// from the map of fields
|
||||||
|
func (m *ccMetric) RemoveField(key string) {
|
||||||
|
delete(m.fields, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new measurement point
|
||||||
|
func New(
|
||||||
|
name string,
|
||||||
|
tags map[string]string,
|
||||||
|
meta map[string]string,
|
||||||
|
fields map[string]interface{},
|
||||||
|
tm time.Time,
|
||||||
|
) (CCMetric, error) {
|
||||||
|
m := &ccMetric{
|
||||||
|
name: name,
|
||||||
|
tags: make(map[string]string, len(tags)),
|
||||||
|
meta: make(map[string]string, len(meta)),
|
||||||
|
fields: make(map[string]interface{}, len(fields)),
|
||||||
|
tm: tm,
|
||||||
|
}
|
||||||
|
|
||||||
|
// deep copy tags, meta data tags and fields
|
||||||
|
for k, v := range tags {
|
||||||
|
m.tags[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range meta {
|
||||||
|
m.meta[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range fields {
|
||||||
|
v := convertField(v)
|
||||||
|
if v == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
m.fields[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromMetric copies the metric <other>
|
||||||
|
func FromMetric(other CCMetric) CCMetric {
|
||||||
|
otags := other.Tags()
|
||||||
|
ometa := other.Meta()
|
||||||
|
ofields := other.Fields()
|
||||||
|
m := &ccMetric{
|
||||||
|
name: other.Name(),
|
||||||
|
tags: make(map[string]string, len(otags)),
|
||||||
|
meta: make(map[string]string, len(ometa)),
|
||||||
|
fields: make(map[string]interface{}, len(ofields)),
|
||||||
|
tm: other.Time(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// deep copy tags, meta data tags and fields
|
||||||
|
for key, value := range otags {
|
||||||
|
m.tags[key] = value
|
||||||
|
}
|
||||||
|
for key, value := range ometa {
|
||||||
|
m.meta[key] = value
|
||||||
|
}
|
||||||
|
for key, value := range ofields {
|
||||||
|
m.fields[key] = value
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromInfluxMetric copies the influxDB line protocol metric <other>
|
||||||
|
func FromInfluxMetric(other lp.Metric) CCMetric {
|
||||||
|
m := &ccMetric{
|
||||||
|
name: other.Name(),
|
||||||
|
tags: make(map[string]string),
|
||||||
|
meta: make(map[string]string),
|
||||||
|
fields: make(map[string]interface{}),
|
||||||
|
tm: other.Time(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// deep copy tags and fields
|
||||||
|
for _, otherTag := range other.TagList() {
|
||||||
|
m.tags[otherTag.Key] = otherTag.Value
|
||||||
|
}
|
||||||
|
for _, otherField := range other.FieldList() {
|
||||||
|
m.fields[otherField.Key] = otherField.Value
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertField converts data types of fields by the following schemata:
|
||||||
|
// *float32, *float64, float32, float64 -> float64
|
||||||
|
// *int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
|
||||||
|
// *uint, *uint8, *uint16, *uint32, *uint64, uint, uint8, uint16, uint32, uint64 -> uint64
|
||||||
|
// *[]byte, *string, []byte, string -> string
|
||||||
|
// *bool, bool -> bool
|
||||||
|
func convertField(v interface{}) interface{} {
|
||||||
|
switch v := v.(type) {
|
||||||
|
case float64:
|
||||||
|
return v
|
||||||
|
case int64:
|
||||||
|
return v
|
||||||
|
case string:
|
||||||
|
return v
|
||||||
|
case bool:
|
||||||
|
return v
|
||||||
|
case int:
|
||||||
|
return int64(v)
|
||||||
|
case uint:
|
||||||
|
return uint64(v)
|
||||||
|
case uint64:
|
||||||
|
return uint64(v)
|
||||||
|
case []byte:
|
||||||
|
return string(v)
|
||||||
|
case int32:
|
||||||
|
return int64(v)
|
||||||
|
case int16:
|
||||||
|
return int64(v)
|
||||||
|
case int8:
|
||||||
|
return int64(v)
|
||||||
|
case uint32:
|
||||||
|
return uint64(v)
|
||||||
|
case uint16:
|
||||||
|
return uint64(v)
|
||||||
|
case uint8:
|
||||||
|
return uint64(v)
|
||||||
|
case float32:
|
||||||
|
return float64(v)
|
||||||
|
case *float64:
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
case *int64:
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
case *string:
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
case *bool:
|
||||||
|
if v != nil {
|
||||||
|
return *v
|
||||||
|
}
|
||||||
|
case *int:
|
||||||
|
if v != nil {
|
||||||
|
return int64(*v)
|
||||||
|
}
|
||||||
|
case *uint:
|
||||||
|
if v != nil {
|
||||||
|
return uint64(*v)
|
||||||
|
}
|
||||||
|
case *uint64:
|
||||||
|
if v != nil {
|
||||||
|
return uint64(*v)
|
||||||
|
}
|
||||||
|
case *[]byte:
|
||||||
|
if v != nil {
|
||||||
|
return string(*v)
|
||||||
|
}
|
||||||
|
case *int32:
|
||||||
|
if v != nil {
|
||||||
|
return int64(*v)
|
||||||
|
}
|
||||||
|
case *int16:
|
||||||
|
if v != nil {
|
||||||
|
return int64(*v)
|
||||||
|
}
|
||||||
|
case *int8:
|
||||||
|
if v != nil {
|
||||||
|
return int64(*v)
|
||||||
|
}
|
||||||
|
case *uint32:
|
||||||
|
if v != nil {
|
||||||
|
return uint64(*v)
|
||||||
|
}
|
||||||
|
case *uint16:
|
||||||
|
if v != nil {
|
||||||
|
return uint64(*v)
|
||||||
|
}
|
||||||
|
case *uint8:
|
||||||
|
if v != nil {
|
||||||
|
return uint64(*v)
|
||||||
|
}
|
||||||
|
case *float32:
|
||||||
|
if v != nil {
|
||||||
|
return float64(*v)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -2,6 +2,7 @@ package ccTopology
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -9,248 +10,94 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
cclogger "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclogger "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
"golang.org/x/exp/slices"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const SYSFS_NUMABASE = `/sys/devices/system/node`
|
||||||
const SYSFS_CPUBASE = `/sys/devices/system/cpu`
|
const SYSFS_CPUBASE = `/sys/devices/system/cpu`
|
||||||
|
const PROCFS_CPUINFO = `/proc/cpuinfo`
|
||||||
|
|
||||||
// Structure holding all information about a hardware thread
|
// intArrayContains scans an array of ints if the value str is present in the array
|
||||||
// See https://www.kernel.org/doc/Documentation/ABI/stable/sysfs-devices-system-cpu
|
// If the specified value is found, the corresponding array index is returned.
|
||||||
type HwthreadEntry struct {
|
// The bool value is used to signal success or failure
|
||||||
// for each CPUx:
|
func intArrayContains(array []int, str int) (int, bool) {
|
||||||
CpuID int // CPU / hardware thread ID
|
for i, a := range array {
|
||||||
SMT int // Simultaneous Multithreading ID
|
if a == str {
|
||||||
CoreCPUsList []int // CPUs within the same core
|
return i, true
|
||||||
Core int // Socket local core ID
|
}
|
||||||
Socket int // Sockets (physical) ID
|
}
|
||||||
Die int // Die ID
|
return -1, false
|
||||||
NumaDomain int // NUMA Domain
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var cache struct {
|
// Used internally for sysfs file reads
|
||||||
HwthreadList []int // List of CPU hardware threads
|
|
||||||
SMTList []int // List of symmetric hyper threading IDs
|
|
||||||
CoreList []int // List of CPU core IDs
|
|
||||||
SocketList []int // List of CPU sockets (physical) IDs
|
|
||||||
DieList []int // List of CPU Die IDs
|
|
||||||
NumaDomainList []int // List of NUMA Domains
|
|
||||||
|
|
||||||
CpuData []HwthreadEntry
|
|
||||||
}
|
|
||||||
|
|
||||||
// fileToInt reads an integer value from a sysfs file
|
|
||||||
// In case of an error -1 is returned
|
|
||||||
func fileToInt(path string) int {
|
func fileToInt(path string) int {
|
||||||
buffer, err := os.ReadFile(path)
|
buffer, err := ioutil.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Print(err)
|
log.Print(err)
|
||||||
cclogger.ComponentError("ccTopology", "fileToInt", "Reading", path, ":", err.Error())
|
cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
stringBuffer := strings.TrimSpace(string(buffer))
|
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
|
||||||
id, err := strconv.Atoi(stringBuffer)
|
var id int64
|
||||||
|
//_, err = fmt.Scanf("%d", sbuffer, &id)
|
||||||
|
id, err = strconv.ParseInt(sbuffer, 10, 32)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cclogger.ComponentError("ccTopology", "fileToInt", "Parsing", path, ":", stringBuffer, err.Error())
|
cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
return id
|
return int(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// fileToList reads a list from a sysfs file
|
// Get list of CPU socket IDs
|
||||||
// A list consists of value ranges separated by colon
|
|
||||||
// A range can be a single value or a range of values given by a startValue-endValue
|
|
||||||
// In case of an error nil is returned
|
|
||||||
func fileToList(path string) []int {
|
|
||||||
// Read list
|
|
||||||
buffer, err := os.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
log.Print(err)
|
|
||||||
cclogger.ComponentError("ccTopology", "fileToList", "Reading", path, ":", err.Error())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create list
|
|
||||||
list := make([]int, 0)
|
|
||||||
stringBuffer := strings.TrimSpace(string(buffer))
|
|
||||||
for _, valueRangeString := range strings.Split(stringBuffer, ",") {
|
|
||||||
valueRange := strings.Split(valueRangeString, "-")
|
|
||||||
switch len(valueRange) {
|
|
||||||
case 1:
|
|
||||||
singleValue, err := strconv.Atoi(valueRange[0])
|
|
||||||
if err != nil {
|
|
||||||
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[0], ":", err.Error())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
list = append(list, singleValue)
|
|
||||||
case 2:
|
|
||||||
startValue, err := strconv.Atoi(valueRange[0])
|
|
||||||
if err != nil {
|
|
||||||
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[0], ":", err.Error())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
endValue, err := strconv.Atoi(valueRange[1])
|
|
||||||
if err != nil {
|
|
||||||
cclogger.ComponentError("CCTopology", "fileToList", "Parsing", valueRange[1], ":", err.Error())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for value := startValue; value <= endValue; value++ {
|
|
||||||
list = append(list, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes the cache structure
|
|
||||||
func init() {
|
|
||||||
|
|
||||||
getHWThreads :=
|
|
||||||
func() []int {
|
|
||||||
globPath := filepath.Join(SYSFS_CPUBASE, "cpu[0-9]*")
|
|
||||||
regexPath := filepath.Join(SYSFS_CPUBASE, "cpu([[:digit:]]+)")
|
|
||||||
regex := regexp.MustCompile(regexPath)
|
|
||||||
|
|
||||||
// File globbing for hardware threads
|
|
||||||
files, err := filepath.Glob(globPath)
|
|
||||||
if err != nil {
|
|
||||||
cclogger.ComponentError("CCTopology", "init:getHWThreads", err.Error())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
hwThreadIDs := make([]int, len(files))
|
|
||||||
for i, file := range files {
|
|
||||||
// Extract hardware thread ID
|
|
||||||
matches := regex.FindStringSubmatch(file)
|
|
||||||
if len(matches) != 2 {
|
|
||||||
cclogger.ComponentError("CCTopology", "init:getHWThreads: Failed to extract hardware thread ID from ", file)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert hardware thread ID to int
|
|
||||||
id, err := strconv.Atoi(matches[1])
|
|
||||||
if err != nil {
|
|
||||||
cclogger.ComponentError("CCTopology", "init:getHWThreads: Failed to convert to int hardware thread ID ", matches[1])
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
hwThreadIDs[i] = id
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort hardware thread IDs
|
|
||||||
slices.Sort(hwThreadIDs)
|
|
||||||
return hwThreadIDs
|
|
||||||
}
|
|
||||||
|
|
||||||
getNumaDomain :=
|
|
||||||
func(basePath string) int {
|
|
||||||
globPath := filepath.Join(basePath, "node*")
|
|
||||||
regexPath := filepath.Join(basePath, "node([[:digit:]]+)")
|
|
||||||
regex := regexp.MustCompile(regexPath)
|
|
||||||
|
|
||||||
// File globbing for NUMA node
|
|
||||||
files, err := filepath.Glob(globPath)
|
|
||||||
if err != nil {
|
|
||||||
cclogger.ComponentError("CCTopology", "init:getNumaDomain", err.Error())
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check, that exactly one NUMA domain was found
|
|
||||||
if len(files) != 1 {
|
|
||||||
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Number of NUMA domains != 1: ", len(files))
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract NUMA node ID
|
|
||||||
matches := regex.FindStringSubmatch(files[0])
|
|
||||||
if len(matches) != 2 {
|
|
||||||
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Failed to extract NUMA node ID from: ", files[0])
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
id, err := strconv.Atoi(matches[1])
|
|
||||||
if err != nil {
|
|
||||||
cclogger.ComponentError("CCTopology", "init:getNumaDomain", "Failed to parse NUMA node ID from: ", matches[1])
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
return id
|
|
||||||
}
|
|
||||||
|
|
||||||
cache.HwthreadList = getHWThreads()
|
|
||||||
cache.CoreList = make([]int, len(cache.HwthreadList))
|
|
||||||
cache.SocketList = make([]int, len(cache.HwthreadList))
|
|
||||||
cache.DieList = make([]int, len(cache.HwthreadList))
|
|
||||||
cache.SMTList = make([]int, len(cache.HwthreadList))
|
|
||||||
cache.NumaDomainList = make([]int, len(cache.HwthreadList))
|
|
||||||
cache.CpuData = make([]HwthreadEntry, len(cache.HwthreadList))
|
|
||||||
for i, c := range cache.HwthreadList {
|
|
||||||
// Set cpuBase directory for topology lookup
|
|
||||||
cpuBase := filepath.Join(SYSFS_CPUBASE, fmt.Sprintf("cpu%d", c))
|
|
||||||
topoBase := filepath.Join(cpuBase, "topology")
|
|
||||||
|
|
||||||
// Lookup Core ID
|
|
||||||
cache.CoreList[i] = fileToInt(filepath.Join(topoBase, "core_id"))
|
|
||||||
|
|
||||||
// Lookup socket / physical package ID
|
|
||||||
cache.SocketList[i] = fileToInt(filepath.Join(topoBase, "physical_package_id"))
|
|
||||||
|
|
||||||
// Lookup CPU die id
|
|
||||||
cache.DieList[i] = fileToInt(filepath.Join(topoBase, "die_id"))
|
|
||||||
if cache.DieList[i] < 0 {
|
|
||||||
cache.DieList[i] = cache.SocketList[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lookup List of CPUs within the same core
|
|
||||||
coreCPUsList := fileToList(filepath.Join(topoBase, "core_cpus_list"))
|
|
||||||
|
|
||||||
// Find index of CPU ID in List of CPUs within the same core
|
|
||||||
// if not found return -1
|
|
||||||
cache.SMTList[i] = slices.Index(coreCPUsList, c)
|
|
||||||
|
|
||||||
// Lookup NUMA domain id
|
|
||||||
cache.NumaDomainList[i] = getNumaDomain(cpuBase)
|
|
||||||
|
|
||||||
cache.CpuData[i] =
|
|
||||||
HwthreadEntry{
|
|
||||||
CpuID: cache.HwthreadList[i],
|
|
||||||
SMT: cache.SMTList[i],
|
|
||||||
CoreCPUsList: coreCPUsList,
|
|
||||||
Socket: cache.SocketList[i],
|
|
||||||
NumaDomain: cache.NumaDomainList[i],
|
|
||||||
Die: cache.DieList[i],
|
|
||||||
Core: cache.CoreList[i],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
slices.Sort(cache.HwthreadList)
|
|
||||||
cache.HwthreadList = slices.Compact(cache.HwthreadList)
|
|
||||||
|
|
||||||
slices.Sort(cache.SMTList)
|
|
||||||
cache.SMTList = slices.Compact(cache.SMTList)
|
|
||||||
|
|
||||||
slices.Sort(cache.CoreList)
|
|
||||||
cache.CoreList = slices.Compact(cache.CoreList)
|
|
||||||
|
|
||||||
slices.Sort(cache.SocketList)
|
|
||||||
cache.SocketList = slices.Compact(cache.SocketList)
|
|
||||||
|
|
||||||
slices.Sort(cache.DieList)
|
|
||||||
cache.DieList = slices.Compact(cache.DieList)
|
|
||||||
|
|
||||||
slices.Sort(cache.NumaDomainList)
|
|
||||||
cache.NumaDomainList = slices.Compact(cache.NumaDomainList)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SocketList gets the list of CPU socket IDs
|
|
||||||
func SocketList() []int {
|
func SocketList() []int {
|
||||||
return slices.Clone(cache.SocketList)
|
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ll := strings.Split(string(buffer), "\n")
|
||||||
|
packs := make([]int, 0)
|
||||||
|
for _, line := range ll {
|
||||||
|
if strings.HasPrefix(line, "physical id") {
|
||||||
|
lv := strings.Fields(line)
|
||||||
|
id, err := strconv.ParseInt(lv[3], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return packs
|
||||||
|
}
|
||||||
|
_, found := intArrayContains(packs, int(id))
|
||||||
|
if !found {
|
||||||
|
packs = append(packs, int(id))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return packs
|
||||||
}
|
}
|
||||||
|
|
||||||
// HwthreadList gets the list of hardware thread IDs in the order of listing in /proc/cpuinfo
|
// Get list of hardware thread IDs in the order of listing in /proc/cpuinfo
|
||||||
func HwthreadList() []int {
|
func HwthreadList() []int {
|
||||||
return slices.Clone(cache.HwthreadList)
|
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ll := strings.Split(string(buffer), "\n")
|
||||||
|
cpulist := make([]int, 0)
|
||||||
|
for _, line := range ll {
|
||||||
|
if strings.HasPrefix(line, "processor") {
|
||||||
|
lv := strings.Fields(line)
|
||||||
|
id, err := strconv.ParseInt(lv[2], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return cpulist
|
||||||
|
}
|
||||||
|
_, found := intArrayContains(cpulist, int(id))
|
||||||
|
if !found {
|
||||||
|
cpulist = append(cpulist, int(id))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cpulist
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get list of hardware thread IDs in the order of listing in /proc/cpuinfo
|
// Get list of hardware thread IDs in the order of listing in /proc/cpuinfo
|
||||||
@@ -259,25 +106,88 @@ func CpuList() []int {
|
|||||||
return HwthreadList()
|
return HwthreadList()
|
||||||
}
|
}
|
||||||
|
|
||||||
// CoreList gets the list of CPU core IDs in the order of listing in /proc/cpuinfo
|
// Get list of CPU core IDs in the order of listing in /proc/cpuinfo
|
||||||
func CoreList() []int {
|
func CoreList() []int {
|
||||||
return slices.Clone(cache.CoreList)
|
buffer, err := ioutil.ReadFile(string(PROCFS_CPUINFO))
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ll := strings.Split(string(buffer), "\n")
|
||||||
|
corelist := make([]int, 0)
|
||||||
|
for _, line := range ll {
|
||||||
|
if strings.HasPrefix(line, "core id") {
|
||||||
|
lv := strings.Fields(line)
|
||||||
|
id, err := strconv.ParseInt(lv[3], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
log.Print(err)
|
||||||
|
return corelist
|
||||||
|
}
|
||||||
|
_, found := intArrayContains(corelist, int(id))
|
||||||
|
if !found {
|
||||||
|
corelist = append(corelist, int(id))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return corelist
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get list of NUMA node IDs
|
// Get list of NUMA node IDs
|
||||||
func NumaNodeList() []int {
|
func NumaNodeList() []int {
|
||||||
return slices.Clone(cache.NumaDomainList)
|
numaList := make([]int, 0)
|
||||||
|
globPath := filepath.Join(string(SYSFS_NUMABASE), "node*")
|
||||||
|
regexPath := filepath.Join(string(SYSFS_NUMABASE), "node(\\d+)")
|
||||||
|
regex := regexp.MustCompile(regexPath)
|
||||||
|
files, err := filepath.Glob(globPath)
|
||||||
|
if err != nil {
|
||||||
|
cclogger.ComponentError("CCTopology", "NumaNodeList", err.Error())
|
||||||
|
}
|
||||||
|
for _, f := range files {
|
||||||
|
if !regex.MatchString(f) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
finfo, err := os.Lstat(f)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !finfo.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
matches := regex.FindStringSubmatch(f)
|
||||||
|
if len(matches) == 2 {
|
||||||
|
id, err := strconv.Atoi(matches[1])
|
||||||
|
if err == nil {
|
||||||
|
if _, found := intArrayContains(numaList, id); !found {
|
||||||
|
numaList = append(numaList, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return numaList
|
||||||
}
|
}
|
||||||
|
|
||||||
// DieList gets the list of CPU die IDs
|
// Get list of CPU die IDs
|
||||||
func DieList() []int {
|
func DieList() []int {
|
||||||
if len(cache.DieList) > 0 {
|
cpulist := HwthreadList()
|
||||||
return slices.Clone(cache.DieList)
|
dielist := make([]int, 0)
|
||||||
|
for _, c := range cpulist {
|
||||||
|
diepath := filepath.Join(string(SYSFS_CPUBASE), fmt.Sprintf("cpu%d", c), "topology/die_id")
|
||||||
|
dieid := fileToInt(diepath)
|
||||||
|
if dieid > 0 {
|
||||||
|
_, found := intArrayContains(dielist, int(dieid))
|
||||||
|
if !found {
|
||||||
|
dielist = append(dielist, int(dieid))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(dielist) > 0 {
|
||||||
|
return dielist
|
||||||
}
|
}
|
||||||
return SocketList()
|
return SocketList()
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTypeList gets the list of specified type using the naming format inside ClusterCockpit
|
// Get list of specified type using the naming format inside ClusterCockpit
|
||||||
func GetTypeList(topology_type string) []int {
|
func GetTypeList(topology_type string) []int {
|
||||||
switch topology_type {
|
switch topology_type {
|
||||||
case "node":
|
case "node":
|
||||||
@@ -296,33 +206,128 @@ func GetTypeList(topology_type string) []int {
|
|||||||
return []int{}
|
return []int{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetTypeId(hwt HwthreadEntry, topology_type string) (int, error) {
|
// Structure holding all information about a hardware thread
|
||||||
var err error = nil
|
type HwthreadEntry struct {
|
||||||
switch topology_type {
|
Cpuid int
|
||||||
case "node":
|
SMT int
|
||||||
return 0, err
|
Core int
|
||||||
case "socket":
|
Socket int
|
||||||
return hwt.Socket, err
|
Numadomain int
|
||||||
case "die":
|
Die int
|
||||||
return hwt.Die, err
|
|
||||||
case "memoryDomain":
|
|
||||||
return hwt.NumaDomain, err
|
|
||||||
case "core":
|
|
||||||
return hwt.Core, err
|
|
||||||
case "hwthread":
|
|
||||||
return hwt.CpuID, err
|
|
||||||
}
|
|
||||||
return -1, fmt.Errorf("unknown topology type '%s'", topology_type)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CpuData returns CPU data for each hardware thread
|
|
||||||
func CpuData() []HwthreadEntry {
|
func CpuData() []HwthreadEntry {
|
||||||
// return a deep copy to protect cache data
|
|
||||||
c := slices.Clone(cache.CpuData)
|
// fileToInt := func(path string) int {
|
||||||
for i := range c {
|
// buffer, err := ioutil.ReadFile(path)
|
||||||
c[i].CoreCPUsList = slices.Clone(cache.CpuData[i].CoreCPUsList)
|
// if err != nil {
|
||||||
|
// log.Print(err)
|
||||||
|
// //cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
|
||||||
|
// return -1
|
||||||
|
// }
|
||||||
|
// sbuffer := strings.Replace(string(buffer), "\n", "", -1)
|
||||||
|
// var id int64
|
||||||
|
// //_, err = fmt.Scanf("%d", sbuffer, &id)
|
||||||
|
// id, err = strconv.ParseInt(sbuffer, 10, 32)
|
||||||
|
// if err != nil {
|
||||||
|
// cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
|
||||||
|
// return -1
|
||||||
|
// }
|
||||||
|
// return int(id)
|
||||||
|
// }
|
||||||
|
getCore := func(basepath string) int {
|
||||||
|
return fileToInt(fmt.Sprintf("%s/core_id", basepath))
|
||||||
}
|
}
|
||||||
return c
|
|
||||||
|
getSocket := func(basepath string) int {
|
||||||
|
return fileToInt(fmt.Sprintf("%s/physical_package_id", basepath))
|
||||||
|
}
|
||||||
|
|
||||||
|
getDie := func(basepath string) int {
|
||||||
|
return fileToInt(fmt.Sprintf("%s/die_id", basepath))
|
||||||
|
}
|
||||||
|
|
||||||
|
getSMT := func(cpuid int, basepath string) int {
|
||||||
|
buffer, err := ioutil.ReadFile(fmt.Sprintf("%s/thread_siblings_list", basepath))
|
||||||
|
if err != nil {
|
||||||
|
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
|
||||||
|
}
|
||||||
|
threadlist := make([]int, 0)
|
||||||
|
sbuffer := strings.Replace(string(buffer), "\n", "", -1)
|
||||||
|
for _, x := range strings.Split(sbuffer, ",") {
|
||||||
|
id, err := strconv.ParseInt(x, 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
cclogger.ComponentError("CCTopology", "CpuData:getSMT", err.Error())
|
||||||
|
}
|
||||||
|
threadlist = append(threadlist, int(id))
|
||||||
|
}
|
||||||
|
for i, x := range threadlist {
|
||||||
|
if x == cpuid {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
getNumaDomain := func(basepath string) int {
|
||||||
|
globPath := filepath.Join(basepath, "node*")
|
||||||
|
regexPath := filepath.Join(basepath, "node(\\d+)")
|
||||||
|
regex := regexp.MustCompile(regexPath)
|
||||||
|
files, err := filepath.Glob(globPath)
|
||||||
|
if err != nil {
|
||||||
|
cclogger.ComponentError("CCTopology", "CpuData:getNumaDomain", err.Error())
|
||||||
|
}
|
||||||
|
for _, f := range files {
|
||||||
|
finfo, err := os.Lstat(f)
|
||||||
|
if err == nil && finfo.IsDir() {
|
||||||
|
matches := regex.FindStringSubmatch(f)
|
||||||
|
if len(matches) == 2 {
|
||||||
|
id, err := strconv.Atoi(matches[1])
|
||||||
|
if err == nil {
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
clist := make([]HwthreadEntry, 0)
|
||||||
|
for _, c := range HwthreadList() {
|
||||||
|
clist = append(clist, HwthreadEntry{Cpuid: c})
|
||||||
|
}
|
||||||
|
for i, centry := range clist {
|
||||||
|
centry.Socket = -1
|
||||||
|
centry.Numadomain = -1
|
||||||
|
centry.Die = -1
|
||||||
|
centry.Core = -1
|
||||||
|
// Set base directory for topology lookup
|
||||||
|
cpustr := fmt.Sprintf("cpu%d", centry.Cpuid)
|
||||||
|
base := filepath.Join("/sys/devices/system/cpu", cpustr)
|
||||||
|
topoBase := filepath.Join(base, "topology")
|
||||||
|
|
||||||
|
// Lookup CPU core id
|
||||||
|
centry.Core = getCore(topoBase)
|
||||||
|
|
||||||
|
// Lookup CPU socket id
|
||||||
|
centry.Socket = getSocket(topoBase)
|
||||||
|
|
||||||
|
// Lookup CPU die id
|
||||||
|
centry.Die = getDie(topoBase)
|
||||||
|
if centry.Die < 0 {
|
||||||
|
centry.Die = centry.Socket
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup SMT thread id
|
||||||
|
centry.SMT = getSMT(centry.Cpuid, topoBase)
|
||||||
|
|
||||||
|
// Lookup NUMA domain id
|
||||||
|
centry.Numadomain = getNumaDomain(base)
|
||||||
|
|
||||||
|
// Update values in output list
|
||||||
|
clist[i] = centry
|
||||||
|
}
|
||||||
|
return clist
|
||||||
}
|
}
|
||||||
|
|
||||||
// Structure holding basic information about a CPU
|
// Structure holding basic information about a CPU
|
||||||
@@ -335,129 +340,130 @@ type CpuInformation struct {
|
|||||||
NumNumaDomains int
|
NumNumaDomains int
|
||||||
}
|
}
|
||||||
|
|
||||||
// CpuInformation reports basic information about the CPU
|
// Get basic information about the CPU
|
||||||
func CpuInfo() CpuInformation {
|
func CpuInfo() CpuInformation {
|
||||||
return CpuInformation{
|
var c CpuInformation
|
||||||
NumNumaDomains: len(cache.NumaDomainList),
|
|
||||||
SMTWidth: len(cache.SMTList),
|
smtList := make([]int, 0)
|
||||||
NumDies: len(cache.DieList),
|
numaList := make([]int, 0)
|
||||||
NumCores: len(cache.CoreList),
|
dieList := make([]int, 0)
|
||||||
NumSockets: len(cache.SocketList),
|
socketList := make([]int, 0)
|
||||||
NumHWthreads: len(cache.HwthreadList),
|
coreList := make([]int, 0)
|
||||||
|
cdata := CpuData()
|
||||||
|
for _, d := range cdata {
|
||||||
|
if _, ok := intArrayContains(smtList, d.SMT); !ok {
|
||||||
|
smtList = append(smtList, d.SMT)
|
||||||
}
|
}
|
||||||
|
if _, ok := intArrayContains(numaList, d.Numadomain); !ok {
|
||||||
|
numaList = append(numaList, d.Numadomain)
|
||||||
|
}
|
||||||
|
if _, ok := intArrayContains(dieList, d.Die); !ok {
|
||||||
|
dieList = append(dieList, d.Die)
|
||||||
|
}
|
||||||
|
if _, ok := intArrayContains(socketList, d.Socket); !ok {
|
||||||
|
socketList = append(socketList, d.Socket)
|
||||||
|
}
|
||||||
|
if _, ok := intArrayContains(coreList, d.Core); !ok {
|
||||||
|
coreList = append(coreList, d.Core)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.NumNumaDomains = len(numaList)
|
||||||
|
c.SMTWidth = len(smtList)
|
||||||
|
c.NumDies = len(dieList)
|
||||||
|
c.NumCores = len(coreList)
|
||||||
|
c.NumSockets = len(socketList)
|
||||||
|
c.NumHWthreads = len(cdata)
|
||||||
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetHwthreadSocket gets the CPU socket ID for a given hardware thread ID
|
// Get the CPU socket ID for a given hardware thread ID
|
||||||
// In case hardware thread ID is not found -1 is returned
|
func GetHwthreadSocket(cpuid int) int {
|
||||||
func GetHwthreadSocket(cpuID int) int {
|
cdata := CpuData()
|
||||||
for i := range cache.CpuData {
|
for _, d := range cdata {
|
||||||
d := &cache.CpuData[i]
|
if d.Cpuid == cpuid {
|
||||||
if d.CpuID == cpuID {
|
|
||||||
return d.Socket
|
return d.Socket
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetHwthreadNumaDomain gets the NUMA domain ID for a given hardware thread ID
|
// Get the NUMA node ID for a given hardware thread ID
|
||||||
// In case hardware thread ID is not found -1 is returned
|
func GetHwthreadNumaDomain(cpuid int) int {
|
||||||
func GetHwthreadNumaDomain(cpuID int) int {
|
cdata := CpuData()
|
||||||
for i := range cache.CpuData {
|
for _, d := range cdata {
|
||||||
d := &cache.CpuData[i]
|
if d.Cpuid == cpuid {
|
||||||
if d.CpuID == cpuID {
|
return d.Numadomain
|
||||||
return d.NumaDomain
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetHwthreadDie gets the CPU die ID for a given hardware thread ID
|
// Get the CPU die ID for a given hardware thread ID
|
||||||
// In case hardware thread ID is not found -1 is returned
|
func GetHwthreadDie(cpuid int) int {
|
||||||
func GetHwthreadDie(cpuID int) int {
|
cdata := CpuData()
|
||||||
for i := range cache.CpuData {
|
for _, d := range cdata {
|
||||||
d := &cache.CpuData[i]
|
if d.Cpuid == cpuid {
|
||||||
if d.CpuID == cpuID {
|
|
||||||
return d.Die
|
return d.Die
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetHwthreadCore gets the CPU core ID for a given hardware thread ID
|
// Get the CPU core ID for a given hardware thread ID
|
||||||
// In case hardware thread ID is not found -1 is returned
|
func GetHwthreadCore(cpuid int) int {
|
||||||
func GetHwthreadCore(cpuID int) int {
|
cdata := CpuData()
|
||||||
for i := range cache.CpuData {
|
for _, d := range cdata {
|
||||||
d := &cache.CpuData[i]
|
if d.Cpuid == cpuid {
|
||||||
if d.CpuID == cpuID {
|
|
||||||
return d.Core
|
return d.Core
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSocketHwthreads gets all hardware thread IDs associated with a CPU socket
|
// Get the all hardware thread ID associated with a CPU socket
|
||||||
func GetSocketHwthreads(socket int) []int {
|
func GetSocketHwthreads(socket int) []int {
|
||||||
cpuList := make([]int, 0)
|
all := CpuData()
|
||||||
for i := range cache.CpuData {
|
cpulist := make([]int, 0)
|
||||||
d := &cache.CpuData[i]
|
for _, d := range all {
|
||||||
if d.Socket == socket {
|
if d.Socket == socket {
|
||||||
cpuList = append(cpuList, d.CpuID)
|
cpulist = append(cpulist, d.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return cpuList
|
return cpulist
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetNumaDomainHwthreads gets the all hardware thread IDs associated with a NUMA domain
|
// Get the all hardware thread ID associated with a NUMA node
|
||||||
func GetNumaDomainHwthreads(numaDomain int) []int {
|
func GetNumaDomainHwthreads(domain int) []int {
|
||||||
cpuList := make([]int, 0)
|
all := CpuData()
|
||||||
for i := range cache.CpuData {
|
cpulist := make([]int, 0)
|
||||||
d := &cache.CpuData[i]
|
for _, d := range all {
|
||||||
if d.NumaDomain == numaDomain {
|
if d.Numadomain == domain {
|
||||||
cpuList = append(cpuList, d.CpuID)
|
cpulist = append(cpulist, d.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return cpuList
|
return cpulist
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetDieHwthreads gets all hardware thread IDs associated with a CPU die
|
// Get the all hardware thread ID associated with a CPU die
|
||||||
func GetDieHwthreads(die int) []int {
|
func GetDieHwthreads(die int) []int {
|
||||||
cpuList := make([]int, 0)
|
all := CpuData()
|
||||||
for i := range cache.CpuData {
|
cpulist := make([]int, 0)
|
||||||
d := &cache.CpuData[i]
|
for _, d := range all {
|
||||||
if d.Die == die {
|
if d.Die == die {
|
||||||
cpuList = append(cpuList, d.CpuID)
|
cpulist = append(cpulist, d.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return cpuList
|
return cpulist
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCoreHwthreads get all hardware thread IDs associated with a CPU core
|
// Get the all hardware thread ID associated with a CPU core
|
||||||
func GetCoreHwthreads(core int) []int {
|
func GetCoreHwthreads(core int) []int {
|
||||||
cpuList := make([]int, 0)
|
all := CpuData()
|
||||||
for i := range cache.CpuData {
|
cpulist := make([]int, 0)
|
||||||
d := &cache.CpuData[i]
|
for _, d := range all {
|
||||||
if d.Core == core {
|
if d.Core == core {
|
||||||
cpuList = append(cpuList, d.CpuID)
|
cpulist = append(cpulist, d.Cpuid)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return cpuList
|
return cpulist
|
||||||
}
|
|
||||||
|
|
||||||
// GetTypeList gets the list of specified type using the naming format inside ClusterCockpit
|
|
||||||
func GetTypeHwthreads(topology_type string, id int) []int {
|
|
||||||
switch topology_type {
|
|
||||||
case "node":
|
|
||||||
return HwthreadList()
|
|
||||||
case "socket":
|
|
||||||
return GetSocketHwthreads(id)
|
|
||||||
case "die":
|
|
||||||
return GetDieHwthreads(id)
|
|
||||||
case "memoryDomain":
|
|
||||||
return GetNumaDomainHwthreads(id)
|
|
||||||
case "core":
|
|
||||||
return GetCoreHwthreads(id)
|
|
||||||
case "hwthread":
|
|
||||||
return []int{id}
|
|
||||||
}
|
|
||||||
return []int{}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,125 +0,0 @@
|
|||||||
package hostlist
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Expand(in string) (result []string, err error) {
|
|
||||||
|
|
||||||
// Create ranges regular expression
|
|
||||||
reStNumber := "[[:digit:]]+"
|
|
||||||
reStRange := reStNumber + "-" + reStNumber
|
|
||||||
reStOptionalNumberOrRange := "(" + reStNumber + ",|" + reStRange + ",)*"
|
|
||||||
reStNumberOrRange := "(" + reStNumber + "|" + reStRange + ")"
|
|
||||||
reStBraceLeft := "[[]"
|
|
||||||
reStBraceRight := "[]]"
|
|
||||||
reStRanges := reStBraceLeft +
|
|
||||||
reStOptionalNumberOrRange +
|
|
||||||
reStNumberOrRange +
|
|
||||||
reStBraceRight
|
|
||||||
reRanges := regexp.MustCompile(reStRanges)
|
|
||||||
|
|
||||||
// Create host list regular expression
|
|
||||||
reStDNSChars := "[a-zA-Z0-9-]+"
|
|
||||||
reStPrefix := "^(" + reStDNSChars + ")"
|
|
||||||
reStOptionalSuffix := "(" + reStDNSChars + ")?"
|
|
||||||
re := regexp.MustCompile(reStPrefix + "([[][0-9,-]+[]])?" + reStOptionalSuffix)
|
|
||||||
|
|
||||||
// Remove all delimiters from the input
|
|
||||||
in = strings.TrimLeft(in, ", ")
|
|
||||||
|
|
||||||
for len(in) > 0 {
|
|
||||||
if v := re.FindStringSubmatch(in); v != nil {
|
|
||||||
|
|
||||||
// Remove matched part from the input
|
|
||||||
lenPrefix := len(v[0])
|
|
||||||
in = in[lenPrefix:]
|
|
||||||
|
|
||||||
// Remove all delimiters from the input
|
|
||||||
in = strings.TrimLeft(in, ", ")
|
|
||||||
|
|
||||||
// matched prefix, range and suffix
|
|
||||||
hlPrefix := v[1]
|
|
||||||
hlRanges := v[2]
|
|
||||||
hlSuffix := v[3]
|
|
||||||
|
|
||||||
// Single node without ranges
|
|
||||||
if hlRanges == "" {
|
|
||||||
result = append(result, hlPrefix)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Node with ranges
|
|
||||||
if v := reRanges.FindStringSubmatch(hlRanges); v != nil {
|
|
||||||
|
|
||||||
// Remove braces
|
|
||||||
hlRanges = hlRanges[1 : len(hlRanges)-1]
|
|
||||||
|
|
||||||
// Split host ranges at ,
|
|
||||||
for _, hlRange := range strings.Split(hlRanges, ",") {
|
|
||||||
|
|
||||||
// Split host range at -
|
|
||||||
RangeStartEnd := strings.Split(hlRange, "-")
|
|
||||||
|
|
||||||
// Range is only a single number
|
|
||||||
if len(RangeStartEnd) == 1 {
|
|
||||||
result = append(result, hlPrefix+RangeStartEnd[0]+hlSuffix)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Range has a start and an end
|
|
||||||
widthRangeStart := len(RangeStartEnd[0])
|
|
||||||
widthRangeEnd := len(RangeStartEnd[1])
|
|
||||||
iStart, _ := strconv.ParseUint(RangeStartEnd[0], 10, 64)
|
|
||||||
iEnd, _ := strconv.ParseUint(RangeStartEnd[1], 10, 64)
|
|
||||||
if iStart > iEnd {
|
|
||||||
return nil, fmt.Errorf("single range start is greater than end: %s", hlRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create print format string for range numbers
|
|
||||||
doPadding := widthRangeStart == widthRangeEnd
|
|
||||||
widthPadding := widthRangeStart
|
|
||||||
var formatString string
|
|
||||||
if doPadding {
|
|
||||||
formatString = "%0" + fmt.Sprint(widthPadding) + "d"
|
|
||||||
} else {
|
|
||||||
formatString = "%d"
|
|
||||||
}
|
|
||||||
formatString = hlPrefix + formatString + hlSuffix
|
|
||||||
|
|
||||||
// Add nodes from this range
|
|
||||||
for i := iStart; i <= iEnd; i++ {
|
|
||||||
result = append(result, fmt.Sprintf(formatString, i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("not at hostlist range: %s", hlRanges)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("not a hostlist: %s", in)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if result != nil {
|
|
||||||
// sort
|
|
||||||
sort.Strings(result)
|
|
||||||
|
|
||||||
// uniq
|
|
||||||
previous := 1
|
|
||||||
for current := 1; current < len(result); current++ {
|
|
||||||
if result[current-1] != result[current] {
|
|
||||||
if previous != current {
|
|
||||||
result[previous] = result[current]
|
|
||||||
}
|
|
||||||
previous++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
result = result[:previous]
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
@@ -1,126 +0,0 @@
|
|||||||
package hostlist
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestExpand(t *testing.T) {
|
|
||||||
|
|
||||||
// Compare two slices of strings
|
|
||||||
equal := func(a, b []string) bool {
|
|
||||||
if len(a) != len(b) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i, v := range a {
|
|
||||||
if v != b[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
type testDefinition struct {
|
|
||||||
input string
|
|
||||||
resultExpected []string
|
|
||||||
errorExpected bool
|
|
||||||
}
|
|
||||||
|
|
||||||
expandTests := []testDefinition{
|
|
||||||
{
|
|
||||||
// Single node
|
|
||||||
input: "n1",
|
|
||||||
resultExpected: []string{"n1"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Single node, duplicated
|
|
||||||
input: "n1,n1",
|
|
||||||
resultExpected: []string{"n1"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Single node with padding
|
|
||||||
input: "n[01]",
|
|
||||||
resultExpected: []string{"n01"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Single node with suffix
|
|
||||||
input: "n[01]-p",
|
|
||||||
resultExpected: []string{"n01-p"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Multiple nodes with a single range
|
|
||||||
input: "n[1-2]",
|
|
||||||
resultExpected: []string{"n1", "n2"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Multiple nodes with a single range and a single index
|
|
||||||
input: "n[1-2,3]",
|
|
||||||
resultExpected: []string{"n1", "n2", "n3"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Multiple nodes with different prefixes
|
|
||||||
input: "n[1-2],m[1,2]",
|
|
||||||
resultExpected: []string{"m1", "m2", "n1", "n2"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Multiple nodes with different suffixes
|
|
||||||
input: "n[1-2]-p,n[1,2]-q",
|
|
||||||
resultExpected: []string{"n1-p", "n1-q", "n2-p", "n2-q"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Multiple nodes with and without node ranges
|
|
||||||
input: " n09, n[01-04,06-07,09] , , n10,n04",
|
|
||||||
resultExpected: []string{"n01", "n02", "n03", "n04", "n06", "n07", "n09", "n10"},
|
|
||||||
errorExpected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Forbidden DNS character
|
|
||||||
input: "n@",
|
|
||||||
resultExpected: []string{},
|
|
||||||
errorExpected: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Forbidden range
|
|
||||||
input: "n[1-2-2,3]",
|
|
||||||
resultExpected: []string{},
|
|
||||||
errorExpected: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Forbidden range limits
|
|
||||||
input: "n[2-1]",
|
|
||||||
resultExpected: []string{},
|
|
||||||
errorExpected: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, expandTest := range expandTests {
|
|
||||||
result, err := Expand(expandTest.input)
|
|
||||||
|
|
||||||
hasError := err != nil
|
|
||||||
if hasError != expandTest.errorExpected && hasError {
|
|
||||||
t.Errorf("Expand('%s') failed: unexpected error '%v'",
|
|
||||||
expandTest.input, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if hasError != expandTest.errorExpected && !hasError {
|
|
||||||
t.Errorf("Expand('%s') did not fail as expected: got result '%+v'",
|
|
||||||
expandTest.input, result)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !hasError && !equal(result, expandTest.resultExpected) {
|
|
||||||
t.Errorf("Expand('%s') failed: got result '%+v', expected result '%v'",
|
|
||||||
expandTest.input, result, expandTest.resultExpected)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Logf("Checked hostlist.Expand('%s'): result = '%+v', err = '%v'",
|
|
||||||
expandTest.input, result, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -3,7 +3,7 @@ package multiChanTicker
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
)
|
)
|
||||||
|
|
||||||
type multiChanTicker struct {
|
type multiChanTicker struct {
|
||||||
|
|||||||
@@ -1,43 +1,24 @@
|
|||||||
{
|
{
|
||||||
"natsrecv": {
|
"natsrecv" : {
|
||||||
"type": "nats",
|
"type": "nats",
|
||||||
"address": "nats://my-url",
|
"address": "nats://my-url",
|
||||||
"port": "4222",
|
"port" : "4222",
|
||||||
"database": "testcluster"
|
"database": "testcluster"
|
||||||
},
|
},
|
||||||
"redfish_recv": {
|
"redfish_recv": {
|
||||||
"type": "redfish",
|
"type": "redfish",
|
||||||
"endpoint": "https://%h-bmc",
|
|
||||||
"client_config": [
|
|
||||||
{
|
|
||||||
"host_list": "my-host-1-[1-2]",
|
|
||||||
"username": "username-1",
|
|
||||||
"password": "password-1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"host_list": "my-host-2-[1,2]",
|
|
||||||
"username": "username-2",
|
|
||||||
"password": "password-2"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"ipmi_recv": {
|
|
||||||
"type": "ipmi",
|
|
||||||
"endpoint": "ipmi-sensors://%h-ipmi",
|
|
||||||
"exclude_metrics": [
|
|
||||||
"fan_speed",
|
|
||||||
"voltage"
|
|
||||||
],
|
|
||||||
"client_config": [
|
"client_config": [
|
||||||
{
|
{
|
||||||
|
"hostname": "my-host-1",
|
||||||
"username": "username-1",
|
"username": "username-1",
|
||||||
"password": "password-1",
|
"password": "password-1",
|
||||||
"host_list": "my-host-1-[1-2]"
|
"endpoint": "https://my-endpoint-1"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
"hostname": "my-host-2",
|
||||||
"username": "username-2",
|
"username": "username-2",
|
||||||
"password": "password-2",
|
"password": "password-2",
|
||||||
"host_list": "my-host-2-[1,2]"
|
"endpoint": "https://my-endpoint-2"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
29
receivers/README.md
Normal file
29
receivers/README.md
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
# CCMetric receivers
|
||||||
|
|
||||||
|
This folder contains the ReceiveManager and receiver implementations for the cc-metric-collector.
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
|
||||||
|
The configuration file for the receivers is a list of configurations. The `type` field in each specifies which receiver to initialize.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"myreceivername" : {
|
||||||
|
"type": "receiver-type",
|
||||||
|
<receiver-specific configuration>
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This allows to specify
|
||||||
|
|
||||||
|
## Available receivers
|
||||||
|
|
||||||
|
- [`nats`](./natsReceiver.md): Receive metrics from the NATS network
|
||||||
|
- [`prometheus`](./prometheusReceiver.md): Scrape data from a Prometheus client
|
||||||
|
- [`http`](./httpReceiver.md): Listen for HTTP Post requests transporting metrics in InfluxDB line protocol
|
||||||
|
|
||||||
|
# Contributing own receivers
|
||||||
|
A receiver contains a few functions and is derived from the type `Receiver` (in `metricReceiver.go`):
|
||||||
|
|
||||||
|
For an example, check the [sample receiver](./sampleReceiver.go)
|
||||||
118
receivers/httpReceiver.go
Normal file
118
receivers/httpReceiver.go
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
package receivers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
influx "github.com/influxdata/line-protocol"
|
||||||
|
)
|
||||||
|
|
||||||
|
const HTTP_RECEIVER_PORT = "8080"
|
||||||
|
|
||||||
|
type HttpReceiverConfig struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Addr string `json:"address"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type HttpReceiver struct {
|
||||||
|
receiver
|
||||||
|
handler *influx.MetricHandler
|
||||||
|
parser *influx.Parser
|
||||||
|
meta map[string]string
|
||||||
|
config HttpReceiverConfig
|
||||||
|
router *mux.Router
|
||||||
|
server *http.Server
|
||||||
|
wg sync.WaitGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HttpReceiver) Init(name string, config json.RawMessage) error {
|
||||||
|
r.name = fmt.Sprintf("HttpReceiver(%s)", name)
|
||||||
|
r.config.Port = HTTP_RECEIVER_PORT
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &r.config)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(r.name, "Error reading config:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(r.config.Port) == 0 {
|
||||||
|
return errors.New("not all configuration variables set required by HttpReceiver")
|
||||||
|
}
|
||||||
|
r.meta = map[string]string{"source": r.name}
|
||||||
|
p := r.config.Path
|
||||||
|
if !strings.HasPrefix(p, "/") {
|
||||||
|
p = "/" + p
|
||||||
|
}
|
||||||
|
uri := fmt.Sprintf("%s:%s%s", r.config.Addr, r.config.Port, p)
|
||||||
|
cclog.ComponentDebug(r.name, "INIT", uri)
|
||||||
|
r.handler = influx.NewMetricHandler()
|
||||||
|
r.parser = influx.NewParser(r.handler)
|
||||||
|
r.parser.SetTimeFunc(DefaultTime)
|
||||||
|
|
||||||
|
r.router = mux.NewRouter()
|
||||||
|
r.router.Path(p).HandlerFunc(r.ServerHttp)
|
||||||
|
r.server = &http.Server{Addr: uri, Handler: r.router}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HttpReceiver) Start() {
|
||||||
|
cclog.ComponentDebug(r.name, "START")
|
||||||
|
r.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
err := r.server.ListenAndServe()
|
||||||
|
if err != nil && err.Error() != "http: Server closed" {
|
||||||
|
cclog.ComponentError(r.name, err.Error())
|
||||||
|
}
|
||||||
|
r.wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HttpReceiver) ServerHttp(w http.ResponseWriter, req *http.Request) {
|
||||||
|
if req.Method != http.MethodPost {
|
||||||
|
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := io.ReadAll(req.Body)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
metrics, err := r.parser.Parse(body)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range metrics {
|
||||||
|
y := lp.FromInfluxMetric(m)
|
||||||
|
for k, v := range r.meta {
|
||||||
|
y.AddMeta(k, v)
|
||||||
|
}
|
||||||
|
if r.sink != nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HttpReceiver) Close() {
|
||||||
|
r.server.Shutdown(context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHttpReceiver(name string, config json.RawMessage) (Receiver, error) {
|
||||||
|
r := new(HttpReceiver)
|
||||||
|
err := r.Init(name, config)
|
||||||
|
return r, err
|
||||||
|
}
|
||||||
23
receivers/httpReceiver.md
Normal file
23
receivers/httpReceiver.md
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
## `http` receiver
|
||||||
|
|
||||||
|
The `http` receiver can be used receive metrics through HTTP POST requests.
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<name>": {
|
||||||
|
"type": "http",
|
||||||
|
"address" : "",
|
||||||
|
"port" : "8080",
|
||||||
|
"path" : "/write"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `type`: makes the receiver a `http` receiver
|
||||||
|
- `address`: Listen address
|
||||||
|
- `port`: Listen port
|
||||||
|
- `path`: URL path for the write endpoint
|
||||||
|
|
||||||
|
The HTTP endpoint listens to `http://<address>:<port>/<path>`
|
||||||
40
receivers/metricReceiver.go
Normal file
40
receivers/metricReceiver.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
package receivers
|
||||||
|
|
||||||
|
import (
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type defaultReceiverConfig struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receiver configuration: Listen address, port
|
||||||
|
type ReceiverConfig struct {
|
||||||
|
Addr string `json:"address"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
Database string `json:"database"`
|
||||||
|
Organization string `json:"organization,omitempty"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type receiver struct {
|
||||||
|
name string
|
||||||
|
sink chan lp.CCMetric
|
||||||
|
}
|
||||||
|
|
||||||
|
type Receiver interface {
|
||||||
|
Start()
|
||||||
|
Close() // Close / finish metric receiver
|
||||||
|
Name() string // Name of the metric receiver
|
||||||
|
SetSink(sink chan lp.CCMetric) // Set sink channel
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the name of the metric receiver
|
||||||
|
func (r *receiver) Name() string {
|
||||||
|
return r.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSink set the sink channel
|
||||||
|
func (r *receiver) SetSink(sink chan lp.CCMetric) {
|
||||||
|
r.sink = sink
|
||||||
|
}
|
||||||
92
receivers/natsReceiver.go
Normal file
92
receivers/natsReceiver.go
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
package receivers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
influx "github.com/influxdata/line-protocol"
|
||||||
|
nats "github.com/nats-io/nats.go"
|
||||||
|
)
|
||||||
|
|
||||||
|
type NatsReceiverConfig struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Addr string `json:"address"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
Subject string `json:"subject"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type NatsReceiver struct {
|
||||||
|
receiver
|
||||||
|
nc *nats.Conn
|
||||||
|
handler *influx.MetricHandler
|
||||||
|
parser *influx.Parser
|
||||||
|
meta map[string]string
|
||||||
|
config NatsReceiverConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultTime = func() time.Time {
|
||||||
|
return time.Unix(42, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *NatsReceiver) Start() {
|
||||||
|
cclog.ComponentDebug(r.name, "START")
|
||||||
|
r.nc.Subscribe(r.config.Subject, r._NatsReceive)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *NatsReceiver) _NatsReceive(m *nats.Msg) {
|
||||||
|
metrics, err := r.parser.Parse(m.Data)
|
||||||
|
if err == nil {
|
||||||
|
for _, m := range metrics {
|
||||||
|
y := lp.FromInfluxMetric(m)
|
||||||
|
for k, v := range r.meta {
|
||||||
|
y.AddMeta(k, v)
|
||||||
|
}
|
||||||
|
if r.sink != nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *NatsReceiver) Close() {
|
||||||
|
if r.nc != nil {
|
||||||
|
cclog.ComponentDebug(r.name, "CLOSE")
|
||||||
|
r.nc.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewNatsReceiver(name string, config json.RawMessage) (Receiver, error) {
|
||||||
|
r := new(NatsReceiver)
|
||||||
|
r.name = fmt.Sprintf("NatsReceiver(%s)", name)
|
||||||
|
r.config.Addr = nats.DefaultURL
|
||||||
|
r.config.Port = "4222"
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &r.config)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(r.name, "Error reading config:", err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(r.config.Addr) == 0 ||
|
||||||
|
len(r.config.Port) == 0 ||
|
||||||
|
len(r.config.Subject) == 0 {
|
||||||
|
return nil, errors.New("not all configuration variables set required by NatsReceiver")
|
||||||
|
}
|
||||||
|
r.meta = map[string]string{"source": r.name}
|
||||||
|
uri := fmt.Sprintf("%s:%s", r.config.Addr, r.config.Port)
|
||||||
|
cclog.ComponentDebug(r.name, "NewNatsReceiver", uri, "Subject", r.config.Subject)
|
||||||
|
if nc, err := nats.Connect(uri); err == nil {
|
||||||
|
r.nc = nc
|
||||||
|
} else {
|
||||||
|
r.nc = nil
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
r.handler = influx.NewMetricHandler()
|
||||||
|
r.parser = influx.NewParser(r.handler)
|
||||||
|
r.parser.SetTimeFunc(DefaultTime)
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
21
receivers/natsReceiver.md
Normal file
21
receivers/natsReceiver.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
## `nats` receiver
|
||||||
|
|
||||||
|
The `nats` receiver can be used receive metrics from the NATS network. The `nats` receiver subscribes to the topic `database` and listens on `address` and `port` for metrics in the InfluxDB line protocol.
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<name>": {
|
||||||
|
"type": "nats",
|
||||||
|
"address" : "nats-server.example.org",
|
||||||
|
"port" : "4222",
|
||||||
|
"subject" : "subject"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `type`: makes the receiver a `nats` receiver
|
||||||
|
- `address`: Address of the NATS control server
|
||||||
|
- `port`: Port of the NATS control server
|
||||||
|
- `subject`: Subscribes to this subject and receive metrics
|
||||||
122
receivers/prometheusReceiver.go
Normal file
122
receivers/prometheusReceiver.go
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
package receivers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type PrometheusReceiverConfig struct {
|
||||||
|
defaultReceiverConfig
|
||||||
|
Addr string `json:"address"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
Interval string `json:"interval"`
|
||||||
|
SSL bool `json:"ssl"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PrometheusReceiver struct {
|
||||||
|
receiver
|
||||||
|
meta map[string]string
|
||||||
|
config PrometheusReceiverConfig
|
||||||
|
interval time.Duration
|
||||||
|
done chan bool
|
||||||
|
wg sync.WaitGroup
|
||||||
|
ticker *time.Ticker
|
||||||
|
uri string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *PrometheusReceiver) Start() {
|
||||||
|
cclog.ComponentDebug(r.name, "START", r.uri)
|
||||||
|
r.wg.Add(1)
|
||||||
|
|
||||||
|
r.ticker = time.NewTicker(r.interval)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-r.done:
|
||||||
|
r.wg.Done()
|
||||||
|
return
|
||||||
|
case t := <-r.ticker.C:
|
||||||
|
resp, err := http.Get(r.uri)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(resp.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
if strings.HasPrefix(line, "#") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
lineSplit := strings.Fields(line)
|
||||||
|
// separate metric name from tags (labels in Prometheus)
|
||||||
|
tags := map[string]string{}
|
||||||
|
name := lineSplit[0]
|
||||||
|
if sindex := strings.Index(name, "{"); sindex >= 0 {
|
||||||
|
eindex := strings.Index(name, "}")
|
||||||
|
for _, kv := range strings.Split(name[sindex+1:eindex], ",") {
|
||||||
|
eq := strings.Index(kv, "=")
|
||||||
|
tags[kv[0:eq]] = strings.Trim(kv[eq+1:], "\"")
|
||||||
|
}
|
||||||
|
name = lineSplit[0][0:sindex]
|
||||||
|
}
|
||||||
|
value, err := strconv.ParseFloat(lineSplit[1], 64)
|
||||||
|
if err == nil {
|
||||||
|
y, err := lp.New(name, tags, r.meta, map[string]interface{}{"value": value}, t)
|
||||||
|
if err == nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *PrometheusReceiver) Close() {
|
||||||
|
cclog.ComponentDebug(r.name, "CLOSE")
|
||||||
|
r.done <- true
|
||||||
|
r.wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPrometheusReceiver(name string, config json.RawMessage) (Receiver, error) {
|
||||||
|
r := new(PrometheusReceiver)
|
||||||
|
r.name = fmt.Sprintf("PrometheusReceiver(%s)", name)
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &r.config)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(r.name, "Error reading config:", err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(r.config.Addr) == 0 ||
|
||||||
|
len(r.config.Port) == 0 ||
|
||||||
|
len(r.config.Interval) == 0 {
|
||||||
|
return nil, errors.New("not all configuration variables set required by PrometheusReceiver (address and port)")
|
||||||
|
}
|
||||||
|
if len(r.config.Interval) > 0 {
|
||||||
|
t, err := time.ParseDuration(r.config.Interval)
|
||||||
|
if err == nil {
|
||||||
|
r.interval = t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.meta = map[string]string{"source": r.name}
|
||||||
|
proto := "http"
|
||||||
|
if r.config.SSL {
|
||||||
|
proto = "https"
|
||||||
|
}
|
||||||
|
r.uri = fmt.Sprintf("%s://%s:%s/%s", proto, r.config.Addr, r.config.Port, r.config.Path)
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
27
receivers/prometheusReceiver.md
Normal file
27
receivers/prometheusReceiver.md
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
## `prometheus` receiver
|
||||||
|
|
||||||
|
The `prometheus` receiver can be used to scrape the metrics of a single `prometheus` client. It does **not** use any official Golang library but making simple HTTP get requests and parse the response.
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<name>": {
|
||||||
|
"type": "prometheus",
|
||||||
|
"address" : "testpromhost",
|
||||||
|
"port" : "12345",
|
||||||
|
"path" : "/prometheus",
|
||||||
|
"interval": "5s",
|
||||||
|
"ssl" : true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `type`: makes the receiver a `prometheus` receiver
|
||||||
|
- `address`: Hostname or IP of the Prometheus agent
|
||||||
|
- `port`: Port of Prometheus agent
|
||||||
|
- `path`: Path to the Prometheus endpoint
|
||||||
|
- `interval`: Scrape the Prometheus endpoint in this interval (default '5s')
|
||||||
|
- `ssl`: Use SSL or not
|
||||||
|
|
||||||
|
The receiver requests data from `http(s)://<address>:<port>/<path>`.
|
||||||
115
receivers/receiveManager.go
Normal file
115
receivers/receiveManager.go
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
package receivers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
var AvailableReceivers = map[string]func(name string, config json.RawMessage) (Receiver, error){
|
||||||
|
"nats": NewNatsReceiver,
|
||||||
|
"redfish": NewRedfishReceiver,
|
||||||
|
}
|
||||||
|
|
||||||
|
type receiveManager struct {
|
||||||
|
inputs []Receiver
|
||||||
|
output chan lp.CCMetric
|
||||||
|
config []json.RawMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
type ReceiveManager interface {
|
||||||
|
Init(wg *sync.WaitGroup, receiverConfigFile string) error
|
||||||
|
AddInput(name string, rawConfig json.RawMessage) error
|
||||||
|
AddOutput(output chan lp.CCMetric)
|
||||||
|
Start()
|
||||||
|
Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rm *receiveManager) Init(wg *sync.WaitGroup, receiverConfigFile string) error {
|
||||||
|
// Initialize struct fields
|
||||||
|
rm.inputs = make([]Receiver, 0)
|
||||||
|
rm.output = nil
|
||||||
|
rm.config = make([]json.RawMessage, 0)
|
||||||
|
|
||||||
|
configFile, err := os.Open(receiverConfigFile)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError("ReceiveManager", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer configFile.Close()
|
||||||
|
jsonParser := json.NewDecoder(configFile)
|
||||||
|
var rawConfigs map[string]json.RawMessage
|
||||||
|
err = jsonParser.Decode(&rawConfigs)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError("ReceiveManager", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for name, raw := range rawConfigs {
|
||||||
|
rm.AddInput(name, raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rm *receiveManager) Start() {
|
||||||
|
cclog.ComponentDebug("ReceiveManager", "START")
|
||||||
|
|
||||||
|
for _, r := range rm.inputs {
|
||||||
|
cclog.ComponentDebug("ReceiveManager", "START", r.Name())
|
||||||
|
r.Start()
|
||||||
|
}
|
||||||
|
cclog.ComponentDebug("ReceiveManager", "STARTED")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rm *receiveManager) AddInput(name string, rawConfig json.RawMessage) error {
|
||||||
|
var config defaultReceiverConfig
|
||||||
|
err := json.Unmarshal(rawConfig, &config)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError("ReceiveManager", "SKIP", config.Type, "JSON config error:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, found := AvailableReceivers[config.Type]; !found {
|
||||||
|
cclog.ComponentError("ReceiveManager", "SKIP", config.Type, "unknown receiver:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
r, err := AvailableReceivers[config.Type](name, rawConfig)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError("ReceiveManager", "SKIP", name, "initialization failed:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rm.inputs = append(rm.inputs, r)
|
||||||
|
rm.config = append(rm.config, rawConfig)
|
||||||
|
cclog.ComponentDebug("ReceiveManager", "ADD RECEIVER", r.Name())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rm *receiveManager) AddOutput(output chan lp.CCMetric) {
|
||||||
|
rm.output = output
|
||||||
|
for _, r := range rm.inputs {
|
||||||
|
r.SetSink(rm.output)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rm *receiveManager) Close() {
|
||||||
|
cclog.ComponentDebug("ReceiveManager", "CLOSE")
|
||||||
|
|
||||||
|
// Close all receivers
|
||||||
|
for _, r := range rm.inputs {
|
||||||
|
cclog.ComponentDebug("ReceiveManager", "CLOSE", r.Name())
|
||||||
|
r.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
cclog.ComponentDebug("ReceiveManager", "DONE")
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(wg *sync.WaitGroup, receiverConfigFile string) (ReceiveManager, error) {
|
||||||
|
r := new(receiveManager)
|
||||||
|
err := r.Init(wg, receiverConfigFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return r, err
|
||||||
|
}
|
||||||
822
receivers/redfishReceiver.go
Normal file
822
receivers/redfishReceiver.go
Normal file
@@ -0,0 +1,822 @@
|
|||||||
|
package receivers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
|
||||||
|
// See: https://pkg.go.dev/github.com/stmcginnis/gofish
|
||||||
|
"github.com/stmcginnis/gofish"
|
||||||
|
"github.com/stmcginnis/gofish/common"
|
||||||
|
"github.com/stmcginnis/gofish/redfish"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RedfishReceiverClientConfig struct {
|
||||||
|
|
||||||
|
// Hostname the redfish service belongs to
|
||||||
|
Hostname string
|
||||||
|
|
||||||
|
// is metric excluded globally or per client
|
||||||
|
isExcluded map[string](bool)
|
||||||
|
|
||||||
|
doPowerMetric bool
|
||||||
|
doProcessorMetrics bool
|
||||||
|
doThermalMetrics bool
|
||||||
|
|
||||||
|
skipProcessorMetricsURL map[string]bool
|
||||||
|
|
||||||
|
gofish gofish.ClientConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedfishReceiver configuration:
|
||||||
|
type RedfishReceiver struct {
|
||||||
|
receiver
|
||||||
|
|
||||||
|
config struct {
|
||||||
|
fanout int
|
||||||
|
Interval time.Duration
|
||||||
|
HttpTimeout time.Duration
|
||||||
|
|
||||||
|
// Client config for each redfish service
|
||||||
|
ClientConfigs []RedfishReceiverClientConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
done chan bool // channel to finish / stop redfish receiver
|
||||||
|
wg sync.WaitGroup // wait group for redfish receiver
|
||||||
|
}
|
||||||
|
|
||||||
|
// readThermalMetrics reads thermal metrics from a redfish device
|
||||||
|
func (r *RedfishReceiver) readThermalMetrics(
|
||||||
|
clientConfig *RedfishReceiverClientConfig,
|
||||||
|
chassis *redfish.Chassis) error {
|
||||||
|
|
||||||
|
// Get thermal information for each chassis
|
||||||
|
thermal, err := chassis.Thermal()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("readMetrics: chassis.Thermal() failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip empty thermal information
|
||||||
|
if thermal == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamp := time.Now()
|
||||||
|
|
||||||
|
for _, temperature := range thermal.Temperatures {
|
||||||
|
|
||||||
|
// Skip, when temperature metric is excluded
|
||||||
|
if clientConfig.isExcluded["temperature"] {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip all temperatures which are not in enabled state
|
||||||
|
if temperature.Status.State != "" && temperature.Status.State != common.EnabledState {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := map[string]string{
|
||||||
|
"hostname": clientConfig.Hostname,
|
||||||
|
"type": "node",
|
||||||
|
// ChassisType shall indicate the physical form factor for the type of chassis
|
||||||
|
"chassis_typ": string(chassis.ChassisType),
|
||||||
|
// Chassis name
|
||||||
|
"chassis_name": chassis.Name,
|
||||||
|
// ID uniquely identifies the resource
|
||||||
|
"temperature_id": temperature.ID,
|
||||||
|
// MemberID shall uniquely identify the member within the collection. For
|
||||||
|
// services supporting Redfish v1.6 or higher, this value shall be the
|
||||||
|
// zero-based array index.
|
||||||
|
"temperature_member_id": temperature.MemberID,
|
||||||
|
// PhysicalContext shall be a description of the affected device or region
|
||||||
|
// within the chassis to which this temperature measurement applies
|
||||||
|
"temperature_physical_context": string(temperature.PhysicalContext),
|
||||||
|
// Name
|
||||||
|
"temperature_name": temperature.Name,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete empty tags
|
||||||
|
for key, value := range tags {
|
||||||
|
if value == "" {
|
||||||
|
delete(tags, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set meta data tags
|
||||||
|
meta := map[string]string{
|
||||||
|
"source": r.name,
|
||||||
|
"group": "Temperature",
|
||||||
|
"unit": "degC",
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadingCelsius shall be the current value of the temperature sensor's reading.
|
||||||
|
value := temperature.ReadingCelsius
|
||||||
|
|
||||||
|
y, err := lp.New("temperature", tags, meta,
|
||||||
|
map[string]interface{}{
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
timestamp)
|
||||||
|
if err == nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fan := range thermal.Fans {
|
||||||
|
// Skip, when fan_speed metric is excluded
|
||||||
|
if clientConfig.isExcluded["fan_speed"] {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip all fans which are not in enabled state
|
||||||
|
if fan.Status.State != common.EnabledState {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tags := map[string]string{
|
||||||
|
"hostname": clientConfig.Hostname,
|
||||||
|
"type": "node",
|
||||||
|
// ChassisType shall indicate the physical form factor for the type of chassis
|
||||||
|
"chassis_typ": string(chassis.ChassisType),
|
||||||
|
// Chassis name
|
||||||
|
"chassis_name": chassis.Name,
|
||||||
|
// ID uniquely identifies the resource
|
||||||
|
"fan_id": fan.ID,
|
||||||
|
// MemberID shall uniquely identify the member within the collection. For
|
||||||
|
// services supporting Redfish v1.6 or higher, this value shall be the
|
||||||
|
// zero-based array index.
|
||||||
|
"fan_member_id": fan.MemberID,
|
||||||
|
// PhysicalContext shall be a description of the affected device or region
|
||||||
|
// within the chassis to which this fan is associated
|
||||||
|
"fan_physical_context": string(fan.PhysicalContext),
|
||||||
|
// Name
|
||||||
|
"fan_name": fan.Name,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete empty tags
|
||||||
|
for key, value := range tags {
|
||||||
|
if value == "" {
|
||||||
|
delete(tags, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set meta data tags
|
||||||
|
meta := map[string]string{
|
||||||
|
"source": r.name,
|
||||||
|
"group": "FanSpeed",
|
||||||
|
"unit": string(fan.ReadingUnits),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reading shall be the current value of the fan sensor's reading
|
||||||
|
value := fan.Reading
|
||||||
|
|
||||||
|
y, err := lp.New("fan_speed", tags, meta,
|
||||||
|
map[string]interface{}{
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
timestamp)
|
||||||
|
if err == nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readPowerMetrics reads power metrics from a redfish device
|
||||||
|
func (r *RedfishReceiver) readPowerMetrics(
|
||||||
|
clientConfig *RedfishReceiverClientConfig,
|
||||||
|
chassis *redfish.Chassis) error {
|
||||||
|
|
||||||
|
// Get power information for each chassis
|
||||||
|
power, err := chassis.Power()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("readMetrics: chassis.Power() failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip empty power information
|
||||||
|
if power == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamp := time.Now()
|
||||||
|
|
||||||
|
// Read min, max and average consumed watts for each power control
|
||||||
|
for _, pc := range power.PowerControl {
|
||||||
|
|
||||||
|
// Skip all power controls which are not in enabled state
|
||||||
|
if pc.Status.State != "" && pc.Status.State != common.EnabledState {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map of collected metrics
|
||||||
|
metrics := make(map[string]float32)
|
||||||
|
|
||||||
|
// PowerConsumedWatts shall represent the actual power being consumed (in
|
||||||
|
// Watts) by the chassis
|
||||||
|
if !clientConfig.isExcluded["consumed_watts"] {
|
||||||
|
metrics["consumed_watts"] = pc.PowerConsumedWatts
|
||||||
|
}
|
||||||
|
// AverageConsumedWatts shall represent the
|
||||||
|
// average power level that occurred averaged over the last IntervalInMin
|
||||||
|
// minutes.
|
||||||
|
if !clientConfig.isExcluded["average_consumed_watts"] {
|
||||||
|
metrics["average_consumed_watts"] = pc.PowerMetrics.AverageConsumedWatts
|
||||||
|
}
|
||||||
|
// MinConsumedWatts shall represent the
|
||||||
|
// minimum power level in watts that occurred within the last
|
||||||
|
// IntervalInMin minutes.
|
||||||
|
if !clientConfig.isExcluded["min_consumed_watts"] {
|
||||||
|
metrics["min_consumed_watts"] = pc.PowerMetrics.MinConsumedWatts
|
||||||
|
}
|
||||||
|
// MaxConsumedWatts shall represent the
|
||||||
|
// maximum power level in watts that occurred within the last
|
||||||
|
// IntervalInMin minutes
|
||||||
|
if !clientConfig.isExcluded["max_consumed_watts"] {
|
||||||
|
metrics["max_consumed_watts"] = pc.PowerMetrics.MaxConsumedWatts
|
||||||
|
}
|
||||||
|
// IntervalInMin shall represent the time interval (or window), in minutes,
|
||||||
|
// in which the PowerMetrics properties are measured over.
|
||||||
|
// Should be an integer, but some Dell implementations return as a float
|
||||||
|
intervalInMin :=
|
||||||
|
strconv.FormatFloat(
|
||||||
|
float64(pc.PowerMetrics.IntervalInMin), 'f', -1, 32)
|
||||||
|
|
||||||
|
// Set tags
|
||||||
|
tags := map[string]string{
|
||||||
|
"hostname": clientConfig.Hostname,
|
||||||
|
"type": "node",
|
||||||
|
// ChassisType shall indicate the physical form factor for the type of chassis
|
||||||
|
"chassis_typ": string(chassis.ChassisType),
|
||||||
|
// Chassis name
|
||||||
|
"chassis_name": chassis.Name,
|
||||||
|
// ID uniquely identifies the resource
|
||||||
|
"power_control_id": pc.ID,
|
||||||
|
// MemberID shall uniquely identify the member within the collection. For
|
||||||
|
// services supporting Redfish v1.6 or higher, this value shall be the
|
||||||
|
// zero-based array index.
|
||||||
|
"power_control_member_id": pc.MemberID,
|
||||||
|
// PhysicalContext shall be a description of the affected device(s) or region
|
||||||
|
// within the chassis to which this power control applies.
|
||||||
|
"power_control_physical_context": string(pc.PhysicalContext),
|
||||||
|
// Name
|
||||||
|
"power_control_name": pc.Name,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete empty tags
|
||||||
|
for key, value := range tags {
|
||||||
|
if value == "" {
|
||||||
|
delete(tags, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set meta data tags
|
||||||
|
meta := map[string]string{
|
||||||
|
"source": r.name,
|
||||||
|
"group": "Energy",
|
||||||
|
"interval_in_minutes": intervalInMin,
|
||||||
|
"unit": "watts",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete empty meta data tags
|
||||||
|
for key, value := range meta {
|
||||||
|
if value == "" {
|
||||||
|
delete(meta, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, value := range metrics {
|
||||||
|
|
||||||
|
y, err := lp.New(name, tags, meta,
|
||||||
|
map[string]interface{}{
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
timestamp)
|
||||||
|
if err == nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readProcessorMetrics reads processor metrics from a redfish device
|
||||||
|
// See: https://redfish.dmtf.org/schemas/v1/ProcessorMetrics.json
|
||||||
|
func (r *RedfishReceiver) readProcessorMetrics(
|
||||||
|
clientConfig *RedfishReceiverClientConfig,
|
||||||
|
processor *redfish.Processor) error {
|
||||||
|
|
||||||
|
timestamp := time.Now()
|
||||||
|
|
||||||
|
// URL to processor metrics
|
||||||
|
URL := processor.ODataID + "/ProcessorMetrics"
|
||||||
|
|
||||||
|
// Skip previously detected non existing URLs
|
||||||
|
if clientConfig.skipProcessorMetricsURL[URL] {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := processor.Client.Get(URL)
|
||||||
|
if err != nil {
|
||||||
|
// Skip non existing URLs
|
||||||
|
if statusCode := err.(*common.Error).HTTPReturnedStatusCode; statusCode == http.StatusNotFound {
|
||||||
|
clientConfig.skipProcessorMetricsURL[URL] = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("processor.Client.Get(%v) failed: %+w", URL, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var processorMetrics struct {
|
||||||
|
common.Entity
|
||||||
|
ODataType string `json:"@odata.type"`
|
||||||
|
ODataEtag string `json:"@odata.etag"`
|
||||||
|
Description string `json:"Description"`
|
||||||
|
// This property shall contain the power, in watts, that the processor has consumed.
|
||||||
|
ConsumedPowerWatt float32 `json:"ConsumedPowerWatt"`
|
||||||
|
// This property shall contain the temperature, in Celsius, of the processor.
|
||||||
|
TemperatureCelsius float32 `json:"TemperatureCelsius"`
|
||||||
|
}
|
||||||
|
err = json.NewDecoder(resp.Body).Decode(&processorMetrics)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to decode JSON for processor metrics: %+w", err)
|
||||||
|
}
|
||||||
|
processorMetrics.SetClient(processor.Client)
|
||||||
|
|
||||||
|
// Set tags
|
||||||
|
tags := map[string]string{
|
||||||
|
"hostname": clientConfig.Hostname,
|
||||||
|
"type": "socket",
|
||||||
|
// ProcessorType shall contain the string which identifies the type of processor contained in this Socket
|
||||||
|
"processor_typ": string(processor.ProcessorType),
|
||||||
|
// Processor name
|
||||||
|
"processor_name": processor.Name,
|
||||||
|
// ID uniquely identifies the resource
|
||||||
|
"processor_id": processor.ID,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete empty tags
|
||||||
|
for key, value := range tags {
|
||||||
|
if value == "" {
|
||||||
|
delete(tags, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set meta data tags
|
||||||
|
metaPower := map[string]string{
|
||||||
|
"source": r.name,
|
||||||
|
"group": "Energy",
|
||||||
|
"unit": "watts",
|
||||||
|
}
|
||||||
|
|
||||||
|
namePower := "consumed_power"
|
||||||
|
|
||||||
|
if !clientConfig.isExcluded[namePower] {
|
||||||
|
y, err := lp.New(namePower, tags, metaPower,
|
||||||
|
map[string]interface{}{
|
||||||
|
"value": processorMetrics.ConsumedPowerWatt,
|
||||||
|
},
|
||||||
|
timestamp)
|
||||||
|
if err == nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Set meta data tags
|
||||||
|
metaThermal := map[string]string{
|
||||||
|
"source": r.name,
|
||||||
|
"group": "Temperature",
|
||||||
|
"unit": "degC",
|
||||||
|
}
|
||||||
|
|
||||||
|
nameThermal := "temperature"
|
||||||
|
|
||||||
|
if !clientConfig.isExcluded[nameThermal] {
|
||||||
|
y, err := lp.New(nameThermal, tags, metaThermal,
|
||||||
|
map[string]interface{}{
|
||||||
|
"value": processorMetrics.TemperatureCelsius,
|
||||||
|
},
|
||||||
|
timestamp)
|
||||||
|
if err == nil {
|
||||||
|
r.sink <- y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMetrics reads redfish thermal, power and processor metrics from the redfish device
|
||||||
|
// configured in clientConfig
|
||||||
|
func (r *RedfishReceiver) readMetrics(clientConfig *RedfishReceiverClientConfig) error {
|
||||||
|
|
||||||
|
// Connect to redfish service
|
||||||
|
c, err := gofish.Connect(clientConfig.gofish)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"readMetrics: gofish.Connect({Username: %v, Endpoint: %v, BasicAuth: %v, HttpTimeout: %v, HttpInsecure: %v}) failed: %v",
|
||||||
|
clientConfig.gofish.Username,
|
||||||
|
clientConfig.gofish.Endpoint,
|
||||||
|
clientConfig.gofish.BasicAuth,
|
||||||
|
clientConfig.gofish.HTTPClient.Timeout,
|
||||||
|
clientConfig.gofish.HTTPClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify,
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
defer c.Logout()
|
||||||
|
|
||||||
|
// Create a session, when required
|
||||||
|
if _, err = c.GetSession(); err != nil {
|
||||||
|
c, err = c.CloneWithSession()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("readMetrics: Failed to create a session: %+w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get all chassis managed by this service
|
||||||
|
isChassisListRequired :=
|
||||||
|
clientConfig.doThermalMetrics ||
|
||||||
|
clientConfig.doPowerMetric
|
||||||
|
var chassisList []*redfish.Chassis
|
||||||
|
if isChassisListRequired {
|
||||||
|
chassisList, err = c.Service.Chassis()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("readMetrics: c.Service.Chassis() failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get all computer systems managed by this service
|
||||||
|
isComputerSystemListRequired := clientConfig.doProcessorMetrics
|
||||||
|
var computerSystemList []*redfish.ComputerSystem
|
||||||
|
if isComputerSystemListRequired {
|
||||||
|
computerSystemList, err = c.Service.Systems()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("readMetrics: c.Service.Systems() failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// read thermal metrics
|
||||||
|
if clientConfig.doThermalMetrics {
|
||||||
|
for _, chassis := range chassisList {
|
||||||
|
err := r.readThermalMetrics(clientConfig, chassis)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// read power metrics
|
||||||
|
if clientConfig.doPowerMetric {
|
||||||
|
for _, chassis := range chassisList {
|
||||||
|
err = r.readPowerMetrics(clientConfig, chassis)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// read processor metrics
|
||||||
|
if clientConfig.doProcessorMetrics {
|
||||||
|
// loop for all computer systems
|
||||||
|
for _, system := range computerSystemList {
|
||||||
|
|
||||||
|
// loop for all processors
|
||||||
|
processors, err := system.Processors()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("readMetrics: system.Processors() failed: %v", err)
|
||||||
|
}
|
||||||
|
for _, processor := range processors {
|
||||||
|
err := r.readProcessorMetrics(clientConfig, processor)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doReadMetrics reads metrics from all configure redfish devices.
|
||||||
|
// To compensate latencies of the Redfish devices a fanout is used.
|
||||||
|
func (r *RedfishReceiver) doReadMetric() {
|
||||||
|
|
||||||
|
// Create wait group and input channel for workers
|
||||||
|
var workerWaitGroup sync.WaitGroup
|
||||||
|
workerInput := make(chan *RedfishReceiverClientConfig, r.config.fanout)
|
||||||
|
|
||||||
|
// Create worker go routines
|
||||||
|
for i := 0; i < r.config.fanout; i++ {
|
||||||
|
// Increment worker wait group counter
|
||||||
|
workerWaitGroup.Add(1)
|
||||||
|
go func() {
|
||||||
|
// Decrement worker wait group counter
|
||||||
|
defer workerWaitGroup.Done()
|
||||||
|
|
||||||
|
// Read power metrics for each client config
|
||||||
|
for clientConfig := range workerInput {
|
||||||
|
err := r.readMetrics(clientConfig)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(r.name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Distribute client configs to workers
|
||||||
|
for i := range r.config.ClientConfigs {
|
||||||
|
|
||||||
|
// Check done channel status
|
||||||
|
select {
|
||||||
|
case workerInput <- &r.config.ClientConfigs[i]:
|
||||||
|
case <-r.done:
|
||||||
|
// process done event
|
||||||
|
// Stop workers, clear channel and wait for all workers to finish
|
||||||
|
close(workerInput)
|
||||||
|
for range workerInput {
|
||||||
|
}
|
||||||
|
workerWaitGroup.Wait()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop workers and wait for all workers to finish
|
||||||
|
close(workerInput)
|
||||||
|
workerWaitGroup.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start starts the redfish receiver
|
||||||
|
func (r *RedfishReceiver) Start() {
|
||||||
|
cclog.ComponentDebug(r.name, "START")
|
||||||
|
|
||||||
|
// Start redfish receiver
|
||||||
|
r.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer r.wg.Done()
|
||||||
|
|
||||||
|
// Create ticker
|
||||||
|
ticker := time.NewTicker(r.config.Interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
r.doReadMetric()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case tickerTime := <-ticker.C:
|
||||||
|
// Check if we missed the ticker event
|
||||||
|
if since := time.Since(tickerTime); since > 5*time.Second {
|
||||||
|
cclog.ComponentInfo(r.name, "Missed ticker event for more then", since)
|
||||||
|
}
|
||||||
|
|
||||||
|
// process ticker event -> continue
|
||||||
|
continue
|
||||||
|
case <-r.done:
|
||||||
|
// process done event
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
cclog.ComponentDebug(r.name, "STARTED")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the redfish receiver
|
||||||
|
func (r *RedfishReceiver) Close() {
|
||||||
|
cclog.ComponentDebug(r.name, "CLOSE")
|
||||||
|
|
||||||
|
// Send the signal and wait
|
||||||
|
close(r.done)
|
||||||
|
r.wg.Wait()
|
||||||
|
|
||||||
|
cclog.ComponentDebug(r.name, "DONE")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRedfishReceiver creates a new instance of the redfish receiver
|
||||||
|
// Initialize the receiver by giving it a name and reading in the config JSON
|
||||||
|
func NewRedfishReceiver(name string, config json.RawMessage) (Receiver, error) {
|
||||||
|
r := new(RedfishReceiver)
|
||||||
|
|
||||||
|
// Config options from config file
|
||||||
|
configJSON := struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
|
||||||
|
// Maximum number of simultaneous redfish connections (default: 64)
|
||||||
|
Fanout int `json:"fanout,omitempty"`
|
||||||
|
// How often the redfish power metrics should be read and send to the sink (default: 30 s)
|
||||||
|
IntervalString string `json:"interval,omitempty"`
|
||||||
|
|
||||||
|
// Control whether a client verifies the server's certificate
|
||||||
|
// (default: true == do not verify server's certificate)
|
||||||
|
HttpInsecure bool `json:"http_insecure,omitempty"`
|
||||||
|
// Time limit for requests made by this HTTP client (default: 10 s)
|
||||||
|
HttpTimeoutString string `json:"http_timeout,omitempty"`
|
||||||
|
|
||||||
|
// Default client username, password and endpoint
|
||||||
|
Username *string `json:"username"` // User name to authenticate with
|
||||||
|
Password *string `json:"password"` // Password to use for authentication
|
||||||
|
Endpoint *string `json:"endpoint"` // URL of the redfish service
|
||||||
|
|
||||||
|
// Globally disable collection of power, processor or thermal metrics
|
||||||
|
DisablePowerMetrics bool `json:"disable_power_metrics"`
|
||||||
|
DisableProcessorMetrics bool `json:"disable_processor_metrics"`
|
||||||
|
DisableThermalMetrics bool `json:"disable_thermal_metrics"`
|
||||||
|
|
||||||
|
// Globally excluded metrics
|
||||||
|
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
|
||||||
|
|
||||||
|
ClientConfigs []struct {
|
||||||
|
HostList []string `json:"host_list"` // List of hosts with the same client configuration
|
||||||
|
Username *string `json:"username"` // User name to authenticate with
|
||||||
|
Password *string `json:"password"` // Password to use for authentication
|
||||||
|
Endpoint *string `json:"endpoint"` // URL of the redfish service
|
||||||
|
|
||||||
|
// Per client disable collection of power,processor or thermal metrics
|
||||||
|
DisablePowerMetrics bool `json:"disable_power_metrics"`
|
||||||
|
DisableProcessorMetrics bool `json:"disable_processor_metrics"`
|
||||||
|
DisableThermalMetrics bool `json:"disable_thermal_metrics"`
|
||||||
|
|
||||||
|
// Per client excluded metrics
|
||||||
|
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
|
||||||
|
} `json:"client_config"`
|
||||||
|
}{
|
||||||
|
// Set defaults values
|
||||||
|
// Allow overwriting these defaults by reading config JSON
|
||||||
|
Fanout: 64,
|
||||||
|
IntervalString: "30s",
|
||||||
|
HttpTimeoutString: "10s",
|
||||||
|
HttpInsecure: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set name
|
||||||
|
r.name = fmt.Sprintf("RedfishReceiver(%s)", name)
|
||||||
|
|
||||||
|
// Create done channel
|
||||||
|
r.done = make(chan bool)
|
||||||
|
|
||||||
|
// Read the redfish receiver specific JSON config
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &configJSON)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(r.name, "Error reading config:", err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert interval string representation to duration
|
||||||
|
var err error
|
||||||
|
r.config.Interval, err = time.ParseDuration(configJSON.IntervalString)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf(
|
||||||
|
"Failed to parse duration string interval='%s': %w",
|
||||||
|
configJSON.IntervalString,
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
cclog.Error(r.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTP timeout duration
|
||||||
|
r.config.HttpTimeout, err = time.ParseDuration(configJSON.HttpTimeoutString)
|
||||||
|
if err != nil {
|
||||||
|
err := fmt.Errorf(
|
||||||
|
"Failed to parse duration string http_timeout='%s': %w",
|
||||||
|
configJSON.HttpTimeoutString,
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
cclog.Error(r.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create new http client
|
||||||
|
customTransport := http.DefaultTransport.(*http.Transport).Clone()
|
||||||
|
customTransport.TLSClientConfig = &tls.Config{
|
||||||
|
InsecureSkipVerify: configJSON.HttpInsecure,
|
||||||
|
}
|
||||||
|
httpClient := &http.Client{
|
||||||
|
Timeout: r.config.HttpTimeout,
|
||||||
|
Transport: customTransport,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize client configurations
|
||||||
|
r.config.ClientConfigs = make([]RedfishReceiverClientConfig, 0)
|
||||||
|
|
||||||
|
// Create client config from JSON config
|
||||||
|
for i := range configJSON.ClientConfigs {
|
||||||
|
|
||||||
|
clientConfigJSON := &configJSON.ClientConfigs[i]
|
||||||
|
|
||||||
|
var endpoint_pattern string
|
||||||
|
if clientConfigJSON.Endpoint != nil {
|
||||||
|
endpoint_pattern = *clientConfigJSON.Endpoint
|
||||||
|
} else if configJSON.Endpoint != nil {
|
||||||
|
endpoint_pattern = *configJSON.Endpoint
|
||||||
|
} else {
|
||||||
|
err := fmt.Errorf("client config number %v requires endpoint", i)
|
||||||
|
cclog.ComponentError(r.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var username string
|
||||||
|
if clientConfigJSON.Username != nil {
|
||||||
|
username = *clientConfigJSON.Username
|
||||||
|
} else if configJSON.Username != nil {
|
||||||
|
username = *configJSON.Username
|
||||||
|
} else {
|
||||||
|
err := fmt.Errorf("client config number %v requires username", i)
|
||||||
|
cclog.ComponentError(r.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var password string
|
||||||
|
if clientConfigJSON.Password != nil {
|
||||||
|
password = *clientConfigJSON.Password
|
||||||
|
} else if configJSON.Password != nil {
|
||||||
|
password = *configJSON.Password
|
||||||
|
} else {
|
||||||
|
err := fmt.Errorf("client config number %v requires password", i)
|
||||||
|
cclog.ComponentError(r.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Which metrics should be collected
|
||||||
|
doPowerMetric :=
|
||||||
|
!(configJSON.DisablePowerMetrics ||
|
||||||
|
clientConfigJSON.DisablePowerMetrics)
|
||||||
|
doProcessorMetrics :=
|
||||||
|
!(configJSON.DisableProcessorMetrics ||
|
||||||
|
clientConfigJSON.DisableProcessorMetrics)
|
||||||
|
doThermalMetrics :=
|
||||||
|
!(configJSON.DisableThermalMetrics ||
|
||||||
|
clientConfigJSON.DisableThermalMetrics)
|
||||||
|
|
||||||
|
// Is metrics excluded globally or per client
|
||||||
|
isExcluded := make(map[string]bool)
|
||||||
|
for _, key := range clientConfigJSON.ExcludeMetrics {
|
||||||
|
isExcluded[key] = true
|
||||||
|
}
|
||||||
|
for _, key := range configJSON.ExcludeMetrics {
|
||||||
|
isExcluded[key] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, host := range clientConfigJSON.HostList {
|
||||||
|
|
||||||
|
// Endpoint of the redfish service
|
||||||
|
endpoint := strings.Replace(endpoint_pattern, "%h", host, -1)
|
||||||
|
|
||||||
|
r.config.ClientConfigs = append(
|
||||||
|
r.config.ClientConfigs,
|
||||||
|
RedfishReceiverClientConfig{
|
||||||
|
Hostname: host,
|
||||||
|
isExcluded: isExcluded,
|
||||||
|
doPowerMetric: doPowerMetric,
|
||||||
|
doProcessorMetrics: doProcessorMetrics,
|
||||||
|
doThermalMetrics: doThermalMetrics,
|
||||||
|
skipProcessorMetricsURL: make(map[string]bool),
|
||||||
|
gofish: gofish.ClientConfig{
|
||||||
|
Username: username,
|
||||||
|
Password: password,
|
||||||
|
Endpoint: endpoint,
|
||||||
|
HTTPClient: httpClient,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute parallel fanout to use
|
||||||
|
numClients := len(r.config.ClientConfigs)
|
||||||
|
r.config.fanout = configJSON.Fanout
|
||||||
|
if numClients < r.config.fanout {
|
||||||
|
r.config.fanout = numClients
|
||||||
|
}
|
||||||
|
|
||||||
|
if numClients == 0 {
|
||||||
|
err := fmt.Errorf("at least one client config is required")
|
||||||
|
cclog.ComponentError(r.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for duplicate client configurations
|
||||||
|
isDuplicate := make(map[string]bool)
|
||||||
|
for i := range r.config.ClientConfigs {
|
||||||
|
host := r.config.ClientConfigs[i].Hostname
|
||||||
|
if isDuplicate[host] {
|
||||||
|
err := fmt.Errorf("Found duplicate client config for host %s", host)
|
||||||
|
cclog.ComponentError(r.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
isDuplicate[host] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Give some basic info about redfish receiver status
|
||||||
|
cclog.ComponentInfo(r.name, "Monitoring", numClients, "clients")
|
||||||
|
cclog.ComponentInfo(r.name, "Monitoring interval:", r.config.Interval)
|
||||||
|
cclog.ComponentInfo(r.name, "Monitoring parallel fanout:", r.config.fanout)
|
||||||
|
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
54
receivers/redfishReceiver.md
Normal file
54
receivers/redfishReceiver.md
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
## Redfish receiver
|
||||||
|
|
||||||
|
The Redfish receiver uses the [Redfish (specification)](https://www.dmtf.org/standards/redfish) to query thermal and power metrics. Thermal metrics may include various fan speeds and temperatures. Power metrics may include the current power consumption of various hardware components. It may also include the minimum, maximum and average power consumption of these components in a given time interval. The receiver will poll each configured redfish device once in a given interval. Multiple devices can be accessed in parallel to increase throughput.
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<redfish receiver name>": {
|
||||||
|
"type": "redfish",
|
||||||
|
"username": "<user A>",
|
||||||
|
"password": "<password A>",
|
||||||
|
"endpoint": "https://%h-bmc",
|
||||||
|
"exclude_metrics": [ "min_consumed_watts" ],
|
||||||
|
"client_config": [
|
||||||
|
{
|
||||||
|
"host_list": [ "<host 1>", "<host 2>" ]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"host_list": [ "<host 3>", "<host 4>" ]
|
||||||
|
"disable_power_metrics": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"host_list": [ "<host 5>" ],
|
||||||
|
"username": "<user B>",
|
||||||
|
"password": "<password B>",
|
||||||
|
"endpoint": "https://%h-BMC",
|
||||||
|
"disable_thermal_metrics": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Global settings:
|
||||||
|
|
||||||
|
- `fanout`: Maximum number of simultaneous redfish connections (default: 64)
|
||||||
|
- `interval`: How often the redfish power metrics should be read and send to the sink (default: 30 s)
|
||||||
|
- `http_insecure`: Control whether a client verifies the server's certificate (default: true == do not verify server's certificate)
|
||||||
|
- `http_timeout`: Time limit for requests made by this HTTP client (default: 10 s)
|
||||||
|
|
||||||
|
Global and per redfish device settings (per redfish device settings overwrite the global settings):
|
||||||
|
|
||||||
|
- `disable_power_metrics`: disable collection of power metrics
|
||||||
|
- `disable_processor_metrics`: disable collection of processor metrics
|
||||||
|
- `disable_thermal_metrics`: disable collection of thermal metrics
|
||||||
|
- `exclude_metrics`: list of excluded metrics
|
||||||
|
- `username`: User name to authenticate with
|
||||||
|
- `password`: Password to use for authentication
|
||||||
|
- `endpoint`: URL of the redfish service (placeholder `%h` gets replaced by the hostname)
|
||||||
|
|
||||||
|
Per redfish device settings:
|
||||||
|
|
||||||
|
- `host_list`: List of hosts with the same client configuration
|
||||||
101
receivers/sampleReceiver.go
Normal file
101
receivers/sampleReceiver.go
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
package receivers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SampleReceiver configuration: receiver type, listen address, port
|
||||||
|
type SampleReceiverConfig struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Addr string `json:"address"`
|
||||||
|
Port string `json:"port"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SampleReceiver struct {
|
||||||
|
receiver
|
||||||
|
config SampleReceiverConfig
|
||||||
|
|
||||||
|
// Storage for static information
|
||||||
|
meta map[string]string
|
||||||
|
// Use in case of own go routine
|
||||||
|
// done chan bool
|
||||||
|
// wg sync.WaitGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement functions required for Receiver interface
|
||||||
|
// Start(), Close()
|
||||||
|
// See: metricReceiver.go
|
||||||
|
|
||||||
|
func (r *SampleReceiver) Start() {
|
||||||
|
cclog.ComponentDebug(r.name, "START")
|
||||||
|
|
||||||
|
// Start server process like http.ListenAndServe()
|
||||||
|
|
||||||
|
// or use own go routine but always make sure it exits
|
||||||
|
// as soon as it gets the signal of the r.done channel
|
||||||
|
//
|
||||||
|
// r.done = make(chan bool)
|
||||||
|
// r.wg.Add(1)
|
||||||
|
// go func() {
|
||||||
|
// defer r.wg.Done()
|
||||||
|
//
|
||||||
|
// // Create ticker
|
||||||
|
// ticker := time.NewTicker(30 * time.Second)
|
||||||
|
// defer ticker.Stop()
|
||||||
|
//
|
||||||
|
// for {
|
||||||
|
// readMetric()
|
||||||
|
// select {
|
||||||
|
// case <-ticker.C:
|
||||||
|
// // process ticker event -> continue
|
||||||
|
// continue
|
||||||
|
// case <-r.done:
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close receiver: close network connection, close files, close libraries, ...
|
||||||
|
func (r *SampleReceiver) Close() {
|
||||||
|
cclog.ComponentDebug(r.name, "CLOSE")
|
||||||
|
|
||||||
|
// Close server like http.Shutdown()
|
||||||
|
|
||||||
|
// in case of own go routine, send the signal and wait
|
||||||
|
// r.done <- true
|
||||||
|
// r.wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
// New function to create a new instance of the receiver
|
||||||
|
// Initialize the receiver by giving it a name and reading in the config JSON
|
||||||
|
func NewSampleReceiver(name string, config json.RawMessage) (Receiver, error) {
|
||||||
|
r := new(SampleReceiver)
|
||||||
|
|
||||||
|
// Set name of SampleReceiver
|
||||||
|
// The name should be chosen in such a way that different instances of SampleReceiver can be distinguished
|
||||||
|
r.name = fmt.Sprintf("SampleReceiver(%s)", name)
|
||||||
|
|
||||||
|
// Set static information
|
||||||
|
r.meta = map[string]string{"source": r.name}
|
||||||
|
|
||||||
|
// Set defaults in r.config
|
||||||
|
// Allow overwriting these defaults by reading config JSON
|
||||||
|
|
||||||
|
// Read the sample receiver specific JSON config
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &r.config)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(r.name, "Error reading config:", err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that all required fields in the configuration are set
|
||||||
|
// Use 'if len(r.config.Option) > 0' for strings
|
||||||
|
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
13
router.json
13
router.json
@@ -1,10 +1,9 @@
|
|||||||
{
|
{
|
||||||
"process_messages" : {
|
"add_tags" : [
|
||||||
"add_tag_if": [
|
|
||||||
{
|
{
|
||||||
"key" : "cluster",
|
"key" : "cluster",
|
||||||
"value" : "testcluster",
|
"value" : "testcluster",
|
||||||
"if" : "true"
|
"if" : "*"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"key" : "test",
|
"key" : "test",
|
||||||
@@ -12,12 +11,12 @@
|
|||||||
"if" : "name == 'temp_package_id_0'"
|
"if" : "name == 'temp_package_id_0'"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"delete_tag_if": [
|
"delete_tags" : [
|
||||||
{
|
{
|
||||||
"key" : "unit",
|
"key" : "unit",
|
||||||
"if" : "true"
|
"value" : "*",
|
||||||
|
"if" : "*"
|
||||||
}
|
}
|
||||||
]
|
],
|
||||||
},
|
|
||||||
"interval_timestamp" : true
|
"interval_timestamp" : true
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ CC_USER=clustercockpit
|
|||||||
CC_GROUP=clustercockpit
|
CC_GROUP=clustercockpit
|
||||||
CONF_DIR=/etc/cc-metric-collector
|
CONF_DIR=/etc/cc-metric-collector
|
||||||
PID_FILE=/var/run/$NAME.pid
|
PID_FILE=/var/run/$NAME.pid
|
||||||
DAEMON=/usr/bin/$NAME
|
DAEMON=/usr/sbin/$NAME
|
||||||
CONF_FILE=${CONF_DIR}/cc-metric-collector.json
|
CONF_FILE=${CONF_DIR}/cc-metric-collector.json
|
||||||
|
|
||||||
umask 0027
|
umask 0027
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ Description=ClusterCockpit metric collector
|
|||||||
Documentation=https://github.com/ClusterCockpit/cc-metric-collector
|
Documentation=https://github.com/ClusterCockpit/cc-metric-collector
|
||||||
Wants=network-online.target
|
Wants=network-online.target
|
||||||
After=network-online.target
|
After=network-online.target
|
||||||
|
After=postgresql.service mariadb.service mysql.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
EnvironmentFile=/etc/default/cc-metric-collector
|
EnvironmentFile=/etc/default/cc-metric-collector
|
||||||
@@ -13,7 +14,7 @@ Restart=on-failure
|
|||||||
WorkingDirectory=/tmp
|
WorkingDirectory=/tmp
|
||||||
RuntimeDirectory=cc-metric-collector
|
RuntimeDirectory=cc-metric-collector
|
||||||
RuntimeDirectoryMode=0750
|
RuntimeDirectoryMode=0750
|
||||||
ExecStart=/usr/bin/cc-metric-collector --config=${CONF_FILE}
|
ExecStart=/usr/sbin/cc-metric-collector --config=${CONF_FILE}
|
||||||
LimitNOFILE=10000
|
LimitNOFILE=10000
|
||||||
TimeoutStopSec=20
|
TimeoutStopSec=20
|
||||||
UMask=0027
|
UMask=0027
|
||||||
|
|||||||
@@ -10,8 +10,6 @@ BuildRequires: go-toolset
|
|||||||
BuildRequires: systemd-rpm-macros
|
BuildRequires: systemd-rpm-macros
|
||||||
# for header downloads
|
# for header downloads
|
||||||
BuildRequires: wget
|
BuildRequires: wget
|
||||||
# Recommended when using the sysusers_create_package macro
|
|
||||||
Requires(pre): /usr/bin/systemd-sysusers
|
|
||||||
|
|
||||||
Provides: %{name} = %{version}
|
Provides: %{name} = %{version}
|
||||||
|
|
||||||
@@ -29,7 +27,7 @@ make
|
|||||||
|
|
||||||
|
|
||||||
%install
|
%install
|
||||||
install -Dpm 0750 %{name} %{buildroot}%{_bindir}/%{name}
|
install -Dpm 0750 %{name} %{buildroot}%{_sbindir}/%{name}
|
||||||
install -Dpm 0600 config.json %{buildroot}%{_sysconfdir}/%{name}/%{name}.json
|
install -Dpm 0600 config.json %{buildroot}%{_sysconfdir}/%{name}/%{name}.json
|
||||||
install -Dpm 0600 collectors.json %{buildroot}%{_sysconfdir}/%{name}/collectors.json
|
install -Dpm 0600 collectors.json %{buildroot}%{_sysconfdir}/%{name}/collectors.json
|
||||||
install -Dpm 0600 sinks.json %{buildroot}%{_sysconfdir}/%{name}/sinks.json
|
install -Dpm 0600 sinks.json %{buildroot}%{_sysconfdir}/%{name}/sinks.json
|
||||||
@@ -44,7 +42,7 @@ install -Dpm 0644 scripts/%{name}.sysusers %{buildroot}%{_sysusersdir}/%{name}.c
|
|||||||
# go test should be here... :)
|
# go test should be here... :)
|
||||||
|
|
||||||
%pre
|
%pre
|
||||||
%sysusers_create_package %{name} scripts/%{name}.sysusers
|
%sysusers_create_package scripts/%{name}.sysusers
|
||||||
|
|
||||||
%post
|
%post
|
||||||
%systemd_post %{name}.service
|
%systemd_post %{name}.service
|
||||||
@@ -54,7 +52,7 @@ install -Dpm 0644 scripts/%{name}.sysusers %{buildroot}%{_sysusersdir}/%{name}.c
|
|||||||
|
|
||||||
%files
|
%files
|
||||||
# Binary
|
# Binary
|
||||||
%attr(-,clustercockpit,clustercockpit) %{_bindir}/%{name}
|
%attr(-,clustercockpit,clustercockpit) %{_sbindir}/%{name}
|
||||||
# Config
|
# Config
|
||||||
%dir %{_sysconfdir}/%{name}
|
%dir %{_sysconfdir}/%{name}
|
||||||
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/%{name}.json
|
%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/%{name}.json
|
||||||
|
|||||||
@@ -1,175 +0,0 @@
|
|||||||
#!/bin/bash -l
|
|
||||||
|
|
||||||
SRCDIR="$(pwd)"
|
|
||||||
DESTDIR="$1"
|
|
||||||
|
|
||||||
if [ -z "$DESTDIR" ]; then
|
|
||||||
echo "Destination folder not provided"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
COLLECTORS=$(find "${SRCDIR}/collectors" -name "*Metric.md")
|
|
||||||
SINKS=$(find "${SRCDIR}/sinks" -name "*Sink.md")
|
|
||||||
RECEIVERS=$(find "${SRCDIR}/receivers" -name "*Receiver.md")
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Collectors
|
|
||||||
mkdir -p "${DESTDIR}/collectors"
|
|
||||||
for F in $COLLECTORS; do
|
|
||||||
echo "$F"
|
|
||||||
FNAME=$(basename "$F")
|
|
||||||
TITLE=$(grep -E "^##" "$F" | head -n 1 | sed -e 's+## ++g')
|
|
||||||
echo "'${TITLE//\`/}'"
|
|
||||||
if [ "${TITLE}" == "" ]; then continue; fi
|
|
||||||
rm --force "${DESTDIR}/collectors/${FNAME}"
|
|
||||||
cat << EOF >> "${DESTDIR}/collectors/${FNAME}"
|
|
||||||
---
|
|
||||||
title: ${TITLE//\`/}
|
|
||||||
description: >
|
|
||||||
Toplevel ${FNAME/.md/}
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Collector, ${FNAME/Metric.md/}]
|
|
||||||
weight: 2
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "$F" >> "${DESTDIR}/collectors/${FNAME}"
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -e "${SRCDIR}/collectors/README.md" ]; then
|
|
||||||
cat << EOF > "${DESTDIR}/collectors/_index.md"
|
|
||||||
---
|
|
||||||
title: cc-metric-collector's collectors
|
|
||||||
description: Documentation of cc-metric-collector's collectors
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Collector, General]
|
|
||||||
weight: 40
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "${SRCDIR}/collectors/README.md" >> "${DESTDIR}/collectors/_index.md"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Sinks
|
|
||||||
mkdir -p "${DESTDIR}/sinks"
|
|
||||||
for F in $SINKS; do
|
|
||||||
echo "$F"
|
|
||||||
FNAME=$(basename "$F")
|
|
||||||
TITLE=$(grep -E "^##" "$F" | head -n 1 | sed -e 's+## ++g')
|
|
||||||
echo "'${TITLE//\`/}'"
|
|
||||||
if [ "${TITLE}" == "" ]; then continue; fi
|
|
||||||
rm --force "${DESTDIR}/sinks/${FNAME}"
|
|
||||||
cat << EOF >> "${DESTDIR}/sinks/${FNAME}"
|
|
||||||
---
|
|
||||||
title: ${TITLE//\`/}
|
|
||||||
description: >
|
|
||||||
Toplevel ${FNAME/.md/}
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Sink, ${FNAME/Sink.md/}]
|
|
||||||
weight: 2
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "$F" >> "${DESTDIR}/sinks/${FNAME}"
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -e "${SRCDIR}/collectors/README.md" ]; then
|
|
||||||
cat << EOF > "${DESTDIR}/sinks/_index.md"
|
|
||||||
---
|
|
||||||
title: cc-metric-collector's sinks
|
|
||||||
description: Documentation of cc-metric-collector's sinks
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Sink, General]
|
|
||||||
weight: 40
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "${SRCDIR}/sinks/README.md" >> "${DESTDIR}/sinks/_index.md"
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
# Receivers
|
|
||||||
mkdir -p "${DESTDIR}/receivers"
|
|
||||||
for F in $RECEIVERS; do
|
|
||||||
echo "$F"
|
|
||||||
FNAME=$(basename "$F")
|
|
||||||
TITLE=$(grep -E "^##" "$F" | head -n 1 | sed -e 's+## ++g')
|
|
||||||
echo "'${TITLE//\`/}'"
|
|
||||||
if [ "${TITLE}" == "" ]; then continue; fi
|
|
||||||
rm --force "${DESTDIR}/receivers/${FNAME}"
|
|
||||||
cat << EOF >> "${DESTDIR}/receivers/${FNAME}"
|
|
||||||
---
|
|
||||||
title: ${TITLE//\`/}
|
|
||||||
description: >
|
|
||||||
Toplevel ${FNAME/.md/}
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Receiver, ${FNAME/Receiver.md/}]
|
|
||||||
weight: 2
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "$F" >> "${DESTDIR}/receivers/${FNAME}"
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -e "${SRCDIR}/receivers/README.md" ]; then
|
|
||||||
cat << EOF > "${DESTDIR}/receivers/_index.md"
|
|
||||||
---
|
|
||||||
title: cc-metric-collector's receivers
|
|
||||||
description: Documentation of cc-metric-collector's receivers
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Receiver, General]
|
|
||||||
weight: 40
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "${SRCDIR}/receivers/README.md" >> "${DESTDIR}/receivers/_index.md"
|
|
||||||
fi
|
|
||||||
|
|
||||||
mkdir -p "${DESTDIR}/internal/metricRouter"
|
|
||||||
if [ -e "${SRCDIR}/internal/metricRouter/README.md" ]; then
|
|
||||||
cat << EOF > "${DESTDIR}/internal/metricRouter/_index.md"
|
|
||||||
---
|
|
||||||
title: cc-metric-collector's router
|
|
||||||
description: Documentation of cc-metric-collector's router
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Router, General]
|
|
||||||
weight: 40
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "${SRCDIR}/internal/metricRouter/README.md" >> "${DESTDIR}/internal/metricRouter/_index.md"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -e "${SRCDIR}/README.md" ]; then
|
|
||||||
cat << EOF > "${DESTDIR}/_index.md"
|
|
||||||
---
|
|
||||||
title: cc-metric-collector
|
|
||||||
description: Documentation of cc-metric-collector
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, General]
|
|
||||||
weight: 40
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "${SRCDIR}/README.md" >> "${DESTDIR}/_index.md"
|
|
||||||
sed -i -e 's+README.md+_index.md+g' "${DESTDIR}/_index.md"
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
mkdir -p "${DESTDIR}/pkg/messageProcessor"
|
|
||||||
if [ -e "${SRCDIR}/pkg/messageProcessor/README.md" ]; then
|
|
||||||
cat << EOF > "${DESTDIR}/pkg/messageProcessor/_index.md"
|
|
||||||
---
|
|
||||||
title: cc-metric-collector's message processor
|
|
||||||
description: Documentation of cc-metric-collector's message processor
|
|
||||||
categories: [cc-metric-collector]
|
|
||||||
tags: [cc-metric-collector, Message Processor]
|
|
||||||
weight: 40
|
|
||||||
---
|
|
||||||
|
|
||||||
EOF
|
|
||||||
cat "${SRCDIR}/pkg/messageProcessor/README.md" >> "${DESTDIR}/pkg/messageProcessor/_index.md"
|
|
||||||
fi
|
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ def group_to_json(groupfile):
|
|||||||
if "PWR" in calc:
|
if "PWR" in calc:
|
||||||
scope = "socket"
|
scope = "socket"
|
||||||
|
|
||||||
m = {"name" : metric, "calc": calc, "type" : scope, "publish" : True}
|
m = {"name" : metric, "calc": calc, "scope" : scope, "publish" : True}
|
||||||
metrics.append(m)
|
metrics.append(m)
|
||||||
return {"events" : events, "metrics" : metrics}
|
return {"events" : events, "metrics" : metrics}
|
||||||
|
|
||||||
|
|||||||
106
sinks/README.md
Normal file
106
sinks/README.md
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
# CCMetric sinks
|
||||||
|
|
||||||
|
This folder contains the SinkManager and sink implementations for the cc-metric-collector.
|
||||||
|
|
||||||
|
# Available sinks:
|
||||||
|
- [`stdout`](./stdoutSink.md): Print all metrics to `stdout`, `stderr` or a file
|
||||||
|
- [`http`](./httpSink.md): Send metrics to an HTTP server as POST requests
|
||||||
|
- [`influxdb`](./influxSink.md): Send metrics to an [InfluxDB](https://www.influxdata.com/products/influxdb/) database
|
||||||
|
- [`influxasync`](./influxAsyncSink.md): Send metrics to an [InfluxDB](https://www.influxdata.com/products/influxdb/) database with non-blocking write API
|
||||||
|
- [`nats`](./natsSink.md): Publish metrics to the [NATS](https://nats.io/) network overlay system
|
||||||
|
- [`ganglia`](./gangliaSink.md): Publish metrics in the [Ganglia Monitoring System](http://ganglia.info/) using the `gmetric` CLI tool
|
||||||
|
- [`libganglia`](./libgangliaSink.md): Publish metrics in the [Ganglia Monitoring System](http://ganglia.info/) directly using `libganglia.so`
|
||||||
|
- [`prometeus`](./prometheusSink.md): Publish metrics for the [Prometheus Monitoring System](https://prometheus.io/)
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
|
||||||
|
The configuration file for the sinks is a list of configurations. The `type` field in each specifies which sink to initialize.
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
"mystdout" : {
|
||||||
|
"type" : "stdout",
|
||||||
|
"meta_as_tags" : false
|
||||||
|
},
|
||||||
|
"metricstore" : {
|
||||||
|
"type" : "http",
|
||||||
|
"host" : "localhost",
|
||||||
|
"port" : "4123",
|
||||||
|
"database" : "ccmetric",
|
||||||
|
"password" : "<jwt token>"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Contributing own sinks
|
||||||
|
A sink contains five functions and is derived from the type `sink`:
|
||||||
|
* `Init(name string, config json.RawMessage) error`
|
||||||
|
* `Write(point CCMetric) error`
|
||||||
|
* `Flush() error`
|
||||||
|
* `Close()`
|
||||||
|
* `New<Typename>(name string, config json.RawMessage) (Sink, error)` (calls the `Init()` function)
|
||||||
|
|
||||||
|
The data structures should be set up in `Init()` like opening a file or server connection. The `Write()` function writes/sends the data. For non-blocking sinks, the `Flush()` method tells the sink to drain its internal buffers. The `Close()` function should tear down anything created in `Init()`.
|
||||||
|
|
||||||
|
Finally, the sink needs to be registered in the `sinkManager.go`. There is a list of sinks called `AvailableSinks` which is a map (`sink_type_string` -> `pointer to sink interface`). Add a new entry with a descriptive name and the new sink.
|
||||||
|
|
||||||
|
## Sample sink
|
||||||
|
|
||||||
|
```go
|
||||||
|
package sinks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"log"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SampleSinkConfig struct {
|
||||||
|
defaultSinkConfig // defines JSON tags for 'name' and 'meta_as_tags'
|
||||||
|
}
|
||||||
|
|
||||||
|
type SampleSink struct {
|
||||||
|
sink // declarate 'name' and 'meta_as_tags'
|
||||||
|
config StdoutSinkConfig // entry point to the SampleSinkConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize the sink by giving it a name and reading in the config JSON
|
||||||
|
func (s *SampleSink) Init(name string, config json.RawMessage) error {
|
||||||
|
s.name = fmt.Sprintf("SampleSink(%s)", name) // Always specify a name here
|
||||||
|
// Read in the config JSON
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &s.config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code to submit a single CCMetric to the sink
|
||||||
|
func (s *SampleSink) Write(point lp.CCMetric) error {
|
||||||
|
log.Print(point)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the sink uses batched sends internally, you can tell to flush its buffers
|
||||||
|
func (s *SampleSink) Flush() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Close sink: close network connection, close files, close libraries, ...
|
||||||
|
func (s *SampleSink) Close() {}
|
||||||
|
|
||||||
|
|
||||||
|
// New function to create a new instance of the sink
|
||||||
|
func NewSampleSink(name string, config json.RawMessage) (Sink, error) {
|
||||||
|
s := new(SampleSink)
|
||||||
|
err := s.Init(name, config)
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
269
sinks/gangliaCommon.go
Normal file
269
sinks/gangliaCommon.go
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
package sinks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GangliaMetricName(point lp.CCMetric) string {
|
||||||
|
name := point.Name()
|
||||||
|
metricType, typeOK := point.GetTag("type")
|
||||||
|
metricTid, tidOk := point.GetTag("type-id")
|
||||||
|
gangliaType := metricType + metricTid
|
||||||
|
if strings.Contains(name, metricType) && tidOk {
|
||||||
|
name = strings.Replace(name, metricType, gangliaType, -1)
|
||||||
|
} else if typeOK && tidOk {
|
||||||
|
name = metricType + metricTid + "_" + name
|
||||||
|
} else if point.HasTag("device") {
|
||||||
|
device, _ := point.GetTag("device")
|
||||||
|
name = name + "_" + device
|
||||||
|
}
|
||||||
|
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
func GangliaMetricRename(name string) string {
|
||||||
|
if name == "net_bytes_in" {
|
||||||
|
return "bytes_in"
|
||||||
|
} else if name == "net_bytes_out" {
|
||||||
|
return "bytes_out"
|
||||||
|
} else if name == "net_pkts_in" {
|
||||||
|
return "pkts_in"
|
||||||
|
} else if name == "net_pkts_out" {
|
||||||
|
return "pkts_out"
|
||||||
|
} else if name == "cpu_iowait" {
|
||||||
|
return "cpu_wio"
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
func GangliaSlopeType(point lp.CCMetric) uint {
|
||||||
|
name := point.Name()
|
||||||
|
if name == "mem_total" || name == "swap_total" {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 3
|
||||||
|
}
|
||||||
|
|
||||||
|
const DEFAULT_GANGLIA_METRIC_TMAX = 300
|
||||||
|
const DEFAULT_GANGLIA_METRIC_SLOPE = "both"
|
||||||
|
|
||||||
|
type GangliaMetric struct {
|
||||||
|
Name string
|
||||||
|
Type string
|
||||||
|
Slope string
|
||||||
|
Tmax int
|
||||||
|
Unit string
|
||||||
|
}
|
||||||
|
|
||||||
|
type GangliaMetricGroup struct {
|
||||||
|
Name string
|
||||||
|
Metrics []GangliaMetric
|
||||||
|
}
|
||||||
|
|
||||||
|
var CommonGangliaMetrics = []GangliaMetricGroup{
|
||||||
|
{
|
||||||
|
Name: "memory",
|
||||||
|
Metrics: []GangliaMetric{
|
||||||
|
{"mem_total", "float", "zero", 1200, "KB"},
|
||||||
|
{"swap_total", "float", "zero", 1200, "KB"},
|
||||||
|
{"mem_free", "float", "both", 180, "KB"},
|
||||||
|
{"mem_shared", "float", "both", 180, "KB"},
|
||||||
|
{"mem_buffers", "float", "both", 180, "KB"},
|
||||||
|
{"mem_cached", "float", "both", 180, "KB"},
|
||||||
|
{"swap_free", "float", "both", 180, "KB"},
|
||||||
|
{"mem_sreclaimable", "float", "both", 180, "KB"},
|
||||||
|
{"mem_slab", "float", "both", 180, "KB"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "cpu",
|
||||||
|
Metrics: []GangliaMetric{
|
||||||
|
{"cpu_num", "uint32", "zero", 1200, "CPUs"},
|
||||||
|
{"cpu_speed", "uint32", "zero", 1200, "MHz"},
|
||||||
|
{"cpu_user", "float", "both", 90, "%"},
|
||||||
|
{"cpu_nice", "float", "both", 90, "%"},
|
||||||
|
{"cpu_system", "float", "both", 90, "%"},
|
||||||
|
{"cpu_idle", "float", "both", 3800, "%"},
|
||||||
|
{"cpu_aidle", "float", "both", 90, "%"},
|
||||||
|
{"cpu_wio", "float", "both", 90, "%"},
|
||||||
|
{"cpu_intr", "float", "both", 90, "%"},
|
||||||
|
{"cpu_sintr", "float", "both", 90, "%"},
|
||||||
|
{"cpu_steal", "float", "both", 90, "%"},
|
||||||
|
{"cpu_guest", "float", "both", 90, "%"},
|
||||||
|
{"cpu_gnice", "float", "both", 90, "%"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "load",
|
||||||
|
Metrics: []GangliaMetric{
|
||||||
|
{"load_one", "float", "both", 70, ""},
|
||||||
|
{"load_five", "float", "both", 325, ""},
|
||||||
|
{"load_fifteen", "float", "both", 950, ""},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "disk",
|
||||||
|
Metrics: []GangliaMetric{
|
||||||
|
{"disk_total", "double", "both", 1200, "GB"},
|
||||||
|
{"disk_free", "double", "both", 180, "GB"},
|
||||||
|
{"part_max_used", "float", "both", 180, "%"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "network",
|
||||||
|
Metrics: []GangliaMetric{
|
||||||
|
{"bytes_out", "float", "both", 300, "bytes/sec"},
|
||||||
|
{"bytes_in", "float", "both", 300, "bytes/sec"},
|
||||||
|
{"pkts_in", "float", "both", 300, "packets/sec"},
|
||||||
|
{"pkts_out", "float", "both", 300, "packets/sec"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "process",
|
||||||
|
Metrics: []GangliaMetric{
|
||||||
|
{"proc_run", "uint32", "both", 950, ""},
|
||||||
|
{"proc_total", "uint32", "both", 950, ""},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "system",
|
||||||
|
Metrics: []GangliaMetric{
|
||||||
|
{"boottime", "uint32", "zero", 1200, "s"},
|
||||||
|
{"sys_clock", "uint32", "zero", 1200, "s"},
|
||||||
|
{"machine_type", "string", "zero", 1200, ""},
|
||||||
|
{"os_name", "string", "zero", 1200, ""},
|
||||||
|
{"os_release", "string", "zero", 1200, ""},
|
||||||
|
{"mtu", "uint32", "both", 1200, ""},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
type GangliaMetricConfig struct {
|
||||||
|
Type string
|
||||||
|
Slope string
|
||||||
|
Tmax int
|
||||||
|
Unit string
|
||||||
|
Group string
|
||||||
|
Value string
|
||||||
|
Name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetCommonGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
|
||||||
|
mname := GangliaMetricRename(point.Name())
|
||||||
|
if oldname, ok := point.GetMeta("oldname"); ok {
|
||||||
|
mname = GangliaMetricRename(oldname)
|
||||||
|
}
|
||||||
|
for _, group := range CommonGangliaMetrics {
|
||||||
|
for _, metric := range group.Metrics {
|
||||||
|
if metric.Name == mname {
|
||||||
|
valueStr := ""
|
||||||
|
value, ok := point.GetField("value")
|
||||||
|
if ok {
|
||||||
|
switch real := value.(type) {
|
||||||
|
case float64:
|
||||||
|
valueStr = fmt.Sprintf("%f", real)
|
||||||
|
case float32:
|
||||||
|
valueStr = fmt.Sprintf("%f", real)
|
||||||
|
case int64:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
case int32:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
case int:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
case uint64:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
case uint32:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
case uint:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
case string:
|
||||||
|
valueStr = real
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return GangliaMetricConfig{
|
||||||
|
Group: group.Name,
|
||||||
|
Type: metric.Type,
|
||||||
|
Slope: metric.Slope,
|
||||||
|
Tmax: metric.Tmax,
|
||||||
|
Unit: metric.Unit,
|
||||||
|
Value: valueStr,
|
||||||
|
Name: GangliaMetricRename(mname),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return GangliaMetricConfig{
|
||||||
|
Group: "",
|
||||||
|
Type: "",
|
||||||
|
Slope: "",
|
||||||
|
Tmax: 0,
|
||||||
|
Unit: "",
|
||||||
|
Value: "",
|
||||||
|
Name: "",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetGangliaConfig(point lp.CCMetric) GangliaMetricConfig {
|
||||||
|
mname := GangliaMetricRename(point.Name())
|
||||||
|
if oldname, ok := point.GetMeta("oldname"); ok {
|
||||||
|
mname = GangliaMetricRename(oldname)
|
||||||
|
}
|
||||||
|
group := ""
|
||||||
|
if g, ok := point.GetMeta("group"); ok {
|
||||||
|
group = g
|
||||||
|
}
|
||||||
|
unit := ""
|
||||||
|
if u, ok := point.GetMeta("unit"); ok {
|
||||||
|
unit = u
|
||||||
|
}
|
||||||
|
valueType := "double"
|
||||||
|
valueStr := ""
|
||||||
|
value, ok := point.GetField("value")
|
||||||
|
if ok {
|
||||||
|
switch real := value.(type) {
|
||||||
|
case float64:
|
||||||
|
valueStr = fmt.Sprintf("%f", real)
|
||||||
|
valueType = "double"
|
||||||
|
case float32:
|
||||||
|
valueStr = fmt.Sprintf("%f", real)
|
||||||
|
valueType = "float"
|
||||||
|
case int64:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
valueType = "int32"
|
||||||
|
case int32:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
valueType = "int32"
|
||||||
|
case int:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
valueType = "int32"
|
||||||
|
case uint64:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
valueType = "uint32"
|
||||||
|
case uint32:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
valueType = "uint32"
|
||||||
|
case uint:
|
||||||
|
valueStr = fmt.Sprintf("%d", real)
|
||||||
|
valueType = "uint32"
|
||||||
|
case string:
|
||||||
|
valueStr = real
|
||||||
|
valueType = "string"
|
||||||
|
default:
|
||||||
|
valueType = "invalid"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return GangliaMetricConfig{
|
||||||
|
Group: group,
|
||||||
|
Type: valueType,
|
||||||
|
Slope: DEFAULT_GANGLIA_METRIC_SLOPE,
|
||||||
|
Tmax: DEFAULT_GANGLIA_METRIC_TMAX,
|
||||||
|
Unit: unit,
|
||||||
|
Value: valueStr,
|
||||||
|
Name: GangliaMetricRename(mname),
|
||||||
|
}
|
||||||
|
}
|
||||||
124
sinks/gangliaSink.go
Normal file
124
sinks/gangliaSink.go
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
package sinks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
// "time"
|
||||||
|
"os/exec"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
)
|
||||||
|
|
||||||
|
const GMETRIC_EXEC = `gmetric`
|
||||||
|
const GMETRIC_CONFIG = `/etc/ganglia/gmond.conf`
|
||||||
|
|
||||||
|
type GangliaSinkConfig struct {
|
||||||
|
defaultSinkConfig
|
||||||
|
GmetricPath string `json:"gmetric_path,omitempty"`
|
||||||
|
GmetricConfig string `json:"gmetric_config,omitempty"`
|
||||||
|
AddGangliaGroup bool `json:"add_ganglia_group,omitempty"`
|
||||||
|
AddTagsAsDesc bool `json:"add_tags_as_desc,omitempty"`
|
||||||
|
ClusterName string `json:"cluster_name,omitempty"`
|
||||||
|
AddTypeToName bool `json:"add_type_to_name,omitempty"`
|
||||||
|
AddUnits bool `json:"add_units,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GangliaSink struct {
|
||||||
|
sink
|
||||||
|
gmetric_path string
|
||||||
|
gmetric_config string
|
||||||
|
config GangliaSinkConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GangliaSink) Write(point lp.CCMetric) error {
|
||||||
|
var err error = nil
|
||||||
|
//var tagsstr []string
|
||||||
|
var argstr []string
|
||||||
|
|
||||||
|
// Get metric config (type, value, ... in suitable format)
|
||||||
|
conf := GetCommonGangliaConfig(point)
|
||||||
|
if len(conf.Type) == 0 {
|
||||||
|
conf = GetGangliaConfig(point)
|
||||||
|
}
|
||||||
|
if len(conf.Type) == 0 {
|
||||||
|
return fmt.Errorf("metric %q (Ganglia name %q) has no 'value' field", point.Name(), conf.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.config.AddGangliaGroup {
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--group=%s", conf.Group))
|
||||||
|
}
|
||||||
|
if s.config.AddUnits && len(conf.Unit) > 0 {
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--units=%s", conf.Unit))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(s.config.ClusterName) > 0 {
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--cluster=%s", s.config.ClusterName))
|
||||||
|
}
|
||||||
|
// if s.config.AddTagsAsDesc && len(tagsstr) > 0 {
|
||||||
|
// argstr = append(argstr, fmt.Sprintf("--desc=%q", strings.Join(tagsstr, ",")))
|
||||||
|
// }
|
||||||
|
if len(s.gmetric_config) > 0 {
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--conf=%s", s.gmetric_config))
|
||||||
|
}
|
||||||
|
if s.config.AddTypeToName {
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--name=%s", GangliaMetricName(point)))
|
||||||
|
} else {
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--name=%s", conf.Name))
|
||||||
|
}
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--slope=%s", conf.Slope))
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--value=%s", conf.Value))
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--type=%s", conf.Type))
|
||||||
|
argstr = append(argstr, fmt.Sprintf("--tmax=%d", conf.Tmax))
|
||||||
|
|
||||||
|
cclog.ComponentDebug(s.name, s.gmetric_path, strings.Join(argstr, " "))
|
||||||
|
command := exec.Command(s.gmetric_path, argstr...)
|
||||||
|
command.Wait()
|
||||||
|
_, err = command.Output()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GangliaSink) Flush() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GangliaSink) Close() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewGangliaSink(name string, config json.RawMessage) (Sink, error) {
|
||||||
|
s := new(GangliaSink)
|
||||||
|
s.name = fmt.Sprintf("GangliaSink(%s)", name)
|
||||||
|
s.config.AddTagsAsDesc = false
|
||||||
|
s.config.AddGangliaGroup = false
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &s.config)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(s.name, "Error reading config for", s.name, ":", err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.gmetric_path = ""
|
||||||
|
s.gmetric_config = ""
|
||||||
|
if len(s.config.GmetricPath) > 0 {
|
||||||
|
p, err := exec.LookPath(s.config.GmetricPath)
|
||||||
|
if err == nil {
|
||||||
|
s.gmetric_path = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(s.gmetric_path) == 0 {
|
||||||
|
p, err := exec.LookPath(string(GMETRIC_EXEC))
|
||||||
|
if err == nil {
|
||||||
|
s.gmetric_path = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(s.gmetric_path) == 0 {
|
||||||
|
return nil, errors.New("cannot find executable 'gmetric'")
|
||||||
|
}
|
||||||
|
if len(s.config.GmetricConfig) > 0 {
|
||||||
|
s.gmetric_config = s.config.GmetricConfig
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
21
sinks/gangliaSink.md
Normal file
21
sinks/gangliaSink.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
## `ganglia` sink
|
||||||
|
|
||||||
|
The `ganglia` sink uses the `gmetric` tool of the [Ganglia Monitoring System](http://ganglia.info/) to submit the metrics
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<name>": {
|
||||||
|
"type": "ganglia",
|
||||||
|
"meta_as_tags" : true,
|
||||||
|
"gmetric_path" : "/path/to/gmetric",
|
||||||
|
"add_ganglia_group" : true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `type`: makes the sink an `ganglia` sink
|
||||||
|
- `meta_as_tags`: print all meta information as tags in the output (optional)
|
||||||
|
- `gmetric_path`: Path to `gmetric` executable (optional). If not given, the sink searches in `$PATH` for `gmetric`.
|
||||||
|
- `add_ganglia_group`: Add `--group=X` based on meta information to the `gmetric` call. Some old versions of `gmetric` do not support the `--group` option.
|
||||||
180
sinks/httpSink.go
Normal file
180
sinks/httpSink.go
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
package sinks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
influx "github.com/influxdata/line-protocol"
|
||||||
|
)
|
||||||
|
|
||||||
|
type HttpSinkConfig struct {
|
||||||
|
defaultSinkConfig
|
||||||
|
URL string `json:"url,omitempty"`
|
||||||
|
JWT string `json:"jwt,omitempty"`
|
||||||
|
Timeout string `json:"timeout,omitempty"`
|
||||||
|
IdleConnTimeout string `json:"idle_connection_timeout,omitempty"`
|
||||||
|
FlushDelay string `json:"flush_delay,omitempty"`
|
||||||
|
MaxRetries int `json:"max_retries,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type HttpSink struct {
|
||||||
|
sink
|
||||||
|
client *http.Client
|
||||||
|
encoder *influx.Encoder
|
||||||
|
lock sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
|
||||||
|
buffer *bytes.Buffer
|
||||||
|
flushTimer *time.Timer
|
||||||
|
config HttpSinkConfig
|
||||||
|
idleConnTimeout time.Duration
|
||||||
|
timeout time.Duration
|
||||||
|
flushDelay time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *HttpSink) Write(m lp.CCMetric) error {
|
||||||
|
p := m.ToPoint(s.meta_as_tags)
|
||||||
|
s.lock.Lock()
|
||||||
|
firstWriteOfBatch := s.buffer.Len() == 0
|
||||||
|
_, err := s.encoder.Encode(p)
|
||||||
|
s.lock.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(s.name, "encoding failed:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.flushDelay == 0 {
|
||||||
|
return s.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
if firstWriteOfBatch {
|
||||||
|
if s.flushTimer == nil {
|
||||||
|
s.flushTimer = time.AfterFunc(s.flushDelay, func() {
|
||||||
|
if err := s.Flush(); err != nil {
|
||||||
|
cclog.ComponentError(s.name, "flush failed:", err.Error())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
s.flushTimer.Reset(s.flushDelay)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *HttpSink) Flush() error {
|
||||||
|
// Own lock for as short as possible: the time it takes to copy the buffer.
|
||||||
|
s.lock.Lock()
|
||||||
|
buf := make([]byte, s.buffer.Len())
|
||||||
|
copy(buf, s.buffer.Bytes())
|
||||||
|
s.buffer.Reset()
|
||||||
|
s.lock.Unlock()
|
||||||
|
if len(buf) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var res *http.Response
|
||||||
|
for i := 0; i < s.config.MaxRetries; i++ {
|
||||||
|
// Create new request to send buffer
|
||||||
|
req, err := http.NewRequest(http.MethodPost, s.config.URL, bytes.NewReader(buf))
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(s.name, "failed to create request:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set authorization header
|
||||||
|
if len(s.config.JWT) != 0 {
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.config.JWT))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do request
|
||||||
|
res, err = s.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(s.name, "transport/tcp error:", err.Error())
|
||||||
|
// Wait between retries
|
||||||
|
time.Sleep(time.Duration(i+1) * (time.Second / 2))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if res == nil {
|
||||||
|
return errors.New("flush failed due to repeated errors")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle application errors
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
err := errors.New(res.Status)
|
||||||
|
cclog.ComponentError(s.name, "application error:", err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *HttpSink) Close() {
|
||||||
|
s.flushTimer.Stop()
|
||||||
|
if err := s.Flush(); err != nil {
|
||||||
|
cclog.ComponentError(s.name, "flush failed:", err.Error())
|
||||||
|
}
|
||||||
|
s.client.CloseIdleConnections()
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHttpSink(name string, config json.RawMessage) (Sink, error) {
|
||||||
|
s := new(HttpSink)
|
||||||
|
// Set default values
|
||||||
|
s.name = fmt.Sprintf("HttpSink(%s)", name)
|
||||||
|
s.config.IdleConnTimeout = "120s" // should be larger than the measurement interval.
|
||||||
|
s.config.Timeout = "5s"
|
||||||
|
s.config.FlushDelay = "5s"
|
||||||
|
s.config.MaxRetries = 3
|
||||||
|
|
||||||
|
// Read config
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &s.config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(s.config.URL) == 0 {
|
||||||
|
return nil, errors.New("`url` config option is required for HTTP sink")
|
||||||
|
}
|
||||||
|
if len(s.config.IdleConnTimeout) > 0 {
|
||||||
|
t, err := time.ParseDuration(s.config.IdleConnTimeout)
|
||||||
|
if err == nil {
|
||||||
|
s.idleConnTimeout = t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(s.config.Timeout) > 0 {
|
||||||
|
t, err := time.ParseDuration(s.config.Timeout)
|
||||||
|
if err == nil {
|
||||||
|
s.timeout = t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(s.config.FlushDelay) > 0 {
|
||||||
|
t, err := time.ParseDuration(s.config.FlushDelay)
|
||||||
|
if err == nil {
|
||||||
|
s.flushDelay = t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Create lookup map to use meta infos as tags in the output metric
|
||||||
|
s.meta_as_tags = make(map[string]bool)
|
||||||
|
for _, k := range s.config.MetaAsTags {
|
||||||
|
s.meta_as_tags[k] = true
|
||||||
|
}
|
||||||
|
tr := &http.Transport{
|
||||||
|
MaxIdleConns: 1, // We will only ever talk to one host.
|
||||||
|
IdleConnTimeout: s.idleConnTimeout,
|
||||||
|
}
|
||||||
|
s.client = &http.Client{Transport: tr, Timeout: s.timeout}
|
||||||
|
s.buffer = &bytes.Buffer{}
|
||||||
|
s.encoder = influx.NewEncoder(s.buffer)
|
||||||
|
s.encoder.SetPrecision(time.Second)
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
29
sinks/httpSink.md
Normal file
29
sinks/httpSink.md
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
## `http` sink
|
||||||
|
|
||||||
|
The `http` sink uses POST requests to a HTTP server to submit the metrics in the InfluxDB line-protocol format. It uses JSON web tokens for authentification. The sink creates batches of metrics before sending, to reduce the HTTP traffic.
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<name>": {
|
||||||
|
"type": "http",
|
||||||
|
"meta_as_tags" : true,
|
||||||
|
"url" : "https://my-monitoring.example.com:1234/api/write",
|
||||||
|
"jwt" : "blabla.blabla.blabla",
|
||||||
|
"timeout": "5s",
|
||||||
|
"max_idle_connections" : 10,
|
||||||
|
"idle_connection_timeout" : "5s",
|
||||||
|
"flush_delay": "2s",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `type`: makes the sink an `http` sink
|
||||||
|
- `meta_as_tags`: print all meta information as tags in the output (optional)
|
||||||
|
- `url`: The full URL of the endpoint
|
||||||
|
- `jwt`: JSON web tokens for authentification (Using the *Bearer* scheme)
|
||||||
|
- `timeout`: General timeout for the HTTP client (default '5s')
|
||||||
|
- `max_idle_connections`: Maximally idle connections (default 10)
|
||||||
|
- `idle_connection_timeout`: Timeout for idle connections (default '5s')
|
||||||
|
- `flush_delay`: Batch all writes arriving in during this duration (default '1s', batching can be disabled by setting it to 0)
|
||||||
239
sinks/influxAsyncSink.go
Normal file
239
sinks/influxAsyncSink.go
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
package sinks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
|
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
|
influxdb2ApiHttp "github.com/influxdata/influxdb-client-go/v2/api/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
type InfluxAsyncSinkConfig struct {
|
||||||
|
defaultSinkConfig
|
||||||
|
Host string `json:"host,omitempty"`
|
||||||
|
Port string `json:"port,omitempty"`
|
||||||
|
Database string `json:"database,omitempty"`
|
||||||
|
User string `json:"user,omitempty"`
|
||||||
|
Password string `json:"password,omitempty"`
|
||||||
|
Organization string `json:"organization,omitempty"`
|
||||||
|
SSL bool `json:"ssl,omitempty"`
|
||||||
|
// Maximum number of points sent to server in single request. Default 5000
|
||||||
|
BatchSize uint `json:"batch_size,omitempty"`
|
||||||
|
// Interval, in ms, in which is buffer flushed if it has not been already written (by reaching batch size) . Default 1000ms
|
||||||
|
FlushInterval uint `json:"flush_interval,omitempty"`
|
||||||
|
InfluxRetryInterval string `json:"retry_interval,omitempty"`
|
||||||
|
InfluxExponentialBase uint `json:"retry_exponential_base,omitempty"`
|
||||||
|
InfluxMaxRetries uint `json:"max_retries,omitempty"`
|
||||||
|
InfluxMaxRetryTime string `json:"max_retry_time,omitempty"`
|
||||||
|
CustomFlushInterval string `json:"custom_flush_interval,omitempty"`
|
||||||
|
MaxRetryAttempts uint `json:"max_retry_attempts,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type InfluxAsyncSink struct {
|
||||||
|
sink
|
||||||
|
client influxdb2.Client
|
||||||
|
writeApi influxdb2Api.WriteAPI
|
||||||
|
errors <-chan error
|
||||||
|
config InfluxAsyncSinkConfig
|
||||||
|
influxRetryInterval uint
|
||||||
|
influxMaxRetryTime uint
|
||||||
|
customFlushInterval time.Duration
|
||||||
|
flushTimer *time.Timer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *InfluxAsyncSink) connect() error {
|
||||||
|
var auth string
|
||||||
|
var uri string
|
||||||
|
if s.config.SSL {
|
||||||
|
uri = fmt.Sprintf("https://%s:%s", s.config.Host, s.config.Port)
|
||||||
|
} else {
|
||||||
|
uri = fmt.Sprintf("http://%s:%s", s.config.Host, s.config.Port)
|
||||||
|
}
|
||||||
|
if len(s.config.User) == 0 {
|
||||||
|
auth = s.config.Password
|
||||||
|
} else {
|
||||||
|
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
|
||||||
|
}
|
||||||
|
cclog.ComponentDebug(s.name, "Using URI", uri, "Org", s.config.Organization, "Bucket", s.config.Database)
|
||||||
|
clientOptions := influxdb2.DefaultOptions()
|
||||||
|
if s.config.BatchSize != 0 {
|
||||||
|
cclog.ComponentDebug(s.name, "Batch size", s.config.BatchSize)
|
||||||
|
clientOptions.SetBatchSize(s.config.BatchSize)
|
||||||
|
}
|
||||||
|
if s.config.FlushInterval != 0 {
|
||||||
|
cclog.ComponentDebug(s.name, "Flush interval", s.config.FlushInterval)
|
||||||
|
clientOptions.SetFlushInterval(s.config.FlushInterval)
|
||||||
|
}
|
||||||
|
if s.influxRetryInterval != 0 {
|
||||||
|
cclog.ComponentDebug(s.name, "MaxRetryInterval", s.influxRetryInterval)
|
||||||
|
clientOptions.SetMaxRetryInterval(s.influxRetryInterval)
|
||||||
|
}
|
||||||
|
if s.influxMaxRetryTime != 0 {
|
||||||
|
cclog.ComponentDebug(s.name, "MaxRetryTime", s.influxMaxRetryTime)
|
||||||
|
clientOptions.SetMaxRetryTime(s.influxMaxRetryTime)
|
||||||
|
}
|
||||||
|
if s.config.InfluxExponentialBase != 0 {
|
||||||
|
cclog.ComponentDebug(s.name, "Exponential Base", s.config.InfluxExponentialBase)
|
||||||
|
clientOptions.SetExponentialBase(s.config.InfluxExponentialBase)
|
||||||
|
}
|
||||||
|
if s.config.InfluxMaxRetries != 0 {
|
||||||
|
cclog.ComponentDebug(s.name, "Max Retries", s.config.InfluxMaxRetries)
|
||||||
|
clientOptions.SetMaxRetries(s.config.InfluxMaxRetries)
|
||||||
|
}
|
||||||
|
clientOptions.SetTLSConfig(
|
||||||
|
&tls.Config{
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
).SetPrecision(time.Second)
|
||||||
|
|
||||||
|
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
|
||||||
|
s.writeApi = s.client.WriteAPI(s.config.Organization, s.config.Database)
|
||||||
|
ok, err := s.client.Ping(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("connection to %s not healthy", uri)
|
||||||
|
}
|
||||||
|
s.writeApi.SetWriteFailedCallback(func(batch string, err influxdb2ApiHttp.Error, retryAttempts uint) bool {
|
||||||
|
mlist := strings.Split(batch, "\n")
|
||||||
|
cclog.ComponentError(s.name, fmt.Sprintf("Failed to write batch with %d metrics %d times (max: %d): %s", len(mlist), retryAttempts, s.config.MaxRetryAttempts, err.Error()))
|
||||||
|
return retryAttempts <= s.config.MaxRetryAttempts
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *InfluxAsyncSink) Write(m lp.CCMetric) error {
|
||||||
|
if s.customFlushInterval != 0 && s.flushTimer == nil {
|
||||||
|
// Run a batched flush for all lines that have arrived in the defined interval
|
||||||
|
s.flushTimer = time.AfterFunc(s.customFlushInterval, func() {
|
||||||
|
if err := s.Flush(); err != nil {
|
||||||
|
cclog.ComponentError(s.name, "flush failed:", err.Error())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
s.writeApi.WritePoint(
|
||||||
|
m.ToPoint(s.meta_as_tags),
|
||||||
|
)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *InfluxAsyncSink) Flush() error {
|
||||||
|
cclog.ComponentDebug(s.name, "Flushing")
|
||||||
|
s.writeApi.Flush()
|
||||||
|
if s.customFlushInterval != 0 && s.flushTimer != nil {
|
||||||
|
s.flushTimer = nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *InfluxAsyncSink) Close() {
|
||||||
|
cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
|
||||||
|
s.writeApi.Flush()
|
||||||
|
s.client.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewInfluxAsyncSink(name string, config json.RawMessage) (Sink, error) {
|
||||||
|
s := new(InfluxAsyncSink)
|
||||||
|
s.name = fmt.Sprintf("InfluxSink(%s)", name)
|
||||||
|
|
||||||
|
// Set default for maximum number of points sent to server in single request.
|
||||||
|
s.config.BatchSize = 0
|
||||||
|
s.influxRetryInterval = 0
|
||||||
|
//s.config.InfluxRetryInterval = "1s"
|
||||||
|
s.influxMaxRetryTime = 0
|
||||||
|
//s.config.InfluxMaxRetryTime = "168h"
|
||||||
|
s.config.InfluxMaxRetries = 0
|
||||||
|
s.config.InfluxExponentialBase = 0
|
||||||
|
s.config.FlushInterval = 0
|
||||||
|
s.config.CustomFlushInterval = ""
|
||||||
|
s.customFlushInterval = time.Duration(0)
|
||||||
|
s.config.MaxRetryAttempts = 1
|
||||||
|
|
||||||
|
// Default retry intervals (in seconds)
|
||||||
|
// 1 2
|
||||||
|
// 2 4
|
||||||
|
// 4 8
|
||||||
|
// 8 16
|
||||||
|
// 16 32
|
||||||
|
// 32 64
|
||||||
|
// 64 128
|
||||||
|
// 128 256
|
||||||
|
// 256 512
|
||||||
|
// 512 1024
|
||||||
|
// 1024 2048
|
||||||
|
// 2048 4096
|
||||||
|
// 4096 8192
|
||||||
|
// 8192 16384
|
||||||
|
// 16384 32768
|
||||||
|
// 32768 65536
|
||||||
|
// 65536 131072
|
||||||
|
// 131072 262144
|
||||||
|
// 262144 524288
|
||||||
|
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &s.config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(s.config.Port) == 0 {
|
||||||
|
return nil, errors.New("Missing port configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
if len(s.config.Database) == 0 {
|
||||||
|
return nil, errors.New("Missing database configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
if len(s.config.Organization) == 0 {
|
||||||
|
return nil, errors.New("Missing organization configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
if len(s.config.Password) == 0 {
|
||||||
|
return nil, errors.New("Missing password configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
// Create lookup map to use meta infos as tags in the output metric
|
||||||
|
s.meta_as_tags = make(map[string]bool)
|
||||||
|
for _, k := range s.config.MetaAsTags {
|
||||||
|
s.meta_as_tags[k] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
toUint := func(duration string, def uint) uint {
|
||||||
|
t, err := time.ParseDuration(duration)
|
||||||
|
if err == nil {
|
||||||
|
return uint(t.Milliseconds())
|
||||||
|
}
|
||||||
|
return def
|
||||||
|
}
|
||||||
|
s.influxRetryInterval = toUint(s.config.InfluxRetryInterval, s.influxRetryInterval)
|
||||||
|
s.influxMaxRetryTime = toUint(s.config.InfluxMaxRetryTime, s.influxMaxRetryTime)
|
||||||
|
|
||||||
|
// Use a own timer for calling Flush()
|
||||||
|
if len(s.config.CustomFlushInterval) > 0 {
|
||||||
|
t, err := time.ParseDuration(s.config.CustomFlushInterval)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid duration in 'custom_flush_interval': %v", err)
|
||||||
|
}
|
||||||
|
s.customFlushInterval = t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect to InfluxDB server
|
||||||
|
if err := s.connect(); err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to connect: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start background: Read from error channel
|
||||||
|
s.errors = s.writeApi.Errors()
|
||||||
|
go func() {
|
||||||
|
for err := range s.errors {
|
||||||
|
cclog.ComponentError(s.name, err.Error())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
44
sinks/influxAsyncSink.md
Normal file
44
sinks/influxAsyncSink.md
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
## `influxasync` sink
|
||||||
|
|
||||||
|
The `influxasync` sink uses the official [InfluxDB golang client](https://pkg.go.dev/github.com/influxdata/influxdb-client-go/v2) to write the metrics to an InfluxDB database in a **non-blocking** fashion. It provides only support for V2 write endpoints (InfluxDB 1.8.0 or later).
|
||||||
|
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<name>": {
|
||||||
|
"type": "influxasync",
|
||||||
|
"meta_as_tags" : true,
|
||||||
|
"database" : "mymetrics",
|
||||||
|
"host": "dbhost.example.com",
|
||||||
|
"port": "4222",
|
||||||
|
"user": "exampleuser",
|
||||||
|
"password" : "examplepw",
|
||||||
|
"organization": "myorg",
|
||||||
|
"ssl": true,
|
||||||
|
"batch_size": 200,
|
||||||
|
"retry_interval" : "1s",
|
||||||
|
"retry_exponential_base" : 2,
|
||||||
|
"max_retries": 20,
|
||||||
|
"max_retry_time" : "168h"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `type`: makes the sink an `influxdb` sink
|
||||||
|
- `meta_as_tags`: print all meta information as tags in the output (optional)
|
||||||
|
- `database`: All metrics are written to this bucket
|
||||||
|
- `host`: Hostname of the InfluxDB database server
|
||||||
|
- `port`: Portnumber (as string) of the InfluxDB database server
|
||||||
|
- `user`: Username for basic authentification
|
||||||
|
- `password`: Password for basic authentification
|
||||||
|
- `organization`: Organization in the InfluxDB
|
||||||
|
- `ssl`: Use SSL connection
|
||||||
|
- `batch_size`: batch up metrics internally, default 100
|
||||||
|
- `retry_interval`: Base retry interval for failed write requests, default 1s
|
||||||
|
- `retry_exponential_base`: The retry interval is exponentially increased with this base, default 2
|
||||||
|
- `max_retries`: Maximal number of retry attempts
|
||||||
|
- `max_retry_time`: Maximal time to retry failed writes, default 168h (one week)
|
||||||
|
|
||||||
|
For information about the calculation of the retry interval settings, see [offical influxdb-client-go documentation](https://github.com/influxdata/influxdb-client-go#handling-of-failed-async-writes)
|
||||||
265
sinks/influxSink.go
Normal file
265
sinks/influxSink.go
Normal file
@@ -0,0 +1,265 @@
|
|||||||
|
package sinks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
|
||||||
|
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
|
||||||
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
|
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
|
"github.com/influxdata/influxdb-client-go/v2/api/write"
|
||||||
|
)
|
||||||
|
|
||||||
|
type InfluxSink struct {
|
||||||
|
sink
|
||||||
|
client influxdb2.Client
|
||||||
|
writeApi influxdb2Api.WriteAPIBlocking
|
||||||
|
config struct {
|
||||||
|
defaultSinkConfig
|
||||||
|
Host string `json:"host,omitempty"`
|
||||||
|
Port string `json:"port,omitempty"`
|
||||||
|
Database string `json:"database,omitempty"`
|
||||||
|
User string `json:"user,omitempty"`
|
||||||
|
Password string `json:"password,omitempty"`
|
||||||
|
Organization string `json:"organization,omitempty"`
|
||||||
|
SSL bool `json:"ssl,omitempty"`
|
||||||
|
// Maximum number of points sent to server in single request.
|
||||||
|
// Default: 1000
|
||||||
|
BatchSize int `json:"batch_size,omitempty"`
|
||||||
|
// Time interval for delayed sending of metrics.
|
||||||
|
// If the buffers are already filled before the end of this interval,
|
||||||
|
// the metrics are sent without further delay.
|
||||||
|
// Default: 1s
|
||||||
|
FlushInterval string `json:"flush_delay,omitempty"`
|
||||||
|
// Number of metrics that are dropped when buffer is full
|
||||||
|
// Default: 100
|
||||||
|
DropRate int `json:"drop_rate,omitempty"`
|
||||||
|
}
|
||||||
|
batch []*write.Point
|
||||||
|
flushTimer *time.Timer
|
||||||
|
flushDelay time.Duration
|
||||||
|
batchMutex sync.Mutex // Flush() runs in another goroutine, so this lock has to protect the buffer
|
||||||
|
flushTimerMutex sync.Mutex // Ensure only one flush timer is running
|
||||||
|
}
|
||||||
|
|
||||||
|
// connect connects to the InfluxDB server
|
||||||
|
func (s *InfluxSink) connect() error {
|
||||||
|
|
||||||
|
// URI options:
|
||||||
|
// * http://host:port
|
||||||
|
// * https://host:port
|
||||||
|
var uri string
|
||||||
|
if s.config.SSL {
|
||||||
|
uri = fmt.Sprintf("https://%s:%s", s.config.Host, s.config.Port)
|
||||||
|
} else {
|
||||||
|
uri = fmt.Sprintf("http://%s:%s", s.config.Host, s.config.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Authentication options:
|
||||||
|
// * token
|
||||||
|
// * username:password
|
||||||
|
var auth string
|
||||||
|
if len(s.config.User) == 0 {
|
||||||
|
auth = s.config.Password
|
||||||
|
} else {
|
||||||
|
auth = fmt.Sprintf("%s:%s", s.config.User, s.config.Password)
|
||||||
|
}
|
||||||
|
cclog.ComponentDebug(s.name,
|
||||||
|
"Using URI='"+uri+"'",
|
||||||
|
"Org='"+s.config.Organization+"'",
|
||||||
|
"Bucket='"+s.config.Database+"'")
|
||||||
|
|
||||||
|
// Set influxDB client options
|
||||||
|
clientOptions := influxdb2.DefaultOptions()
|
||||||
|
|
||||||
|
// Do not check InfluxDB certificate
|
||||||
|
clientOptions.SetTLSConfig(
|
||||||
|
&tls.Config{
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
clientOptions.SetPrecision(time.Second)
|
||||||
|
|
||||||
|
// Create new writeAPI
|
||||||
|
s.client = influxdb2.NewClientWithOptions(uri, auth, clientOptions)
|
||||||
|
s.writeApi = s.client.WriteAPIBlocking(s.config.Organization, s.config.Database)
|
||||||
|
|
||||||
|
// Check InfluxDB server accessibility
|
||||||
|
ok, err := s.client.Ping(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("connection to %s not healthy", uri)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *InfluxSink) Write(m lp.CCMetric) error {
|
||||||
|
|
||||||
|
if s.flushDelay != 0 && s.flushTimerMutex.TryLock() {
|
||||||
|
// Run a batched flush for all metrics that arrived in the last flush delay interval
|
||||||
|
cclog.ComponentDebug(s.name, "Starting new flush timer")
|
||||||
|
s.flushTimer = time.AfterFunc(
|
||||||
|
s.flushDelay,
|
||||||
|
func() {
|
||||||
|
defer s.flushTimerMutex.Unlock()
|
||||||
|
cclog.ComponentDebug(s.name, "Starting flush in flush timer")
|
||||||
|
if err := s.Flush(); err != nil {
|
||||||
|
cclog.ComponentError(s.name, "Flush timer: flush failed:", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lock access to batch slice
|
||||||
|
s.batchMutex.Lock()
|
||||||
|
|
||||||
|
// batch slice full, dropping oldest metric(s)
|
||||||
|
// e.g. when previous flushes failed and batch slice was not cleared
|
||||||
|
if len(s.batch) == s.config.BatchSize {
|
||||||
|
newSize := s.config.BatchSize - s.config.DropRate
|
||||||
|
|
||||||
|
for i := 0; i < newSize; i++ {
|
||||||
|
s.batch[i] = s.batch[i+s.config.DropRate]
|
||||||
|
}
|
||||||
|
for i := newSize; i < s.config.BatchSize; i++ {
|
||||||
|
s.batch[i] = nil
|
||||||
|
}
|
||||||
|
s.batch = s.batch[:newSize]
|
||||||
|
cclog.ComponentError(s.name, "Batch slice full, dropping", s.config.DropRate, "oldest metric(s)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append metric to batch slice
|
||||||
|
p := m.ToPoint(s.meta_as_tags)
|
||||||
|
s.batch = append(s.batch, p)
|
||||||
|
|
||||||
|
// Flush synchronously if "flush_delay" is zero
|
||||||
|
// or
|
||||||
|
// Flush if batch size is reached
|
||||||
|
if s.flushDelay == 0 ||
|
||||||
|
len(s.batch) == s.config.BatchSize {
|
||||||
|
// Unlock access to batch slice
|
||||||
|
s.batchMutex.Unlock()
|
||||||
|
return s.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlock access to batch slice
|
||||||
|
s.batchMutex.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush sends all metrics buffered in batch slice to InfluxDB server
|
||||||
|
func (s *InfluxSink) Flush() error {
|
||||||
|
cclog.ComponentDebug(s.name, "Flushing")
|
||||||
|
|
||||||
|
// Lock access to batch slice
|
||||||
|
s.batchMutex.Lock()
|
||||||
|
defer s.batchMutex.Unlock()
|
||||||
|
|
||||||
|
// Nothing to do, batch slice is empty
|
||||||
|
if len(s.batch) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send metrics from batch slice
|
||||||
|
err := s.writeApi.WritePoint(context.Background(), s.batch...)
|
||||||
|
if err != nil {
|
||||||
|
cclog.ComponentError(s.name, "Flush(): Flush of", len(s.batch), "metrics failed:", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear batch slice
|
||||||
|
for i := range s.batch {
|
||||||
|
s.batch[i] = nil
|
||||||
|
}
|
||||||
|
s.batch = s.batch[:0]
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *InfluxSink) Close() {
|
||||||
|
cclog.ComponentDebug(s.name, "Closing InfluxDB connection")
|
||||||
|
s.flushTimer.Stop()
|
||||||
|
s.Flush()
|
||||||
|
if err := s.Flush(); err != nil {
|
||||||
|
cclog.ComponentError(s.name, "Close(): Flush failed:", err)
|
||||||
|
}
|
||||||
|
s.client.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewInfluxSink create a new InfluxDB sink
|
||||||
|
func NewInfluxSink(name string, config json.RawMessage) (Sink, error) {
|
||||||
|
s := new(InfluxSink)
|
||||||
|
s.name = fmt.Sprintf("InfluxSink(%s)", name)
|
||||||
|
|
||||||
|
// Set config default values
|
||||||
|
s.config.BatchSize = 1000
|
||||||
|
s.config.FlushInterval = "1s"
|
||||||
|
s.config.DropRate = 100
|
||||||
|
|
||||||
|
// Read config
|
||||||
|
if len(config) > 0 {
|
||||||
|
err := json.Unmarshal(config, &s.config)
|
||||||
|
if err != nil {
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(s.config.Host) == 0 {
|
||||||
|
return s, errors.New("Missing host configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
if len(s.config.Port) == 0 {
|
||||||
|
return s, errors.New("Missing port configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
if len(s.config.Database) == 0 {
|
||||||
|
return s, errors.New("Missing database configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
if len(s.config.Organization) == 0 {
|
||||||
|
return s, errors.New("Missing organization configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
if len(s.config.Password) == 0 {
|
||||||
|
return s, errors.New("Missing password configuration required by InfluxSink")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create lookup map to use meta infos as tags in the output metric
|
||||||
|
s.meta_as_tags = make(map[string]bool)
|
||||||
|
for _, k := range s.config.MetaAsTags {
|
||||||
|
s.meta_as_tags[k] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Configure flush delay duration
|
||||||
|
if len(s.config.FlushInterval) > 0 {
|
||||||
|
t, err := time.ParseDuration(s.config.FlushInterval)
|
||||||
|
if err == nil {
|
||||||
|
s.flushDelay = t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !(s.config.BatchSize > 0) {
|
||||||
|
return s, fmt.Errorf("batch_size=%d in InfluxDB config must be > 0", s.config.BatchSize)
|
||||||
|
}
|
||||||
|
if !(s.config.DropRate > 0) {
|
||||||
|
return s, fmt.Errorf("drop_rate=%d in InfluxDB config must be > 0", s.config.DropRate)
|
||||||
|
}
|
||||||
|
if !(s.config.BatchSize > s.config.DropRate) {
|
||||||
|
return s, fmt.Errorf(
|
||||||
|
"batch_size=%d must be greater then drop_rate=%d in InfluxDB config",
|
||||||
|
s.config.BatchSize, s.config.DropRate)
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocate batch slice
|
||||||
|
s.batch = make([]*write.Point, 0, s.config.BatchSize)
|
||||||
|
|
||||||
|
// Connect to InfluxDB server
|
||||||
|
if err := s.connect(); err != nil {
|
||||||
|
return s, fmt.Errorf("unable to connect: %v", err)
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
37
sinks/influxSink.md
Normal file
37
sinks/influxSink.md
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
## `influxdb` sink
|
||||||
|
|
||||||
|
The `influxdb` sink uses the official [InfluxDB golang client](https://pkg.go.dev/github.com/influxdata/influxdb-client-go/v2) to write the metrics to an InfluxDB database in a **blocking** fashion. It provides only support for V2 write endpoints (InfluxDB 1.8.0 or later).
|
||||||
|
|
||||||
|
|
||||||
|
### Configuration structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"<name>": {
|
||||||
|
"type": "influxdb",
|
||||||
|
"meta_as_tags" : true,
|
||||||
|
"database" : "mymetrics",
|
||||||
|
"host": "dbhost.example.com",
|
||||||
|
"port": "4222",
|
||||||
|
"user": "exampleuser",
|
||||||
|
"password" : "examplepw",
|
||||||
|
"organization": "myorg",
|
||||||
|
"ssl": true,
|
||||||
|
"flush_delay" : "1s",
|
||||||
|
"batch_size" : 100
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `type`: makes the sink an `influxdb` sink
|
||||||
|
- `meta_as_tags`: list of meta information keys to add as tags to the output metric (optional)
|
||||||
|
- `database`: All metrics are written to this bucket
|
||||||
|
- `host`: Hostname of the InfluxDB database server
|
||||||
|
- `port`: Portnumber (as string) of the InfluxDB database server
|
||||||
|
- `user`: Username for basic authentication
|
||||||
|
- `password`: Password for basic authentication
|
||||||
|
- `organization`: Organization in the InfluxDB
|
||||||
|
- `ssl`: Use SSL connection
|
||||||
|
- `flush_delay`: Group metrics coming in to a single batch
|
||||||
|
- `batch_size`: Maximal batch size
|
||||||
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user