Mirror of https://github.com/ClusterCockpit/cc-metric-collector.git, synced 2025-07-31 08:56:06 +02:00
Modularize the whole thing (#16)
* Use channels, add a metric router, split up configuration and use extended version of Influx line protocol internally
* Use central timer for collectors and router. Add expressions to router
* Add expression to router config
* Update entry points
* Start with README
* Update README for CCMetric
* Formatting
* Update README.md
* Add README for MultiChanTicker
* Add README for MultiChanTicker
* Update README.md
* Add README to metric router
* Update main README
* Remove SinkEntity type
* Update README for sinks
* Update go files
* Update README for receivers
* Update collectors README
* Update collectors README
* Use separate page per collector
* Fix for tempstat page
* Add docs for customcmd collector
* Add docs for ipmistat collector
* Add docs for topprocs collector
* Update customCmdMetric.md
* Use seconds when calculating LIKWID metrics
* Add IB metrics ib_recv_pkts and ib_xmit_pkts
* Drop domain part of host name
* Updated to latest stable version of likwid
* Define source code dependencies in Makefile
* Add GPFS / IBM Spectrum Scale collector
* Add vet and staticcheck make targets
* Add vet and staticcheck make targets
* Avoid go vet warning: struct field tag `json:"..., omitempty"` not compatible with reflect.StructTag.Get: suspicious space in struct tag value; struct field tag `json:"...", omitempty` not compatible with reflect.StructTag.Get: key:"value" pairs not separated by spaces
* Add sample collector to README.md
* Add CPU frequency collector
* Avoid staticcheck warning: redundant return statement
* Avoid staticcheck warning: unnecessary assignment to the blank identifier
* Simplified code
* Add CPUFreqCollectorCpuinfo, a metric collector to measure the current frequency of the CPUs as obtained from /proc/cpuinfo. Only measure on the first hyperthread
* Add collector for NFS clients
* Move publication of metrics into Flush() for NatsSink
* Update GitHub actions
* Refactoring
* Avoid vet warning: Println arg list ends with redundant newline
* Avoid vet warning: struct field commands has json tag but is not exported
* Avoid vet warning: return copies lock value
* Corrected typo
* Refactoring
* Add go sources in internal/...
* Bad separator in Makefile
* Fix Infiniband collector

Co-authored-by: Holger Obermaier <40787752+ho-ob@users.noreply.github.com>
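The commit message above describes the new data flow: collectors no longer append to a caller-provided slice but push metrics into channels, a central timer triggers the reads, and a metric router forwards the results to the sinks. The Go sketch below illustrates that flow in miniature; the ccMetric struct and the collect/route functions are simplified stand-ins for this illustration, not the project's actual internal/ccMetric, collector, or router APIs. The diff that follows shows the concrete conversion of the GpfsCollector to this interface.

// Minimal sketch, assuming a stripped-down stand-in metric type: a ticker
// drives a collector goroutine that pushes metrics into a channel, and a
// router-like consumer drains that channel. Names here are illustrative only.
package main

import (
	"fmt"
	"time"
)

// ccMetric is a hypothetical stand-in for the extended line-protocol metric
// (name, tags, meta, fields, timestamp) used internally by the collector.
type ccMetric struct {
	name   string
	tags   map[string]string
	meta   map[string]string
	fields map[string]interface{}
	ts     time.Time
}

// collect emits one dummy metric per tick into the output channel, mirroring
// the shape of the new Read(interval, output chan ...) collector signature.
func collect(tick <-chan time.Time, output chan<- ccMetric, done <-chan struct{}) {
	for {
		select {
		case t := <-tick:
			output <- ccMetric{
				name:   "example_metric",
				tags:   map[string]string{"type": "node"},
				meta:   map[string]string{"source": "ExampleCollector"},
				fields: map[string]interface{}{"value": 42},
				ts:     t,
			}
		case <-done:
			return
		}
	}
}

// route drains the collector channel and forwards metrics; in the real design
// this is where the router would apply tags or expressions before the sinks.
func route(input <-chan ccMetric, done <-chan struct{}) {
	for {
		select {
		case m := <-input:
			fmt.Printf("%s %v %v %d\n", m.name, m.tags, m.fields, m.ts.Unix())
		case <-done:
			return
		}
	}
}

func main() {
	ticker := time.NewTicker(500 * time.Millisecond) // central timer
	defer ticker.Stop()

	metrics := make(chan ccMetric, 16)
	done := make(chan struct{})

	go collect(ticker.C, metrics, done)
	go route(metrics, done)

	time.Sleep(2 * time.Second)
	close(done)
}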
@@ -13,18 +13,20 @@ import (
     "strconv"
     "strings"
     "time"
 
-    lp "github.com/influxdata/line-protocol"
+    lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
 )
 
 type GpfsCollector struct {
-    MetricCollector
+    metricCollector
     tags map[string]string
+
     config struct {
         Mmpmon string `json:"mmpmon"`
     }
 }
 
-func (m *GpfsCollector) Init(config []byte) error {
+func (m *GpfsCollector) Init(config json.RawMessage) error {
     var err error
     m.name = "GpfsCollector"
     m.setup()
@@ -40,6 +42,14 @@ func (m *GpfsCollector) Init(config []byte) error {
             return err
         }
     }
+    m.meta = map[string]string{
+        "source": m.name,
+        "group":  "GPFS",
+    }
+    m.tags = map[string]string{
+        "type":       "node",
+        "filesystem": "",
+    }
 
     // GPFS / IBM Spectrum Scale file system statistics can only be queried by user root
     user, err := user.Current()
@@ -60,7 +70,7 @@ func (m *GpfsCollector) Init(config []byte) error {
     return nil
 }
 
-func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
+func (m *GpfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
     if !m.init {
         return
     }
@@ -108,6 +118,9 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
             continue
         }
 
+        m.tags["filesystem"] = filesystem
+
         // return code
         rc, err := strconv.Atoi(key_value["_rc_"])
         if err != nil {
@@ -140,17 +153,10 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
                 key_value["_br_"], err.Error())
             continue
         }
-        y, err := lp.New(
-            "gpfs_bytes_read",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": bytesRead,
-            },
-            timestamp)
+        y, err := lp.New("gpfs_bytes_read", m.tags, m.meta, map[string]interface{}{"value": bytesRead}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
 
         // bytes written
@@ -161,17 +167,10 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
                 key_value["_bw_"], err.Error())
             continue
         }
-        y, err = lp.New(
-            "gpfs_bytes_written",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": bytesWritten,
-            },
-            timestamp)
+        y, err = lp.New("gpfs_bytes_written", m.tags, m.meta, map[string]interface{}{"value": bytesWritten}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
 
         // number of opens
@@ -182,17 +181,9 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
                 key_value["_oc_"], err.Error())
             continue
         }
-        y, err = lp.New(
-            "gpfs_num_opens",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": numOpens,
-            },
-            timestamp)
+        y, err = lp.New("gpfs_num_opens", m.tags, m.meta, map[string]interface{}{"value": numOpens}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
 
         // number of closes
@@ -201,17 +192,9 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
            fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of closes: %s\n", err.Error())
             continue
         }
-        y, err = lp.New(
-            "gpfs_num_closes",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": numCloses,
-            },
-            timestamp)
+        y, err = lp.New("gpfs_num_closes", m.tags, m.meta, map[string]interface{}{"value": numCloses}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
 
         // number of reads
@@ -220,17 +203,9 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
            fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of reads: %s\n", err.Error())
             continue
         }
-        y, err = lp.New(
-            "gpfs_num_reads",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": numReads,
-            },
-            timestamp)
+        y, err = lp.New("gpfs_num_reads", m.tags, m.meta, map[string]interface{}{"value": numReads}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
 
         // number of writes
@@ -239,17 +214,9 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
            fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of writes: %s\n", err.Error())
             continue
         }
-        y, err = lp.New(
-            "gpfs_num_writes",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": numWrites,
-            },
-            timestamp)
+        y, err = lp.New("gpfs_num_writes", m.tags, m.meta, map[string]interface{}{"value": numWrites}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
 
         // number of read directories
@@ -258,17 +225,9 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
            fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert number of read directories: %s\n", err.Error())
             continue
         }
-        y, err = lp.New(
-            "gpfs_num_readdirs",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": numReaddirs,
-            },
-            timestamp)
+        y, err = lp.New("gpfs_num_readdirs", m.tags, m.meta, map[string]interface{}{"value": numReaddirs}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
 
         // Number of inode updates
@@ -277,17 +236,9 @@ func (m *GpfsCollector) Read(interval time.Duration, out *[]lp.MutableMetric) {
            fmt.Fprintf(os.Stderr, "GpfsCollector.Read(): Failed to convert Number of inode updates: %s\n", err.Error())
             continue
         }
-        y, err = lp.New(
-            "gpfs_num_inode_updates",
-            map[string]string{
-                "filesystem": filesystem,
-            },
-            map[string]interface{}{
-                "value": numInodeUpdates,
-            },
-            timestamp)
+        y, err = lp.New("gpfs_num_inode_updates", m.tags, m.meta, map[string]interface{}{"value": numInodeUpdates}, timestamp)
         if err == nil {
-            *out = append(*out, y)
+            output <- y
         }
     }
 }
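For context on what the converted Read() does: the hunks above take counters that were previously parsed from mmpmon output into a key_value map (keys such as _rc_, _br_, _bw_ and _oc_ appear in the diff), convert them with strconv, and emit them via the five-argument lp.New(name, tags, meta, fields, timestamp) into the output channel. The parsing step itself is outside these hunks; the sketch below shows one plausible way such underscore-delimited key/value tokens could be collected into a map. The sample line and the pairing loop are illustrative assumptions, not the collector's actual parsing code.

// Illustrative parsing of an mmpmon-style key/value line into a map, as
// assumed by the key_value lookups in the diff. The sample line is synthetic.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Synthetic line: underscore-delimited keys, each followed by its value.
	line := "_fs_ fs1 _rc_ 0 _br_ 1048576 _bw_ 524288 _oc_ 12"

	// Pair up tokens: even positions are keys, odd positions are values.
	tokens := strings.Fields(line)
	keyValue := make(map[string]string)
	for i := 0; i+1 < len(tokens); i += 2 {
		keyValue[tokens[i]] = tokens[i+1]
	}

	// Convert a few counters with strconv, similar to the conversions the
	// collector performs before building metrics with lp.New.
	rc, err := strconv.Atoi(keyValue["_rc_"])
	if err != nil || rc != 0 {
		fmt.Println("mmpmon reported an error or an unparsable return code")
		return
	}
	bytesRead, _ := strconv.ParseInt(keyValue["_br_"], 10, 64)
	bytesWritten, _ := strconv.ParseInt(keyValue["_bw_"], 10, 64)
	numOpens, _ := strconv.ParseInt(keyValue["_oc_"], 10, 64)

	fmt.Printf("filesystem=%s bytes_read=%d bytes_written=%d opens=%d\n",
		keyValue["_fs_"], bytesRead, bytesWritten, numOpens)
}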