Split NfsCollector into Nfs3Collector and Nfs4Collector (#28)

* Split NfsCollector into Nfs3Collector and Nfs4Collector

* Add documentation
Thomas Gruber 2022-02-07 15:43:01 +01:00 committed by GitHub
parent bb87046501
commit 5263a974d1
5 changed files with 189 additions and 58 deletions


@@ -29,6 +29,8 @@ In contrast to the configuration files for sinks and receivers, the collectors c
* [`customcmd`](./customCmdMetric.md)
* [`ipmistat`](./ipmiMetric.md)
* [`topprocs`](./topprocsMetric.md)
* [`nfs3stat`](./nfs3Metric.md)
* [`nfs4stat`](./nfs4Metric.md)
## Todos


@@ -31,7 +31,8 @@ var AvailableCollectors = map[string]MetricCollector{
"gpfs": new(GpfsCollector),
"cpufreq": new(CPUFreqCollector),
"cpufreq_cpuinfo": new(CPUFreqCpuInfoCollector),
"nfsstat": new(NfsCollector),
"nfs3stat": new(Nfs3Collector),
"nfs4stat": new(Nfs4Collector),
"numastats": new(NUMAStatsCollector),
}

collectors/nfs3Metric.md (new file)

@@ -0,0 +1,39 @@
## `nfs3stat` collector
```json
"nfs3stat": {
"nfsstat" : "/path/to/nfsstat",
"exclude_metrics": [
"nfs3_total"
]
}
```
The `nfs3stat` collector reads data from the `nfsstat` command and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink. There is currently no way to get the metrics per mount point. A short parsing sketch follows the metric list below.
Metrics:
* `nfs3_total`
* `nfs3_null`
* `nfs3_getattr`
* `nfs3_setattr`
* `nfs3_lookup`
* `nfs3_access`
* `nfs3_readlink`
* `nfs3_read`
* `nfs3_write`
* `nfs3_create`
* `nfs3_mkdir`
* `nfs3_symlink`
* `nfs3_remove`
* `nfs3_rmdir`
* `nfs3_rename`
* `nfs3_link`
* `nfs3_readdir`
* `nfs3_readdirplus`
* `nfs3_fsstat`
* `nfs3_fsinfo`
* `nfs3_pathconf`
* `nfs3_commit`
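
For reference, a minimal sketch of how one line of `nfsstat -l` output becomes a metric name and value. The example line and its exact spacing are an assumption for illustration; the parsing steps mirror the collector's `initStats`/`updateStats` logic (five whitespace-separated fields, the NFS version in the second field, the operation name with a trailing colon in the fourth, the counter in the fifth).

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative `nfsstat -l` line; real output may differ slightly between
	// nfs-utils versions, but the collector expects five fields per line.
	line := "nfs v3 client      read:            1234"

	lf := strings.Fields(line)
	if len(lf) != 5 || lf[1] != "v3" {
		return // not an NFSv3 statistics line
	}
	name := strings.Trim(lf[3], ":")             // "read"
	value, err := strconv.ParseInt(lf[4], 0, 64) // 1234
	if err == nil {
		fmt.Printf("nfs3_%s = %d\n", name, value) // -> nfs3_read = 1234
	}
}
```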

collectors/nfs4Metric.md (new file)

@@ -0,0 +1,62 @@
## `nfs4stat` collector
```json
"nfs4stat": {
"nfsstat" : "/path/to/nfsstat",
"exclude_metrics": [
"nfs4_total"
]
}
```
The `nfs4stat` collector reads data from the `nfsstat` command and outputs a handful of **node** metrics. If a metric is not required, it can be excluded from being forwarded to the sink. There is currently no way to get the metrics per mount point. A sketch of how the per-interval values are derived follows the metric list below.
Metrics:
* `nfs4_total`
* `nfs4_null`
* `nfs4_read`
* `nfs4_write`
* `nfs4_commit`
* `nfs4_open`
* `nfs4_open_conf`
* `nfs4_open_noat`
* `nfs4_open_dgrd`
* `nfs4_close`
* `nfs4_setattr`
* `nfs4_fsinfo`
* `nfs4_renew`
* `nfs4_setclntid`
* `nfs4_confirm`
* `nfs4_lock`
* `nfs4_lockt`
* `nfs4_locku`
* `nfs4_access`
* `nfs4_getattr`
* `nfs4_lookup`
* `nfs4_lookup_root`
* `nfs4_remove`
* `nfs4_rename`
* `nfs4_link`
* `nfs4_symlink`
* `nfs4_create`
* `nfs4_pathconf`
* `nfs4_statfs`
* `nfs4_readlink`
* `nfs4_readdir`
* `nfs4_server_caps`
* `nfs4_delegreturn`
* `nfs4_getacl`
* `nfs4_setacl`
* `nfs4_rel_lkowner`
* `nfs4_exchange_id`
* `nfs4_create_session`
* `nfs4_destroy_session`
* `nfs4_sequence`
* `nfs4_get_lease_time`
* `nfs4_reclaim_comp`
* `nfs4_secinfo_no`
* `nfs4_bind_conn_to_ses`
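
Each metric's value is not the raw `nfsstat` counter but the increment since the previous collection cycle: the collector keeps a current and a last value per operation and reports their difference. A minimal sketch of that bookkeeping, using a simplified stand-in for the collector's internal state:

```go
package main

import "fmt"

// Simplified stand-in for the collector's per-metric state: the previous and
// the current counter value read from `nfsstat`.
type nfsCounter struct {
	current int64
	last    int64
}

// update mimics one collection cycle: the old current value becomes "last"
// and the freshly read counter becomes "current".
func (c *nfsCounter) update(fresh int64) {
	c.last = c.current
	c.current = fresh
}

func main() {
	var read nfsCounter
	read.update(1234) // counter at initialization time
	read.update(1500) // counter at the next collection interval

	// The emitted value is the per-interval increment, not the raw counter.
	fmt.Printf("nfs4_read value=%d\n", read.current-read.last) // value=266
}
```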


@@ -14,23 +14,29 @@ import (
lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)
// The first part of this file contains the code shared by the general nfsCollector.
// Below, the general nfsCollector is specialized into Nfs3Collector and Nfs4Collector.
const NFSSTAT_EXEC = `nfsstat`
type NfsCollectorData struct {
current int64
last int64
}
type NfsCollector struct {
type nfsCollector struct {
metricCollector
tags map[string]string
version string
config struct {
Nfsutils string `json:"nfsutils"`
Nfsstats string `json:"nfsstat"`
ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}
data map[string]map[string]NfsCollectorData
data map[string]NfsCollectorData
}
func (m *NfsCollector) initStats() error {
cmd := exec.Command(m.config.Nfsutils, "-l")
func (m *nfsCollector) initStats() error {
cmd := exec.Command(m.config.Nfsstats, `-l`)
cmd.Wait()
buffer, err := cmd.Output()
if err == nil {
@@ -39,17 +45,16 @@ func (m *NfsCollector) initStats() error {
if len(lf) != 5 {
continue
}
if _, exist := m.data[lf[1]]; !exist {
m.data[lf[1]] = make(map[string]NfsCollectorData)
}
if lf[1] == m.version {
name := strings.Trim(lf[3], ":")
if _, exist := m.data[lf[1]][name]; !exist {
if _, exist := m.data[name]; !exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[lf[1]][name]
x := m.data[name]
x.current = value
x.last = 0
m.data[lf[1]][name] = x
m.data[name] = x
}
}
}
}
@@ -57,8 +62,8 @@ func (m *NfsCollector) initStats() error {
return err
}
func (m *NfsCollector) updateStats() error {
cmd := exec.Command(m.config.Nfsutils, "-l")
func (m *nfsCollector) updateStats() error {
cmd := exec.Command(m.config.Nfsstats, `-l`)
cmd.Wait()
buffer, err := cmd.Output()
if err == nil {
@@ -67,17 +72,16 @@ func (m *NfsCollector) updateStats() error {
if len(lf) != 5 {
continue
}
if _, exist := m.data[lf[1]]; !exist {
m.data[lf[1]] = make(map[string]NfsCollectorData)
}
if lf[1] == m.version {
name := strings.Trim(lf[3], ":")
if _, exist := m.data[lf[1]][name]; exist {
if _, exist := m.data[name]; exist {
value, err := strconv.ParseInt(lf[4], 0, 64)
if err == nil {
x := m.data[lf[1]][name]
x := m.data[name]
x.last = x.current
x.current = value
m.data[lf[1]][name] = x
m.data[name] = x
}
}
}
}
@@ -85,17 +89,11 @@ func (m *NfsCollector) updateStats() error {
return err
}
func (m *NfsCollector) Init(config json.RawMessage) error {
var err error
m.name = "NfsCollector"
m.setup()
// Set default mmpmon binary
m.config.Nfsutils = "/usr/sbin/nfsstat"
func (m *nfsCollector) MainInit(config json.RawMessage) error {
m.config.Nfsstats = string(NFSSTAT_EXEC)
// Read JSON configuration
if len(config) > 0 {
err = json.Unmarshal(config, &m.config)
err := json.Unmarshal(config, &m.config)
if err != nil {
log.Print(err.Error())
return err
@@ -108,40 +106,69 @@ func (m *NfsCollector) Init(config json.RawMessage) error {
m.tags = map[string]string{
"type": "node",
}
// Check if mmpmon is in executable search path
_, err = exec.LookPath(m.config.Nfsutils)
// Check if nfsstat is in executable search path
_, err := exec.LookPath(m.config.Nfsstats)
if err != nil {
return fmt.Errorf("NfsCollector.Init(): Failed to find nfsstat binary '%s': %v", m.config.Nfsutils, err)
return fmt.Errorf("NfsCollector.Init(): Failed to find nfsstat binary '%s': %v", m.config.Nfsstats, err)
}
m.data = make(map[string]map[string]NfsCollectorData)
m.data = make(map[string]NfsCollectorData)
m.initStats()
m.init = true
return nil
}
func (m *NfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
func (m *nfsCollector) Read(interval time.Duration, output chan lp.CCMetric) {
if !m.init {
return
}
timestamp := time.Now()
m.updateStats()
prefix := ""
switch m.version {
case "v3":
prefix = "nfs3"
case "v4":
prefix = "nfs4"
default:
prefix = "nfs"
}
for version, metrics := range m.data {
for name, data := range metrics {
for name, data := range m.data {
if _, skip := stringArrayContains(m.config.ExcludeMetrics, name); skip {
continue
}
value := data.current - data.last
y, err := lp.New(fmt.Sprintf("nfs_%s", name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
y, err := lp.New(fmt.Sprintf("%s_%s", prefix, name), m.tags, m.meta, map[string]interface{}{"value": value}, timestamp)
if err == nil {
y.AddMeta("version", version)
y.AddMeta("version", m.version)
output <- y
}
}
}
}
func (m *NfsCollector) Close() {
func (m *nfsCollector) Close() {
m.init = false
}
type Nfs3Collector struct {
nfsCollector
}
type Nfs4Collector struct {
nfsCollector
}
func (m *Nfs3Collector) Init(config json.RawMessage) error {
m.name = "Nfs3Collector"
m.version = `v3`
m.setup()
return m.MainInit(config)
}
func (m *Nfs4Collector) Init(config json.RawMessage) error {
m.name = "Nfs4Collector"
m.version = `v4`
m.setup()
return m.MainInit(config)
}
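
The split keeps all parsing and state handling in the unexported `nfsCollector`, while the exported `Nfs3Collector` and `Nfs4Collector` only set their name and NFS version before delegating to the shared `MainInit`. A self-contained sketch of this embedding pattern, with simplified stand-in names rather than the real collector types:

```go
package main

import "fmt"

// base plays the role of the unexported nfsCollector: it carries the fields
// and the logic shared by all NFS versions.
type base struct {
	name    string
	version string
}

// mainInit stands in for nfsCollector.MainInit and relies on the fields that
// the version-specific wrapper has already set.
func (b *base) mainInit() error {
	fmt.Printf("initialized %s for NFS %s\n", b.name, b.version)
	return nil
}

// The wrappers correspond to Nfs3Collector and Nfs4Collector: they embed the
// base type and only fix the version-specific details in Init.
type v3Collector struct{ base }
type v4Collector struct{ base }

func (c *v3Collector) Init() error {
	c.name, c.version = "Nfs3Collector", "v3"
	return c.mainInit()
}

func (c *v4Collector) Init() error {
	c.name, c.version = "Nfs4Collector", "v4"
	return c.mainInit()
}

func main() {
	var c3 v3Collector
	var c4 v4Collector
	c3.Init()
	c4.Init()
}
```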