Fixing format with gofmt

Thomas Roehl 2021-03-25 17:47:08 +01:00
parent e9a74a4c9c
commit f32ff9d006
9 changed files with 310 additions and 325 deletions
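For reference: the only change in this commit is gofmt's canonical formatting (import sorting, tab indentation, spacing before braces). A minimal sketch of the same normalization applied programmatically via the standard library's go/format package — the sample input string below is made up for illustration and is not taken from this repository:

package main

import (
	"fmt"
	"go/format"
	"log"
)

func main() {
	// Hypothetical, badly formatted input resembling the pre-commit style
	// (missing space before '{', collapsed body).
	src := []byte("package collectors\nfunc  Read(interval int){\nreturn }\n")

	// format.Source applies canonical gofmt formatting to Go source.
	formatted, err := format.Source(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(formatted))
}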

View File

@@ -5,8 +5,8 @@ import (
"io/ioutil"
"log"
"os/exec"
"strings"
"strconv"
"strings"
"time"
)
@@ -17,11 +17,11 @@ type InfinibandCollector struct {
}
func (m *InfinibandCollector) Init() {
m.name = "InfinibandCollector"
m.name = "InfinibandCollector"
m.setup()
}
func (m *InfinibandCollector) Read(interval time.Duration){
func (m *InfinibandCollector) Read(interval time.Duration) {
buffer, err := ioutil.ReadFile(string(LIDFILE))
if err != nil {
@@ -35,30 +35,30 @@ func (m *InfinibandCollector) Read(interval time.Duration){
command.Wait()
stdout, err := command.Output()
if err != nil {
log.Print(err)
log.Print(err)
return
}
ll := strings.Split(string(stdout), "\n")
for _, line := range ll {
if strings.HasPrefix(line, "PortRcvData") || strings.HasPrefix(line, "RcvData") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
m.node["ib_recv"] = float64(v)
}
}
if strings.HasPrefix(line, "PortRcvData") || strings.HasPrefix(line, "RcvData") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
m.node["ib_recv"] = float64(v)
}
}
if strings.HasPrefix(line, "PortXmitData") || strings.HasPrefix(line, "XmtData") {
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
m.node["ib_xmit"] = float64(v)
}
}
lv := strings.Fields(line)
v, err := strconv.ParseFloat(lv[1], 64)
if err == nil {
m.node["ib_xmit"] = float64(v)
}
}
}
}
func (m *InfinibandCollector) Close() {
return
return
}

View File

@@ -1,4 +1,5 @@
package main
/*
#cgo CFLAGS: -I.
#cgo LDFLAGS: -L. -llikwid -llikwid-hwloc -lm
@@ -11,7 +12,7 @@ import "unsafe"
func main() {
var topo C.CpuTopology_t
C.topology_init();
C.topology_init()
topo = C.get_cpuTopology()
cpulist := make([]C.int, topo.numHWThreads)
for a := 0; a < int(topo.numHWThreads); a++ {
@@ -27,5 +28,5 @@ func main() {
fmt.Println(v)
C.free(unsafe.Pointer(gstring))
C.perfmon_finalize()
C.topology_finalize();
C.topology_finalize()
}

View File

@@ -1,4 +1,5 @@
package collectors
/*
#cgo CFLAGS: -I./likwid
#cgo LDFLAGS: -L./likwid -llikwid -llikwid-hwloc -lm
@@ -9,77 +10,73 @@ import "C"
import (
"errors"
// "io/ioutil"
// "log"
// "strconv"
// "io/ioutil"
// "log"
// "strconv"
"fmt"
"log"
"strings"
"fmt"
"time"
"unsafe"
"log"
//protocol "github.com/influxdata/line-protocol"
)
type LikwidCollector struct {
MetricCollector
cpulist []C.int
cpulist []C.int
sock2tid map[int]int
metrics map[C.int]map[string]int
groups map[string]C.int
init bool
metrics map[C.int]map[string]int
groups map[string]C.int
init bool
}
type LikwidMetric struct {
name string
search string
socket_scope bool
group_idx int
name string
search string
socket_scope bool
group_idx int
}
const GROUPPATH = `/home/unrz139/Work/cc-metric-collector/collectors/likwid/groups`
var likwid_metrics = map[string][]LikwidMetric{
"MEM_DP": {LikwidMetric{name: "mem_bw", search: "Memory bandwidth [MBytes/s]", socket_scope: true},
LikwidMetric{name: "pwr1", search: "Power [W]", socket_scope: true},
LikwidMetric{name: "pwr2", search: "Power DRAM [W]", socket_scope: true},
LikwidMetric{name: "flops_dp", search: "DP [MFLOP/s]", socket_scope: false}},
"FLOPS_SP" : {LikwidMetric{name: "clock", search: "Clock [MHz]", socket_scope: false},
LikwidMetric{name: "cpi", search: "CPI", socket_scope: false},
LikwidMetric{name: "flops_sp", search: "SP [MFLOP/s]", socket_scope: false}},
"MEM_DP": {LikwidMetric{name: "mem_bw", search: "Memory bandwidth [MBytes/s]", socket_scope: true},
LikwidMetric{name: "pwr1", search: "Power [W]", socket_scope: true},
LikwidMetric{name: "pwr2", search: "Power DRAM [W]", socket_scope: true},
LikwidMetric{name: "flops_dp", search: "DP [MFLOP/s]", socket_scope: false}},
"FLOPS_SP": {LikwidMetric{name: "clock", search: "Clock [MHz]", socket_scope: false},
LikwidMetric{name: "cpi", search: "CPI", socket_scope: false},
LikwidMetric{name: "flops_sp", search: "SP [MFLOP/s]", socket_scope: false}},
}
func getMetricId(group C.int, search string) (int, error) {
for i := 0; i < int(C.perfmon_getNumberOfMetrics(group)); i++ {
mname := C.perfmon_getMetricName(group, C.int(i))
go_mname := C.GoString(mname)
if strings.Contains(go_mname, search) {
return i, nil
}
for i := 0; i < int(C.perfmon_getNumberOfMetrics(group)); i++ {
mname := C.perfmon_getMetricName(group, C.int(i))
go_mname := C.GoString(mname)
if strings.Contains(go_mname, search) {
return i, nil
}
}
return -1, errors.New(fmt.Sprintf("Cannot find metric for search string '%s' in group %d", search, int(group)))
}
return -1, errors.New(fmt.Sprintf("Cannot find metric for search string '%s' in group %d", search, int(group)))
}
func getSocketCpus() map[C.int]int {
slist := SocketList()
var cpu C.int
outmap := make(map[C.int]int)
for _, s := range slist {
t := C.CString(fmt.Sprintf("S%d", s))
clen := C.cpustr_to_cpulist(t, &cpu, 1)
if int(clen) == 1 {
outmap[cpu] = s
}
}
return outmap
slist := SocketList()
var cpu C.int
outmap := make(map[C.int]int)
for _, s := range slist {
t := C.CString(fmt.Sprintf("S%d", s))
clen := C.cpustr_to_cpulist(t, &cpu, 1)
if int(clen) == 1 {
outmap[cpu] = s
}
}
return outmap
}
func (m *LikwidCollector) Init() {
m.name = "LikwidCollector"
m.name = "LikwidCollector"
m.setup()
cpulist := CpuList()
m.cpulist = make([]C.int, len(cpulist))
@@ -87,88 +84,88 @@ func (m *LikwidCollector) Init() {
m.sock2tid = make(map[int]int)
for i, c := range cpulist {
m.cpulist[i] = C.int(c)
if sid, found := slist[m.cpulist[i]]; found {
m.sock2tid[sid] = i
}
m.cpulist[i] = C.int(c)
if sid, found := slist[m.cpulist[i]]; found {
m.sock2tid[sid] = i
}
}
m.metrics = make(map[C.int]map[string]int)
C.topology_init()
C.perfmon_init(C.int(len(m.cpulist)), &m.cpulist[0])
gpath := C.CString(GROUPPATH)
C.config_setGroupPath(gpath)
C.free(unsafe.Pointer(gpath))
C.free(unsafe.Pointer(gpath))
m.init = true
m.groups = make(map[string]C.int)
for g, metrics := range likwid_metrics {
cstr := C.CString(g)
gid := C.perfmon_addEventSet(cstr)
if gid >= 0 {
m.groups[g] = gid
for i, metric := range metrics {
idx, err := getMetricId(gid, metric.search)
if err != nil {
log.Print(err)
} else {
likwid_metrics[g][i].group_idx = idx
}
}
} else {
log.Print("Failed to add events set ", g)
}
C.free(unsafe.Pointer(cstr))
}
cstr := C.CString(g)
gid := C.perfmon_addEventSet(cstr)
if gid >= 0 {
m.groups[g] = gid
for i, metric := range metrics {
idx, err := getMetricId(gid, metric.search)
if err != nil {
log.Print(err)
} else {
likwid_metrics[g][i].group_idx = idx
}
}
} else {
log.Print("Failed to add events set ", g)
}
C.free(unsafe.Pointer(cstr))
}
}
func (m *LikwidCollector) Read(interval time.Duration){
if m.init {
for gname, gid := range m.groups {
C.perfmon_setupCounters(gid)
C.perfmon_startCounters()
time.Sleep(interval)
C.perfmon_stopCounters()
func (m *LikwidCollector) Read(interval time.Duration) {
if m.init {
for gname, gid := range m.groups {
C.perfmon_setupCounters(gid)
C.perfmon_startCounters()
time.Sleep(interval)
C.perfmon_stopCounters()
for _, lmetric := range likwid_metrics[gname] {
if lmetric.socket_scope {
for sid, tid := range m.sock2tid {
res := C.perfmon_getLastMetric(gid, C.int(lmetric.group_idx), C.int(tid))
m.sockets[int(sid)][lmetric.name] = float64(res)
// log.Print("Metric '", lmetric.name,"' on Socket ",int(sid)," returns ", m.sockets[int(sid)][lmetric.name])
}
} else {
for tid, cpu := range m.cpulist {
res := C.perfmon_getLastMetric(gid, C.int(lmetric.group_idx), C.int(tid))
m.cpus[int(cpu)][lmetric.name] = float64(res)
// log.Print("Metric '", lmetric.name,"' on CPU ",int(cpu)," returns ", m.cpus[int(cpu)][lmetric.name])
}
}
}
for cpu, _ := range m.cpus {
if flops_dp, found := m.cpus[cpu]["flops_dp"]; found {
if flops_sp, found := m.cpus[cpu]["flops_sp"]; found {
m.cpus[cpu]["flops_any"] = flops_dp.(float64) + flops_sp.(float64)
}
}
}
for sid, _ := range m.sockets {
if pwr1, found := m.sockets[int(sid)]["pwr1"]; found {
if pwr2, found := m.sockets[int(sid)]["pwr2"]; found {
sum := pwr1.(float64) + pwr2.(float64)
if sum > 0 {
m.sockets[int(sid)]["power"] = sum
}
delete(m.sockets[int(sid)], "pwr2")
}
delete(m.sockets[int(sid)], "pwr1")
}
}
}
for _, lmetric := range likwid_metrics[gname] {
if lmetric.socket_scope {
for sid, tid := range m.sock2tid {
res := C.perfmon_getLastMetric(gid, C.int(lmetric.group_idx), C.int(tid))
m.sockets[int(sid)][lmetric.name] = float64(res)
// log.Print("Metric '", lmetric.name,"' on Socket ",int(sid)," returns ", m.sockets[int(sid)][lmetric.name])
}
} else {
for tid, cpu := range m.cpulist {
res := C.perfmon_getLastMetric(gid, C.int(lmetric.group_idx), C.int(tid))
m.cpus[int(cpu)][lmetric.name] = float64(res)
// log.Print("Metric '", lmetric.name,"' on CPU ",int(cpu)," returns ", m.cpus[int(cpu)][lmetric.name])
}
}
}
for cpu := range m.cpus {
if flops_dp, found := m.cpus[cpu]["flops_dp"]; found {
if flops_sp, found := m.cpus[cpu]["flops_sp"]; found {
m.cpus[cpu]["flops_any"] = flops_dp.(float64) + flops_sp.(float64)
}
}
}
for sid := range m.sockets {
if pwr1, found := m.sockets[int(sid)]["pwr1"]; found {
if pwr2, found := m.sockets[int(sid)]["pwr2"]; found {
sum := pwr1.(float64) + pwr2.(float64)
if sum > 0 {
m.sockets[int(sid)]["power"] = sum
}
delete(m.sockets[int(sid)], "pwr2")
}
delete(m.sockets[int(sid)], "pwr1")
}
}
}
}
}
func (m *LikwidCollector) Close() {
C.perfmon_finalize()
C.topology_finalize()
m.init = false
return
C.perfmon_finalize()
C.topology_finalize()
m.init = false
return
}

View File

@@ -3,8 +3,8 @@ package collectors
import (
"io/ioutil"
"strconv"
"time"
"strings"
"time"
)
const LOADAVGFILE = `/proc/loadavg`
@@ -13,13 +13,12 @@ type LoadavgCollector struct {
MetricCollector
}
func (m *LoadavgCollector) Init() {
m.name = "LoadavgCollector"
m.name = "LoadavgCollector"
m.setup()
}
func (m *LoadavgCollector) Read(interval time.Duration){
func (m *LoadavgCollector) Read(interval time.Duration) {
buffer, err := ioutil.ReadFile(string(LOADAVGFILE))
if err != nil {
@@ -36,5 +35,5 @@ func (m *LoadavgCollector) Read(interval time.Duration){
}
func (m *LoadavgCollector) Close() {
return
return
}

View File

@@ -1,7 +1,7 @@
package collectors
import (
// "errors"
// "errors"
"io/ioutil"
"log"
"strconv"
@@ -15,13 +15,12 @@ type LustreCollector struct {
MetricCollector
}
func (m *LustreCollector) Init() {
m.name = "LustreCollector"
m.name = "LustreCollector"
m.setup()
}
func (m *LustreCollector) Read(interval time.Duration){
func (m *LustreCollector) Read(interval time.Duration) {
buffer, err := ioutil.ReadFile(string(LUSTREFILE))
if err != nil {
@@ -32,31 +31,31 @@ func (m *LustreCollector) Read(interval time.Duration){
for _, line := range strings.Split(string(buffer), "\n") {
lf := strings.Fields(line)
if len(lf) > 1 {
switch lf[0] {
case "read_bytes":
m.node["read_bytes"], err = strconv.ParseInt(lf[6], 0, 64)
m.node["read_requests"], err = strconv.ParseInt(lf[1], 0, 64)
case "write_bytes":
m.node["write_bytes"], err = strconv.ParseInt(lf[6], 0, 64)
m.node["write_requests"], err = strconv.ParseInt(lf[1], 0, 64)
case "open":
m.node["open"], err = strconv.ParseInt(lf[1], 0, 64)
case "close":
m.node["close"], err = strconv.ParseInt(lf[1], 0, 64)
case "setattr":
m.node["setattr"], err = strconv.ParseInt(lf[1], 0, 64)
case "getattr":
m.node["getattr"], err = strconv.ParseInt(lf[1], 0, 64)
case "statfs":
m.node["statfs"], err = strconv.ParseInt(lf[1], 0, 64)
case "inode_permission":
m.node["inode_permission"], err = strconv.ParseInt(lf[1], 0, 64)
}
switch lf[0] {
case "read_bytes":
m.node["read_bytes"], err = strconv.ParseInt(lf[6], 0, 64)
m.node["read_requests"], err = strconv.ParseInt(lf[1], 0, 64)
case "write_bytes":
m.node["write_bytes"], err = strconv.ParseInt(lf[6], 0, 64)
m.node["write_requests"], err = strconv.ParseInt(lf[1], 0, 64)
case "open":
m.node["open"], err = strconv.ParseInt(lf[1], 0, 64)
case "close":
m.node["close"], err = strconv.ParseInt(lf[1], 0, 64)
case "setattr":
m.node["setattr"], err = strconv.ParseInt(lf[1], 0, 64)
case "getattr":
m.node["getattr"], err = strconv.ParseInt(lf[1], 0, 64)
case "statfs":
m.node["statfs"], err = strconv.ParseInt(lf[1], 0, 64)
case "inode_permission":
m.node["inode_permission"], err = strconv.ParseInt(lf[1], 0, 64)
}
}
}
}
func (m *LustreCollector) Close() {
return
return
}

View File

@@ -7,7 +7,6 @@ import (
"strconv"
"strings"
"time"
//protocol "github.com/influxdata/line-protocol"
)
@@ -17,13 +16,12 @@ type MemstatCollector struct {
MetricCollector
}
func (m *MemstatCollector) Init() {
m.name = "MemstatCollector"
m.name = "MemstatCollector"
m.setup()
}
func (m *MemstatCollector) Read(interval time.Duration){
func (m *MemstatCollector) Read(interval time.Duration) {
buffer, err := ioutil.ReadFile(string(MEMSTATFILE))
if err != nil {
@@ -53,5 +51,5 @@ func (m *MemstatCollector) Read(interval time.Duration){
}
func (m *MemstatCollector) Close() {
return
return
}

View File

@@ -1,15 +1,15 @@
package collectors
import (
"time"
"io/ioutil"
"strings"
"log"
"strconv"
"strings"
"time"
)
type MetricGetter interface {
Name() string
Name() string
Init()
Read(time.Duration)
Close()
@@ -19,14 +19,12 @@ type MetricGetter interface {
}
type MetricCollector struct {
name string
node map[string]interface{}
sockets map[int]map[string]interface{}
cpus map[int]map[string]interface{}
name string
node map[string]interface{}
sockets map[int]map[string]interface{}
cpus map[int]map[string]interface{}
}
func (c *MetricCollector) Name() string {
return c.name
}
@@ -44,76 +42,75 @@ func (c *MetricCollector) GetCpuMetrics() map[int]map[string]interface{} {
}
func (c *MetricCollector) setup() error {
slist := SocketList()
clist := CpuList()
c.node = make(map[string]interface{})
c.sockets = make(map[int]map[string]interface{}, len(slist))
for _, s := range slist {
c.sockets[s] = make(map[string]interface{})
}
c.cpus = make(map[int]map[string]interface{}, len(clist))
for _, s := range clist {
c.cpus[s] = make(map[string]interface{})
}
return nil
slist := SocketList()
clist := CpuList()
c.node = make(map[string]interface{})
c.sockets = make(map[int]map[string]interface{}, len(slist))
for _, s := range slist {
c.sockets[s] = make(map[string]interface{})
}
c.cpus = make(map[int]map[string]interface{}, len(clist))
for _, s := range clist {
c.cpus[s] = make(map[string]interface{})
}
return nil
}
func intArrayContains(array []int, str int) (int, bool) {
for i, a := range array {
if a == str {
return i, true
}
}
return -1, false
for i, a := range array {
if a == str {
return i, true
}
}
return -1, false
}
func SocketList() []int {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
var packs []int
for _, line := range ll {
if strings.HasPrefix(line, "physical id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return packs
}
_, found := intArrayContains(packs, int(id))
if !found {
packs = append(packs, int(id))
}
}
if strings.HasPrefix(line, "physical id") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[3], 10, 32)
if err != nil {
log.Print(err)
return packs
}
_, found := intArrayContains(packs, int(id))
if !found {
packs = append(packs, int(id))
}
}
}
return packs
}
func CpuList() []int {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
buffer, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Print(err)
return nil
}
ll := strings.Split(string(buffer), "\n")
var cpulist []int
for _, line := range ll {
if strings.HasPrefix(line, "processor") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[2], 10, 32)
if err != nil {
log.Print(err)
return cpulist
}
_, found := intArrayContains(cpulist, int(id))
if !found {
cpulist = append(cpulist, int(id))
}
}
if strings.HasPrefix(line, "processor") {
lv := strings.Fields(line)
id, err := strconv.ParseInt(lv[2], 10, 32)
if err != nil {
log.Print(err)
return cpulist
}
_, found := intArrayContains(cpulist, int(id))
if !found {
cpulist = append(cpulist, int(id))
}
}
}
return cpulist
}

View File

@@ -1,12 +1,12 @@
package collectors
import (
"fmt"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
"log"
"fmt"
)
const NETSTATFILE = `/proc/net/dev`
@@ -15,13 +15,12 @@ type NetstatCollector struct {
MetricCollector
}
func (m *NetstatCollector) Init() {
m.name = "NetstatCollector"
m.name = "NetstatCollector"
m.setup()
}
func (m *NetstatCollector) Read(interval time.Duration){
func (m *NetstatCollector) Read(interval time.Duration) {
data, err := ioutil.ReadFile(string(NETSTATFILE))
if err != nil {
log.Print(err.Error())
@@ -55,5 +54,5 @@ func (m *NetstatCollector) Read(interval time.Duration){
}
func (m *NetstatCollector) Close() {
return
return
}

View File

@@ -3,23 +3,24 @@ package main
import (
"encoding/json"
"fmt"
"github.com/ClusterCockpit/cc-metric-collector/collectors"
protocol "github.com/influxdata/line-protocol"
"log"
"os"
"os/signal"
"sync"
"time"
"github.com/ClusterCockpit/cc-metric-collector/collectors"
protocol "github.com/influxdata/line-protocol"
)
var Collectors = map[string]collectors.MetricGetter{
"likwid": &collectors.LikwidCollector{},
"loadavg": &collectors.LoadavgCollector{},
"memstat": &collectors.MemstatCollector{},
"netstat": &collectors.NetstatCollector{},
"ibstat": &collectors.InfinibandCollector{},
"lustrestat": &collectors.LustreCollector{},
"likwid": &collectors.LikwidCollector{},
"loadavg": &collectors.LoadavgCollector{},
"memstat": &collectors.MemstatCollector{},
"netstat": &collectors.NetstatCollector{},
"ibstat": &collectors.InfinibandCollector{},
"lustrestat": &collectors.LustreCollector{},
}
type GlobalConfig struct {
Sink struct {
User string `json:"user"`
@@ -32,8 +33,6 @@ type GlobalConfig struct {
Collectors []string `json:"collectors"`
}
func LoadConfiguration(file string, config *GlobalConfig) error {
configFile, err := os.Open(file)
defer configFile.Close()
@@ -45,8 +44,6 @@ func LoadConfiguration(file string, config *GlobalConfig) error {
return err
}
func shutdown(wg *sync.WaitGroup, config *GlobalConfig) {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, os.Interrupt)
@@ -69,23 +66,23 @@ func main() {
var wg sync.WaitGroup
wg.Add(1)
host, err := os.Hostname()
if err != nil {
log.Print(err)
return
}
var tags = map[string]string {"host": host}
if err != nil {
log.Print(err)
return
}
var tags = map[string]string{"host": host}
LoadConfiguration("config.json", &config)
if config.Interval <= 0 || time.Duration(config.Interval) * time.Second <= 0 {
log.Print("Configuration value 'interval' must be greater than zero")
return
if config.Interval <= 0 || time.Duration(config.Interval)*time.Second <= 0 {
log.Print("Configuration value 'interval' must be greater than zero")
return
}
if config.Duration <= 0 {
log.Print("Configuration value 'duration' must be greater than zero")
return
log.Print("Configuration value 'duration' must be greater than zero")
return
}
shutdown(&wg, &config)
serializer := protocol.NewEncoder(os.Stdout)
serializer := protocol.NewEncoder(os.Stdout)
serializer.SetPrecision(time.Second)
serializer.SetMaxLineBytes(1024)
for _, c := range config.Collectors {
@@ -93,91 +90,89 @@ func main() {
col.Init()
log.Print("Start ", col.Name())
}
log.Print(config.Interval, time.Duration(config.Interval) * time.Second)
log.Print(config.Interval, time.Duration(config.Interval)*time.Second)
ticker := time.NewTicker(time.Duration(config.Interval) * time.Second)
done := make(chan bool)
node_fields := make(map[string]interface{})
slist := collectors.SocketList()
sockets_fields := make(map[int]map[string]interface{}, len(slist))
for _, s := range slist {
sockets_fields[s] = make(map[string]interface{})
}
clist := collectors.CpuList()
cpu_fields := make(map[int]map[string]interface{}, len(clist))
for _, s := range clist {
cpu_fields[s] = make(map[string]interface{})
}
sockets_fields := make(map[int]map[string]interface{}, len(slist))
for _, s := range slist {
sockets_fields[s] = make(map[string]interface{})
}
clist := collectors.CpuList()
cpu_fields := make(map[int]map[string]interface{}, len(clist))
for _, s := range clist {
cpu_fields[s] = make(map[string]interface{})
}
go func() {
for {
select {
case <-done:
return
case t:= <-ticker.C:
case t := <-ticker.C:
scount := 0
ccount := 0
for _, c := range config.Collectors {
col := Collectors[c]
col.Read(time.Duration(config.Duration))
for key, val := range col.GetNodeMetric() {
node_fields[key] = val
}
for sid, socket := range col.GetSocketMetrics() {
for key, val := range socket {
sockets_fields[sid][key] = val
scount++
}
}
for cid, cpu := range col.GetCpuMetrics() {
for key, val := range cpu {
cpu_fields[cid][key] = val
ccount++
}
}
scount := 0
ccount := 0
for _, c := range config.Collectors {
col := Collectors[c]
col.Read(time.Duration(config.Duration))
for key, val := range col.GetNodeMetric() {
node_fields[key] = val
}
for sid, socket := range col.GetSocketMetrics() {
for key, val := range socket {
sockets_fields[sid][key] = val
scount++
}
}
for cid, cpu := range col.GetCpuMetrics() {
for key, val := range cpu {
cpu_fields[cid][key] = val
ccount++
}
}
}
var CurrentNode protocol.MutableMetric
CurrentNode, err = protocol.New("node", tags, node_fields, t)
if err != nil {
log.Print(err)
}
log.Print(err)
}
_, err := serializer.Encode(CurrentNode)
if err != nil {
log.Print(err)
}
if scount > 0 {
for sid, socket := range sockets_fields {
var CurrentSocket protocol.MutableMetric
var stags = map[string]string {"socket": fmt.Sprintf("%d", sid), "host": host}
CurrentSocket, err = protocol.New("socket", stags, socket, t)
if err != nil {
log.Print(err)
}
_, err := serializer.Encode(CurrentSocket)
if err != nil {
log.Print(err)
}
}
}
if ccount > 0 {
for cid, cpu := range cpu_fields {
var CurrentCpu protocol.MutableMetric
var ctags = map[string]string {"host": host, "cpu": fmt.Sprintf("%d", cid)}
CurrentCpu, err = protocol.New("cpu", ctags, cpu, t)
if err != nil {
log.Print(err)
}
_, err := serializer.Encode(CurrentCpu)
if err != nil {
log.Print(err)
}
}
}
if err != nil {
log.Print(err)
}
if scount > 0 {
for sid, socket := range sockets_fields {
var CurrentSocket protocol.MutableMetric
var stags = map[string]string{"socket": fmt.Sprintf("%d", sid), "host": host}
CurrentSocket, err = protocol.New("socket", stags, socket, t)
if err != nil {
log.Print(err)
}
_, err := serializer.Encode(CurrentSocket)
if err != nil {
log.Print(err)
}
}
}
if ccount > 0 {
for cid, cpu := range cpu_fields {
var CurrentCpu protocol.MutableMetric
var ctags = map[string]string{"host": host, "cpu": fmt.Sprintf("%d", cid)}
CurrentCpu, err = protocol.New("cpu", ctags, cpu, t)
if err != nil {
log.Print(err)
}
_, err := serializer.Encode(CurrentCpu)
if err != nil {
log.Print(err)
}
}
}
}
}
}()
wg.Wait()
}