mirror of https://github.com/ClusterCockpit/cc-metric-collector.git
Merge branch 'develop' into use_metric_pointers
@@ -11,59 +11,84 @@ import (
// Most functions are derived from github.com/influxdata/line-protocol/metric.go
// The metric type is extended with an extra meta information list re-using the Tag
// type.
//
// See: https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/
type ccMetric struct {
    name   string
    tags   []*lp.Tag
    fields []*lp.Field
    tm     time.Time
    meta   []*lp.Tag
    name   string            // Measurement name
    meta   map[string]string // map of meta data tags
    tags   map[string]string // map of tags
    fields []*lp.Field       // unordered list of fields
    tm     time.Time         // timestamp
}

// ccmetric access functions
type CCMetric interface {
    lp.MutableMetric
    AddMeta(key, value string)
    MetaList() []*lp.Tag
    RemoveTag(key string)
    GetTag(key string) (string, bool)
    GetMeta(key string) (string, bool)
    GetField(key string) (interface{}, bool)
    HasField(key string) bool
    RemoveField(key string)
    lp.Metric // Time(), Name(), TagList(), FieldList()

    SetName(name string)
    SetTime(t time.Time)

    Meta() map[string]string           // Map of meta data tags
    MetaList() []*lp.Tag               // Ordered list of meta data
    AddMeta(key, value string)         // Add a meta data tag
    GetMeta(key string) (string, bool) // Get a meta data tag addressed by its key

    Tags() map[string]string          // Map of tags
    AddTag(key, value string)         // Add a tag
    GetTag(key string) (string, bool) // Get a tag by its key
    RemoveTag(key string)             // Remove a tag by its key

    GetField(key string) (interface{}, bool) // Get a field addressed by its key
    HasField(key string) bool                // Check if a field key is present
    RemoveField(key string)                  // Remove a field addressed by its key
}

// Meta returns the meta data tags as key-value mapping
func (m *ccMetric) Meta() map[string]string {
    meta := make(map[string]string, len(m.meta))
    for _, m := range m.meta {
        meta[m.Key] = m.Value
    }
    return meta
}

func (m *ccMetric) MetaList() []*lp.Tag {
    return m.meta
}

func (m *ccMetric) String() string {
    return fmt.Sprintf("%s %v %v %v %d", m.name, m.Tags(), m.Meta(), m.Fields(), m.tm.UnixNano())
// MetaList returns the list of meta data tags as sorted list of key value tags
func (m *ccMetric) MetaList() []*lp.Tag {

    ml := make([]*lp.Tag, 0, len(m.meta))
    for key, value := range m.meta {
        ml = append(ml, &lp.Tag{Key: key, Value: value})
    }
    sort.Slice(ml, func(i, j int) bool { return ml[i].Key < ml[j].Key })
    return ml
}

// String implements the stringer interface for data type ccMetric
func (m *ccMetric) String() string {
    return fmt.Sprintf("%s %v %v %v %d", m.name, m.tags, m.meta, m.Fields(), m.tm.UnixNano())
}

// Name returns the measurement name
func (m *ccMetric) Name() string {
    return m.name
}

func (m *ccMetric) Tags() map[string]string {
    tags := make(map[string]string, len(m.tags))
    for _, tag := range m.tags {
        tags[tag.Key] = tag.Value
    }
    return tags
func (m *ccMetric) SetName(name string) {
    m.name = name
}

func (m *ccMetric) TagList() []*lp.Tag {
// Tags returns the list of tags as key-value-mapping
func (m *ccMetric) Tags() map[string]string {
    return m.tags
}

// TagList returns the list of tags as sorted list of key value tags
func (m *ccMetric) TagList() []*lp.Tag {
    tl := make([]*lp.Tag, 0, len(m.tags))
    for key, value := range m.tags {
        tl = append(tl, &lp.Tag{Key: key, Value: value})
    }
    sort.Slice(tl, func(i, j int) bool { return tl[i].Key < tl[j].Key })
    return tl
}

// Fields returns the list of fields as key-value-mapping
func (m *ccMetric) Fields() map[string]interface{} {
    fields := make(map[string]interface{}, len(m.fields))
    for _, field := range m.fields {
@@ -73,116 +98,70 @@ func (m *ccMetric) Fields() map[string]interface{} {
    return fields
}

// FieldList returns the list of fields
func (m *ccMetric) FieldList() []*lp.Field {
    return m.fields
}

// Time returns timestamp
func (m *ccMetric) Time() time.Time {
    return m.tm
}

// SetTime sets the timestamp
func (m *ccMetric) SetTime(t time.Time) {
    m.tm = t
}

// HasTag checks if a tag with key equal to <key> is present in the list of tags
func (m *ccMetric) HasTag(key string) bool {
    for _, tag := range m.tags {
        if tag.Key == key {
            return true
        }
    }
    return false
    _, ok := m.tags[key]
    return ok
}

// GetTag returns the tag with tag's key equal to <key>
func (m *ccMetric) GetTag(key string) (string, bool) {
    for _, tag := range m.tags {
        if tag.Key == key {
            return tag.Value, true
        }
    }
    return "", false
    value, ok := m.tags[key]
    return value, ok
}

// RemoveTag removes the tag with tag's key equal to <key>
// and keeps the tag list ordered by the keys
func (m *ccMetric) RemoveTag(key string) {
    for i, tag := range m.tags {
        if tag.Key == key {
            copy(m.tags[i:], m.tags[i+1:])
            m.tags[len(m.tags)-1] = nil
            m.tags = m.tags[:len(m.tags)-1]
            return
        }
    }
    delete(m.tags, key)
}

// AddTag adds a tag (consisting of key and value)
// and keeps the tag list ordered by the keys
func (m *ccMetric) AddTag(key, value string) {
    for i, tag := range m.tags {
        if key > tag.Key {
            continue
        }

        if key == tag.Key {
            tag.Value = value
            return
        }

        m.tags = append(m.tags, nil)
        copy(m.tags[i+1:], m.tags[i:])
        m.tags[i] = &lp.Tag{Key: key, Value: value}
        return
    }

    m.tags = append(m.tags, &lp.Tag{Key: key, Value: value})
    m.tags[key] = value
}

// HasMeta checks if a meta data tag with meta data's key equal to <key> is present in the list of meta data tags
func (m *ccMetric) HasMeta(key string) bool {
    for _, tag := range m.meta {
        if tag.Key == key {
            return true
        }
    }
    return false
    _, ok := m.meta[key]
    return ok
}

// GetMeta returns the meta data tag with meta data's key equal to <key>
func (m *ccMetric) GetMeta(key string) (string, bool) {
    for _, tag := range m.meta {
        if tag.Key == key {
            return tag.Value, true
        }
    }
    return "", false
    value, ok := m.meta[key]
    return value, ok
}

// RemoveMeta removes the meta data tag with tag's key equal to <key>
// and keeps the meta data tag list ordered by the keys
func (m *ccMetric) RemoveMeta(key string) {
    for i, tag := range m.meta {
        if tag.Key == key {
            copy(m.meta[i:], m.meta[i+1:])
            m.meta[len(m.meta)-1] = nil
            m.meta = m.meta[:len(m.meta)-1]
            return
        }
    }
    delete(m.meta, key)
}

// AddMeta adds a meta data tag (consisting of key and value)
// and keeps the meta data list ordered by the keys
func (m *ccMetric) AddMeta(key, value string) {
    for i, tag := range m.meta {
        if key > tag.Key {
            continue
        }

        if key == tag.Key {
            tag.Value = value
            return
        }

        m.meta = append(m.meta, nil)
        copy(m.meta[i+1:], m.meta[i:])
        m.meta[i] = &lp.Tag{Key: key, Value: value}
        return
    }

    m.meta = append(m.meta, &lp.Tag{Key: key, Value: value})
    m.meta[key] = value
}

// AddField adds a field (consisting of key and value) to the unordered list of fields
func (m *ccMetric) AddField(key string, value interface{}) {
    for i, field := range m.fields {
        if key == field.Key {
@@ -193,6 +172,7 @@ func (m *ccMetric) AddField(key string, value interface{}) {
    m.fields = append(m.fields, &lp.Field{Key: key, Value: convertField(value)})
}

// GetField returns the field with field's key equal to <key>
func (m *ccMetric) GetField(key string) (interface{}, bool) {
    for _, field := range m.fields {
        if field.Key == key {
@@ -202,6 +182,7 @@ func (m *ccMetric) GetField(key string) (interface{}, bool) {
    return "", false
}

// HasField checks if a field with field's key equal to <key> is present in the list of fields
func (m *ccMetric) HasField(key string) bool {
    for _, field := range m.fields {
        if field.Key == key {
@@ -211,6 +192,8 @@ func (m *ccMetric) HasField(key string) bool {
    return false
}

// RemoveField removes the field with field's key equal to <key>
// from the unordered list of fields
func (m *ccMetric) RemoveField(key string) {
    for i, field := range m.fields {
        if field.Key == key {
@@ -222,6 +205,7 @@ func (m *ccMetric) RemoveField(key string) {
    }
}

// New creates a new measurement point
func New(
    name string,
    tags map[string]string,
@@ -231,58 +215,49 @@ func New(
) (CCMetric, error) {
    m := &ccMetric{
        name:   name,
        tags:   nil,
        fields: nil,
        tags:   make(map[string]string, len(tags)),
        meta:   make(map[string]string, len(meta)),
        fields: make([]*lp.Field, 0, len(fields)),
        tm:     tm,
        meta:   nil,
    }

    if len(tags) > 0 {
        m.tags = make([]*lp.Tag, 0, len(tags))
        for k, v := range tags {
            m.tags = append(m.tags,
                &lp.Tag{Key: k, Value: v})
        }
        sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
    // deep copy tags
    for k, v := range tags {
        m.tags[k] = v
    }

    if len(meta) > 0 {
        m.meta = make([]*lp.Tag, 0, len(meta))
        for k, v := range meta {
            m.meta = append(m.meta,
                &lp.Tag{Key: k, Value: v})
        }
        sort.Slice(m.meta, func(i, j int) bool { return m.meta[i].Key < m.meta[j].Key })
    // deep copy meta data tags
    for k, v := range meta {
        m.meta[k] = v
    }

    if len(fields) > 0 {
        m.fields = make([]*lp.Field, 0, len(fields))
        for k, v := range fields {
            v := convertField(v)
            if v == nil {
                continue
            }
            m.AddField(k, v)
    // Unsorted list of fields
    for k, v := range fields {
        v := convertField(v)
        if v == nil {
            continue
        }
        m.AddField(k, v)
    }

    return m, nil
}
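For orientation, a minimal usage sketch of the map-based API follows. It assumes the parameters of `New` elided by the hunk above are `meta map[string]string`, `fields map[string]interface{}` and `tm time.Time`, and that the code lives inside the cc-metric-collector module (the package is `internal` and is imported as `lp` elsewhere in the repository):

```go
package main

import (
    "fmt"
    "log"
    "time"

    lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
)

func main() {
    // Build a metric point; tags and meta data are plain string maps in the new API.
    m, err := lp.New(
        "temp_package_id_0",
        map[string]string{"type": "socket", "type-id": "0"}, // tags
        map[string]string{"unit": "degC"},                   // meta data tags
        map[string]interface{}{"value": 48.0},               // fields
        time.Now(),
    )
    if err != nil {
        log.Fatal(err)
    }
    m.AddTag("cluster", "testcluster")
    if unit, ok := m.GetMeta("unit"); ok {
        fmt.Println(m.Name(), unit, m.Tags())
    }
}
```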

func FromMetric(other CCMetric) CCMetric {
// FromMetric copies the metric <other>
func FromMetric(other ccMetric) CCMetric {
    m := &ccMetric{
        name:   other.Name(),
        tags:   make([]*lp.Tag, len(other.TagList())),
        tags:   make(map[string]string),
        fields: make([]*lp.Field, len(other.FieldList())),
        meta:   make([]*lp.Tag, len(other.MetaList())),
        meta:   make(map[string]string),
        tm:     other.Time(),
    }

    for i, tag := range other.TagList() {
        m.tags[i] = &lp.Tag{Key: tag.Key, Value: tag.Value}
    for key, value := range other.Tags() {
        m.tags[key] = value
    }
    for i, s := range other.MetaList() {
        m.meta[i] = &lp.Tag{Key: s.Key, Value: s.Value}
    for key, value := range other.Meta() {
        m.meta[key] = value
    }

    for i, field := range other.FieldList() {
@@ -291,25 +266,35 @@ func FromMetric(other CCMetric) CCMetric {
    return m
}

// FromInfluxMetric copies the influxDB line protocol metric <other>
func FromInfluxMetric(other lp.Metric) CCMetric {
    m := &ccMetric{
        name:   other.Name(),
        tags:   make([]*lp.Tag, len(other.TagList())),
        tags:   make(map[string]string),
        fields: make([]*lp.Field, len(other.FieldList())),
        meta:   make([]*lp.Tag, 0),
        meta:   make(map[string]string),
        tm:     other.Time(),
    }

    for i, tag := range other.TagList() {
        m.tags[i] = &lp.Tag{Key: tag.Key, Value: tag.Value}
    for _, otherTag := range other.TagList() {
        m.tags[otherTag.Key] = otherTag.Value
    }

    for i, field := range other.FieldList() {
        m.fields[i] = &lp.Field{Key: field.Key, Value: field.Value}
    for i, otherField := range other.FieldList() {
        m.fields[i] = &lp.Field{
            Key:   otherField.Key,
            Value: otherField.Value,
        }
    }
    return m
}

// convertField converts data types of fields by the following schemata:
// *float32, *float64, float32, float64 -> float64
// *int, *int8, *int16, *int32, *int64, int, int8, int16, int32, int64 -> int64
// *uint, *uint8, *uint16, *uint32, *uint64, uint, uint8, uint16, uint32, uint64 -> uint64
// *[]byte, *string, []byte, string -> string
// *bool, bool -> bool
func convertField(v interface{}) interface{} {
    switch v := v.(type) {
    case float64:

@@ -24,17 +24,23 @@ func intArrayContains(array []int, str int) (int, bool) {
    return -1, false
}

// stringArrayContains scans an array of strings if the value str is present in the array
// If the specified value is found, the corresponding array index is returned.
// The bool value is used to signal success or failure
// func stringArrayContains(array []string, str string) (int, bool) {
// 	for i, a := range array {
// 		if a == str {
// 			return i, true
// 		}
// 	}
// 	return -1, false
// }
func fileToInt(path string) int {
    buffer, err := ioutil.ReadFile(path)
    if err != nil {
        log.Print(err)
        cclogger.ComponentError("ccTopology", "Reading", path, ":", err.Error())
        return -1
    }
    sbuffer := strings.Replace(string(buffer), "\n", "", -1)
    var id int64
    //_, err = fmt.Scanf("%d", sbuffer, &id)
    id, err = strconv.ParseInt(sbuffer, 10, 32)
    if err != nil {
        cclogger.ComponentError("ccTopology", "Parsing", path, ":", sbuffer, err.Error())
        return -1
    }
    return int(id)
}

func SocketList() []int {
    buffer, err := ioutil.ReadFile("/proc/cpuinfo")
@@ -68,7 +74,7 @@ func CpuList() []int {
        return nil
    }
    ll := strings.Split(string(buffer), "\n")
    var cpulist []int
    cpulist := make([]int, 0)
    for _, line := range ll {
        if strings.HasPrefix(line, "processor") {
            lv := strings.Fields(line)
@@ -86,6 +92,67 @@ func CpuList() []int {
    return cpulist
}

func CoreList() []int {
    buffer, err := ioutil.ReadFile("/proc/cpuinfo")
    if err != nil {
        log.Print(err)
        return nil
    }
    ll := strings.Split(string(buffer), "\n")
    corelist := make([]int, 0)
    for _, line := range ll {
        if strings.HasPrefix(line, "core id") {
            lv := strings.Fields(line)
            id, err := strconv.ParseInt(lv[3], 10, 32)
            if err != nil {
                log.Print(err)
                return corelist
            }
            _, found := intArrayContains(corelist, int(id))
            if !found {
                corelist = append(corelist, int(id))
            }
        }
    }
    return corelist
}

func NumaNodeList() []int {
    numalist := make([]int, 0)
    files, err := filepath.Glob("/sys/devices/system/node/node*")
    if err != nil {
        log.Print(err)
    }
    for _, f := range files {
        finfo, err := os.Lstat(f)
        if err == nil && (finfo.IsDir() || finfo.Mode()&os.ModeSymlink != 0) {
            var id int
            parts := strings.Split(f, "/")
            _, err = fmt.Scanf("node%d", parts[len(parts)-1], &id)
            if err == nil {
                _, found := intArrayContains(numalist, int(id))
                if !found {
                    numalist = append(numalist, int(id))
                }
            }
        }
    }
    return numalist
}

func DieList() []int {
    cpulist := CpuList()
    dielist := make([]int, 0)
    for _, c := range cpulist {
        dieid := fileToInt(fmt.Sprintf("/sys/devices/system/cpu/cpu%d/topology/die_id", c))
        _, found := intArrayContains(dielist, int(dieid))
        if !found {
            dielist = append(dielist, int(dieid))
        }
    }
    return dielist
}

type CpuEntry struct {
    Cpuid int
    SMT   int
@@ -203,6 +270,7 @@ type CpuInformation struct {
    SMTWidth       int
    NumSockets     int
    NumDies        int
    NumCores       int
    NumNumaDomains int
}

@@ -213,6 +281,7 @@ func CpuInfo() CpuInformation {
    numa := 0
    die := 0
    socket := 0
    core := 0
    cdata := CpuData()
    for _, d := range cdata {
        if d.SMT > smt {
@@ -227,10 +296,14 @@ func CpuInfo() CpuInformation {
        if d.Socket > socket {
            socket = d.Socket
        }
        if d.Core > core {
            core = d.Core
        }
    }
    c.NumNumaDomains = numa + 1
    c.SMTWidth = smt + 1
    c.NumDies = die + 1
    c.NumCores = core + 1
    c.NumSockets = socket + 1
    c.NumHWthreads = len(cdata)
    return c
@@ -275,3 +348,47 @@ func GetCpuCore(cpuid int) int {
    }
    return -1
}

func GetSocketCpus(socket int) []int {
    all := CpuData()
    cpulist := make([]int, 0)
    for _, d := range all {
        if d.Socket == socket {
            cpulist = append(cpulist, d.Cpuid)
        }
    }
    return cpulist
}

func GetNumaDomainCpus(domain int) []int {
    all := CpuData()
    cpulist := make([]int, 0)
    for _, d := range all {
        if d.Numadomain == domain {
            cpulist = append(cpulist, d.Cpuid)
        }
    }
    return cpulist
}

func GetDieCpus(die int) []int {
    all := CpuData()
    cpulist := make([]int, 0)
    for _, d := range all {
        if d.Die == die {
            cpulist = append(cpulist, d.Cpuid)
        }
    }
    return cpulist
}

func GetCoreCpus(core int) []int {
    all := CpuData()
    cpulist := make([]int, 0)
    for _, d := range all {
        if d.Core == core {
            cpulist = append(cpulist, d.Cpuid)
        }
    }
    return cpulist
}
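A short usage sketch of the new topology helpers added above; it assumes the code lives inside the cc-metric-collector module (the package is `internal`) and that the import alias `topo` is free to choose:

```go
package main

import (
    "fmt"

    topo "github.com/ClusterCockpit/cc-metric-collector/internal/ccTopology"
)

func main() {
    // Summary counters derived from /proc/cpuinfo and sysfs
    info := topo.CpuInfo()
    fmt.Printf("%d HW threads on %d sockets\n", info.NumHWthreads, info.NumSockets)

    // Hardware threads belonging to the first socket and the first NUMA domain
    fmt.Println("socket 0:", topo.GetSocketCpus(0))
    fmt.Println("NUMA domain 0:", topo.GetNumaDomainCpus(0))
}
```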

@@ -6,6 +6,8 @@ The CCMetric router sits in between the collectors and the sinks and can be used

```json
{
    "num_cache_intervals" : 1,
    "interval_timestamp" : true,
    "add_tags" : [
        {
            "key" : "cluster",
@@ -25,16 +27,58 @@ The CCMetric router sits in between the collectors and the sinks and can be used
            "if" : "*"
        }
    ],
    "interval_timestamp" : true
    "interval_aggregates" : [
        {
            "name" : "temp_cores_avg",
            "if" : "match('temp_core_%d+', metric.Name())",
            "function" : "avg(values)",
            "tags" : {
                "type" : "node"
            },
            "meta" : {
                "group": "IPMI",
                "unit": "degC",
                "source": "TempCollector"
            }
        }
    ],
    "drop_metrics" : [
        "not_interesting_metric_at_all"
    ],
    "drop_metrics_if" : [
        "match('temp_core_%d+', metric.Name())"
    ],
    "rename_metrics" : {
        "metric_12345" : "mymetric"
    }
}
```

There are three main options `add_tags`, `delete_tags` and `interval_timestamp`. `add_tags` and `delete_tags` are lists consisting of dicts with `key`, `value` and `if`. The `value` can be omitted in the `delete_tags` part as it only uses the `key` for removal. The `interval_timestamp` setting means that a unique timestamp is applied to all metrics traversing the router during an interval.
# The `interval_timestamp` option

# Conditional manipulation of tags
The collectors' `Read()` functions are not called simultaneously and therefore the metrics gathered in an interval can have different timestamps. If you want to avoid that and have a common timestamp (the beginning of the interval), set this option to `true` and the MetricRouter sets the time.

The `if` setting allows conditional testing of a single metric like in the example:
# The `num_cache_intervals` option

If the MetricRouter should buffer metrics of intervals in a MetricCache, this option specifies the number of past intervals that should be kept. If `num_cache_intervals = 0`, the cache is disabled. With `num_cache_intervals = 1`, only the metrics of the last interval are buffered.

A `num_cache_intervals > 0` is required to use the `interval_aggregates` option.
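
A minimal sketch of these two options in the router configuration (the values are only examples):

```json
{
    "interval_timestamp" : true,
    "num_cache_intervals" : 1
}
```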

# The `rename_metrics` option

In the ClusterCockpit world we specified a set of standard metrics. Since some collectors determine the metric names based on files, executables and libraries, they might change from system to system (or installation to installation, OS to OS, ...). In order to get the common names, you can rename incoming metrics before sending them to the sink. If the metric name matches the `oldname`, it is changed to `newname`.

```json
{
    "oldname" : "newname",
    "clock_mhz" : "clock"
}
```

# Conditional manipulation of tags (`add_tags` and `del_tags`)

Common config format:
```json
{
    "key" : "test",
@@ -43,8 +87,131 @@ The `if` setting allows conditional testing of a single metric like in the examp
}
```

If the CCMetric name is equal to 'temp_package_id_0', it adds an additional tag `test=testing` to the metric.
## The `del_tags` option

In order to match all metrics, you can use `*`, so in order to add a flag per default, like the `cluster=testcluster` tag in the example.
The collectors are free to add whatever `key=value` pair to the metric tags (although the usage of tags should be minimized). If you want to delete a tag afterwards, you can do that. When the `if` condition matches on a metric, the `key` is removed from the metric's tags.

If you want to remove a tag for all metrics, use the condition wildcard `*`. The `value` field can be omitted in the `del_tags` case (see the sketch after the list below).

Never delete tags:
- `hostname`
- `type`
- `type-id`
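
A minimal `del_tags` sketch following the common format above; the tag key `unit` is only an example, and the JSON key is `delete_tags` as in the router configuration struct, while the heading abbreviates it as `del_tags`:

```json
"delete_tags" : [
    {
        "key" : "unit",
        "if" : "*"
    }
]
```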

## The `add_tags` option

In some cases, metrics should be tagged or an existing tag changed based on some condition. This can be done in the `add_tags` section. When the `if` condition evaluates to `true`, the tag `key` is added or gets changed to the new `value`.

If the CCMetric name is equal to `temp_package_id_0`, it adds an additional tag `test=testing` to the metric.

For this metric, a more useful example would be:

```json
[
    {
        "key" : "type",
        "value" : "socket",
        "if" : "name == 'temp_package_id_0'"
    },
    {
        "key" : "type-id",
        "value" : "0",
        "if" : "name == 'temp_package_id_0'"
    }
]
```

The metric `temp_package_id_0` corresponds to the temperature of the first CPU socket (=package). With the above configuration, the tags would reflect that because commonly the [TempCollector](../../collectors/tempMetric.md) submits only `node` metrics.

In order to match all metrics, you can use `*` to add a tag by default. This is useful to attach system-specific tags like `cluster=testcluster`:

```json
{
    "key" : "cluster",
    "value" : "testcluster",
    "if" : "*"
}
```

# Dropping metrics

In some cases, you want to drop a metric and not have it forwarded to the sinks. There are two options based on the required specification:
- Based only on the metric name -> `drop_metrics` section
- An evaluable condition with more overhead -> `drop_metrics_if` section

## The `drop_metrics` section

The argument is a list of metric names. No further checks are performed, only a comparison of the metric name.

```json
{
    "drop_metrics" : [
        "drop_metric_1",
        "drop_metric_2"
    ]
}
```

The example drops all metrics with the name `drop_metric_1` and `drop_metric_2`.

## The `drop_metrics_if` section

This option takes a list of evaluable conditions and performs them one after the other on **all** metrics incoming from the collectors and the metric cache (aka `interval_aggregates`).

```json
{
    "drop_metrics_if" : [
        "match('drop_metric_%d+', name)",
        "match('cpu', type) && type-id == 0"
    ]
}
```
The first line is comparable with the example in `drop_metrics`: it drops all metrics starting with `drop_metric_` and ending with a number. The second line drops all metrics of the first hardware thread (**not** recommended).


# Aggregate metric values of the current interval with the `interval_aggregates` option

**Note:** `interval_aggregates` works only if `num_cache_intervals` > 0

In some cases, you need to derive new metrics based on the metrics arriving during an interval. This can be done in the `interval_aggregates` section. The logic is similar to the other metric manipulation and filtering options. A cache stores all metrics that arrive during an interval. At the beginning of the *next* interval, the list of metrics is submitted to the MetricAggregator. It derives new metrics and submits them back to the MetricRouter, so they are sent in the next interval but have the timestamp of the previous interval beginning.

```json
"interval_aggregates" : [
    {
        "name" : "new_metric_name",
        "if" : "match('sub_metric_%d+', metric.Name())",
        "function" : "avg(values)",
        "tags" : {
            "key" : "value",
            "type" : "node"
        },
        "meta" : {
            "key" : "value",
            "group": "IPMI",
            "unit": "<copy>"
        }
    }
]
```

The above configuration collects all metric values for metrics evaluating `if` to `true`. Afterwards it calculates the average `avg` of the `values` (list of all metrics' field `value`) and creates a new CCMetric with the name `new_metric_name` and adds the tags in `tags` and the meta information in `meta`. The special value `<copy>` searches the input metrics and copies the value of the first match of `key` to the new CCMetric.

If you are not interested in the input metrics `sub_metric_%d+` at all, you can add the same condition used here to the `drop_metrics_if` section to drop them.

Use cases for `interval_aggregates`:
- Combine multiple metrics of a collector into a new one, like the [MemstatCollector](../../collectors/memstatMetric.md) does for `mem_used`:
```json
{
    "name" : "mem_used",
    "if" : "source == 'MemstatCollector'",
    "function" : "sum(mem_total) - (sum(mem_free) + sum(mem_buffers) + sum(mem_cached))",
    "tags" : {
        "type" : "node"
    },
    "meta" : {
        "group": "<copy>",
        "unit": "<copy>",
        "source": "<copy>"
    }
}
```

@@ -3,6 +3,7 @@ package metricRouter
import (
    "context"
    "fmt"
    "math"
    "os"
    "strings"
    "time"
@@ -84,7 +85,7 @@ func (c *metricAggregator) Init(output chan *lp.CCMetric) error {
    c.constants["smtWidth"] = cinfo.SMTWidth

    c.language = gval.NewLanguage(
        gval.Base(),
        gval.Full(),
        metricCacheLanguage,
    )

@@ -283,6 +284,86 @@ func (c *metricAggregator) AddFunction(name string, function func(args ...interf
    c.language = gval.NewLanguage(c.language, gval.Function(name, function))
}


func EvalBoolCondition(condition string, params map[string]interface{}) (bool, error) {
    newcond := strings.ReplaceAll(condition, "'", "\"")
    newcond = strings.ReplaceAll(newcond, "%", "\\")
    language := gval.NewLanguage(
        gval.Full(),
        metricCacheLanguage,
    )
    value, err := gval.Evaluate(newcond, params, language)
    if err != nil {
        return false, err
    }
    var endResult bool = false
    err = nil
    switch r := value.(type) {
    case bool:
        endResult = r
    case float64:
        if r != 0.0 {
            endResult = true
        }
    case float32:
        if r != 0.0 {
            endResult = true
        }
    case int:
        if r != 0 {
            endResult = true
        }
    case int64:
        if r != 0 {
            endResult = true
        }
    case int32:
        if r != 0 {
            endResult = true
        }
    default:
        err = fmt.Errorf("cannot evaluate '%s' to bool", newcond)
    }
    return endResult, err
}
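A usage sketch for this helper from inside the `metricRouter` package; the parameter map mirrors what `getParamMap` (further down) builds, the metric name and values are invented for illustration, and `match` is assumed to come from `metricCacheLanguage` as used in the README examples:

```go
// Hypothetical parameters for a metric named temp_core_3 of type hwthread
params := map[string]interface{}{
    "name":  "temp_core_3",
    "type":  "hwthread",
    "value": 54.0,
}
// Single quotes become double quotes and '%' becomes '\' before gval evaluates the
// term, so '%d+' in the condition ends up as the regular expression '\d+'.
ok, err := EvalBoolCondition("match('temp_core_%d+', name) && value > 50.0", params)
if err == nil && ok {
    // the metric would match, e.g. a drop_metrics_if entry
}
```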

func EvalFloat64Condition(condition string, params map[string]interface{}) (float64, error) {
    var endResult float64 = math.NaN()
    newcond := strings.ReplaceAll(condition, "'", "\"")
    newcond = strings.ReplaceAll(newcond, "%", "\\")
    language := gval.NewLanguage(
        gval.Full(),
        metricCacheLanguage,
    )
    value, err := gval.Evaluate(newcond, params, language)
    if err != nil {
        cclog.ComponentDebug("MetricRouter", condition, " = ", err.Error())
        return endResult, err
    }
    err = nil
    switch r := value.(type) {
    case bool:
        if r {
            endResult = 1.0
        } else {
            endResult = 0.0
        }
    case float64:
        endResult = r
    case float32:
        endResult = float64(r)
    case int:
        endResult = float64(r)
    case int64:
        endResult = float64(r)
    case int32:
        endResult = float64(r)
    default:
        err = fmt.Errorf("cannot evaluate '%s' to float64", newcond)
    }
    return endResult, err
}

func NewAggregator(output chan *lp.CCMetric) (MetricAggregator, error) {
    a := new(metricAggregator)
    err := a.Init(output)

@@ -11,7 +11,6 @@ import (

    lp "github.com/ClusterCockpit/cc-metric-collector/internal/ccMetric"
    mct "github.com/ClusterCockpit/cc-metric-collector/internal/multiChanTicker"
    "gopkg.in/Knetic/govaluate.v2"
)

// Metric router tag configuration
@@ -26,8 +25,12 @@ type metricRouterConfig struct {
    AddTags           []metricRouterTagConfig          `json:"add_tags"`            // List of tags that are added when the condition is met
    DelTags           []metricRouterTagConfig          `json:"delete_tags"`         // List of tags that are removed when the condition is met
    IntervalAgg       []metricAggregatorIntervalConfig `json:"interval_aggregates"` // List of aggregation function processed at the end of an interval
    DropMetrics       []string                         `json:"drop_metrics"`        // List of metric names to drop. For fine-grained dropping use drop_metrics_if
    DropMetricsIf     []string                         `json:"drop_metrics_if"`     // List of evaluatable terms to drop metrics
    RenameMetrics     map[string]string                `json:"rename_metrics"`      // Map to rename metric name from key to value
    IntervalStamp     bool                             `json:"interval_timestamp"`  // Update timestamp periodically by ticker each interval?
    NumCacheIntervals int                              `json:"num_cache_intervals"` // Number of intervals of cached metrics for evaluation
    dropMetrics       map[string]bool                  // Internal map for O(1) lookup
}

// Metric router data structure
@@ -92,17 +95,19 @@ func (r *metricRouter) Init(ticker mct.MultiChanTicker, wg *sync.WaitGroup, rout
        cclog.ComponentError("MetricRouter", err.Error())
        return err
    }
    numIntervals := r.config.NumCacheIntervals
    if numIntervals <= 0 {
        numIntervals = 1
    if r.config.NumCacheIntervals >= 0 {
        r.cache, err = NewCache(r.cache_input, r.ticker, &r.cachewg, r.config.NumCacheIntervals)
        if err != nil {
            cclog.ComponentError("MetricRouter", "MetricCache initialization failed:", err.Error())
            return err
        }
        for _, agg := range r.config.IntervalAgg {
            r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
        }
    }
    r.cache, err = NewCache(r.cache_input, r.ticker, &r.cachewg, numIntervals)
    if err != nil {
        cclog.ComponentError("MetricRouter", "MetricCache initialization failed:", err.Error())
        return err
    }
    for _, agg := range r.config.IntervalAgg {
        r.cache.AddAggregation(agg.Name, agg.Function, agg.Condition, agg.Tags, agg.Meta)
    r.config.dropMetrics = make(map[string]bool)
    for _, mname := range r.config.DropMetrics {
        r.config.dropMetrics[mname] = true
    }
    return nil
}
@@ -130,48 +135,34 @@ func (r *metricRouter) StartTimer() {
    cclog.ComponentDebug("MetricRouter", "TIMER START")
}

// EvalCondition evaluates condition cond for metric data from point
func (r *metricRouter) EvalCondition(cond string, pptr *lp.CCMetric) (bool, error) {
    point := *pptr
    expression, err := govaluate.NewEvaluableExpression(cond)
    if err != nil {
        cclog.ComponentDebug("MetricRouter", cond, " = ", err.Error())
        return false, err
    }

// Add metric name, tags, meta data, fields and timestamp to the parameter list
func getParamMap(point lp.CCMetric) map[string]interface{} {
    params := make(map[string]interface{})
    params["metric"] = point
    params["name"] = point.Name()
    for _, t := range point.TagList() {
        params[t.Key] = t.Value
    for key, value := range point.Tags() {
        params[key] = value
    }
    for _, m := range point.MetaList() {
        params[m.Key] = m.Value
    for key, value := range point.Meta() {
        params[key] = value
    }
    for _, f := range point.FieldList() {
        params[f.Key] = f.Value
    }
    params["timestamp"] = point.Time()

    // evaluate condition
    result, err := expression.Evaluate(params)
    if err != nil {
        cclog.ComponentDebug("MetricRouter", cond, " = ", err.Error())
        return false, err
    }
    return bool(result.(bool)), err
    return params
}
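To make the flattening concrete, a hypothetical metric and the parameter map it would roughly produce (illustrative names and values only):

```go
// A metric mem_used with tags hostname=node001,type=node,
// meta data source=MemstatCollector and field value=3.2e9:
params := map[string]interface{}{
    "metric":    point,              // the CCMetric itself
    "name":      "mem_used",
    "hostname":  "node001",          // tag
    "type":      "node",             // tag
    "source":    "MemstatCollector", // meta data tag
    "value":     3.2e9,              // field
    "timestamp": point.Time(),
}
```

This is why conditions such as `source == 'MemstatCollector'` in the README work without any prefix on the key.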

// DoAddTags adds a tag when condition is fulfilled
func (r *metricRouter) DoAddTags(point *lp.CCMetric) {
    for _, m := range r.config.AddTags {
        var conditionMatches bool
        var conditionMatches bool = false

        if m.Condition == "*" {
            conditionMatches = true
        } else {
            var err error
            conditionMatches, err = r.EvalCondition(m.Condition, point)
            conditionMatches, err = EvalBoolCondition(m.Condition, getParamMap(point))
            if err != nil {
                cclog.ComponentError("MetricRouter", err.Error())
                conditionMatches = false
@@ -186,13 +177,13 @@ func (r *metricRouter) DoAddTags(point *lp.CCMetric) {
// DoDelTags removes a tag when condition is fulfilled
func (r *metricRouter) DoDelTags(point *lp.CCMetric) {
    for _, m := range r.config.DelTags {
        var conditionMatches bool
        var conditionMatches bool = false

        if m.Condition == "*" {
            conditionMatches = true
        } else {
            var err error
            conditionMatches, err = r.EvalCondition(m.Condition, point)
            conditionMatches, err = EvalBoolCondition(m.Condition, getParamMap(point))
            if err != nil {
                cclog.ComponentError("MetricRouter", err.Error())
                conditionMatches = false
@@ -204,9 +195,24 @@ func (r *metricRouter) DoDelTags(point *lp.CCMetric) {
        }
    }
}

// Conditional test whether a metric should be dropped
func (r *metricRouter) dropMetric(point *lp.CCMetric) bool {
    // Simple drop check
    if _, ok := r.config.dropMetrics[(*point).Name()]; ok {
        return true
    }
    // Checking the dropping conditions
    for _, m := range r.config.DropMetricsIf {
        conditionMatches, err := EvalBoolCondition(m, getParamMap((*point)))
        if conditionMatches || err != nil {
            return true
        }
    }
    return false
}

// Start starts the metric router
func (r *metricRouter) Start() {

    // start timer if configured
    r.timestamp = time.Now()
    if r.config.IntervalStamp {
@@ -225,13 +231,21 @@ func (r *metricRouter) Start() {
        cclog.ComponentDebug("MetricRouter", "FORWARD", *point)
        r.DoAddTags(point)
        r.DoDelTags(point)
        if new, ok := r.config.RenameMetrics[point.Name()]; ok {
            point.SetName(new)
        }
        r.DoAddTags(point)
        r.DoDelTags(point)

        for _, o := range r.outputs {
            o <- point
        }
    }

    // Start Metric Cache
    r.cache.Start()
    if r.config.NumCacheIntervals > 0 {
        r.cache.Start()
    }

    r.wg.Add(1)
    go func() {
@@ -248,20 +262,30 @@ func (r *metricRouter) Start() {
            if r.config.IntervalStamp {
                (*p).SetTime(r.timestamp)
            }
            forward(p)
            r.cache.Add(p)
            if !r.dropMetric(p) {
                forward(p)
            }
            // even if the metric is dropped, it is stored in the cache for
            // aggregations
            if r.config.NumCacheIntervals > 0 {
                r.cache.Add(p)
            }

        case p := <-r.recv_input:
            // receive from receive manager
            if r.config.IntervalStamp {
                (*p).SetTime(r.timestamp)
            }
            forward(p)
            if !r.dropMetric(p) {
                forward(p)
            }

        case p := <-r.cache_input:
            // receive from metric cache and aggregator
            (*p).AddTag("hostname", r.hostname)
            forward(p)
            // receive from metric collector
            if !r.dropMetric(p) {
                (*p).AddTag("hostname", r.hostname)
                forward(p)
            }
        }
    }
}()
@@ -295,8 +319,10 @@ func (r *metricRouter) Close() {
        // wait for close of channel r.timerdone
        <-r.timerdone
    }
    r.cache.Close()
    r.cachewg.Wait()
    if r.config.NumCacheIntervals > 0 {
        r.cache.Close()
        r.cachewg.Wait()
    }
}

// New creates a new initialized metric router