package main

import (
	"errors"
	"sync"
)

// Default buffer capacity.
// `buffer.data` will only ever grow up to its capacity and a new link
// in the buffer chain will be created if needed so that no copying
// of data or reallocation needs to happen on writes.
const (
	BUFFER_CAP int = 1024
)
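
// Illustrative arithmetic (not part of the original code, the frequency is a
// made-up example): with the default capacity and a metric measured every 10
// seconds, one buffer covers 1024 * 10s, roughly 2.8 hours of data, before a
// new link in the chain is created.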

// So that we can reuse allocations
var bufferPool sync.Pool = sync.Pool{
	New: func() interface{} {
		return &buffer{
			data: make([]Float, 0, BUFFER_CAP),
		}
	},
}

var (
	ErrNoData           error = errors.New("no data for this metric/level")
	ErrDataDoesNotAlign error = errors.New("data from lower granularities does not align")
)

// Each metric on each level has its own buffer.
// This is where the actual values go.
// If `cap(data)` is reached, a new buffer is created and
// becomes the new head of a buffer list.
type buffer struct {
	frequency  int64   // Time between two "slots"
	start      int64   // Timestamp of when `data[0]` was written.
	data       []Float // The slice should never reallocate as `cap(data)` is respected.
	prev, next *buffer // `prev` contains older data, `next` newer data.
}

func newBuffer(ts, freq int64) *buffer {
	b := bufferPool.Get().(*buffer)
	b.frequency = freq
	b.start = ts
	b.prev = nil
	b.next = nil
	return b
}

// If a new buffer was created, the new head is returned.
// Otherwise, the existing buffer is returned.
// Normally, only "newer" data should be written, but if the value would
// end up in the same buffer anyway, it is allowed.
func (b *buffer) write(ts int64, value Float) (*buffer, error) {
	if ts < b.start {
		return nil, errors.New("cannot write value to buffer from past")
	}

	idx := int((ts - b.start) / b.frequency)
	if idx >= cap(b.data) {
		newbuf := newBuffer(ts, b.frequency)
		newbuf.prev = b
		b.next = newbuf
		b = newbuf
		idx = 0
	}

	// Overwriting an existing value or writing a value from the past
	if idx < len(b.data) {
		b.data[idx] = value
		return b, nil
	}

	// Fill up unwritten slots with NaN
	for i := len(b.data); i < idx; i++ {
		b.data = append(b.data, NaN)
	}

	b.data = append(b.data, value)
	return b, nil
}
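
// exampleBufferChain is an illustrative sketch (not part of the original
// code) of how writes fill a buffer and how a new head is returned once the
// capacity is exhausted. The frequency, timestamps and values are made up.
func exampleBufferChain() {
	b := newBuffer(1000, 10) // slots at t=1000, 1010, 1020, ...

	b, _ = b.write(1000, 42.0) // goes to idx 0
	b, _ = b.write(1030, 43.0) // goes to idx 3; idx 1 and 2 are filled with NaN

	// Once the slot index reaches cap(b.data), write allocates a fresh buffer
	// from the pool and returns it as the new head; the old buffer stays
	// reachable via `prev`.
	head, _ := b.write(1000+int64(BUFFER_CAP)*10, 44.0)
	_ = head.prev // still points at the buffer holding the older values
}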

// Return all known values from `from` to `to`. Gaps of information are
// represented by NaN. If values at the start or end are missing,
// instead of NaN values, the second and third return values contain
// the actual `from`/`to`.
// This function goes back through the buffer chain if `from` is older than
// the current buffer's start.
// The loaded values are added to `data` and `data` is returned, possibly with a shorter length.
// If `data` is not long enough to hold all values, this function will panic!
func (b *buffer) read(from, to int64, data []Float) ([]Float, int64, int64, error) {
	if from < b.start {
		if b.prev != nil {
			return b.prev.read(from, to, data)
		}
		from = b.start
	}

	var i int = 0
	var t int64 = from
	for ; t < to; t += b.frequency {
		idx := int((t - b.start) / b.frequency)
		if idx >= cap(b.data) {
			b = b.next
			if b == nil {
				return data, from, t, nil
			}
			idx = 0
		}

		if idx >= len(b.data) {
			if b.next == nil || to <= b.next.start {
				break
			}
			data[i] += NaN
		} else if t < b.start {
			data[i] += NaN
		} else {
			data[i] += b.data[idx]
		}
		i++
	}

	return data[:i], from, t, nil
}
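
// exampleBufferRead is an illustrative sketch (not part of the original code)
// of how read fills a caller-provided slice. The buffer setup, timestamps and
// values are made up. Note that read adds onto the values already in `data`,
// which is why a zero-initialized slice is passed in.
func exampleBufferRead() {
	b := newBuffer(1000, 10)
	b, _ = b.write(1000, 1.0)
	b, _ = b.write(1020, 2.0)

	// One slot per step in [from, to); make() zero-initializes the values.
	data := make([]Float, (1040-1000)/10)

	values, from, to, _ := b.read(1000, 1040, data)
	_ = values      // values[1] is NaN (nothing was written for t=1010)
	_, _ = from, to // may be narrower than the requested range
}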

// Free all buffers in the chain that only contain data
// older than `t`.
func (b *buffer) free(t int64) (int, error) {
	end := b.start + int64(len(b.data))*b.frequency
	if end < t && b.next != nil {
		b.next.prev = nil
		n := 0
		for b != nil {
			prev := b.prev
			if prev != nil && prev.start > b.start {
				panic("time travel?")
			}

			n += 1
			b.frequency = 0
			b.start = 0
			b.next = nil
			b.prev = nil
			bufferPool.Put(b)
			b = prev
		}

		return n, nil
	}

	if b.prev != nil {
		return b.prev.free(t)
	}

	return 0, nil
}

// Could also be called "node" as this forms a node in a tree structure.
// Called level because "node" might be confusing here.
// Can be both a leaf or an inner node. In this tree structure, inner nodes can
// also hold data (in `metrics`).
type level struct {
	lock     sync.RWMutex
	metrics  []*buffer         // Every level can store metrics.
	children map[string]*level // Lower levels.
}

// Find the correct level for the given selector, creating it if
// it does not exist. Example selector in the context of the
// ClusterCockpit could be: []string{ "emmy", "host123", "cpu", "0" }
// This function would probably benefit a lot from `level.children` being a `sync.Map`?
func (l *level) findLevelOrCreate(selector []string, nMetrics int) *level {
	if len(selector) == 0 {
		return l
	}

	// Allow concurrent reads:
	l.lock.RLock()
	var child *level
	var ok bool
	if l.children == nil {
		// Children map needs to be created...
		l.lock.RUnlock()
	} else {
		child, ok := l.children[selector[0]]
		l.lock.RUnlock()
		if ok {
			return child.findLevelOrCreate(selector[1:], nMetrics)
		}
	}

	// The level does not exist, take write lock for unique access:
	l.lock.Lock()
	// While this thread waited for the write lock, another thread
	// could have created the child node.
	if l.children != nil {
		child, ok = l.children[selector[0]]
		if ok {
			l.lock.Unlock()
			return child.findLevelOrCreate(selector[1:], nMetrics)
		}
	}

	child = &level{
		metrics:  make([]*buffer, nMetrics),
		children: nil,
	}

	if l.children != nil {
		l.children[selector[0]] = child
	} else {
		l.children = map[string]*level{selector[0]: child}
	}
	l.lock.Unlock()
	return child.findLevelOrCreate(selector[1:], nMetrics)
}
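
// exampleFindLevel is an illustrative sketch (not part of the original code)
// of how a selector is resolved to a node in the tree, creating missing
// levels on the way. The cluster/host/cpu names are made up.
func exampleFindLevel() {
	root := &level{
		metrics:  make([]*buffer, 2),
		children: make(map[string]*level),
	}

	// Creates the levels "emmy" -> "host123" -> "cpu" -> "0" and returns the
	// innermost one; every created level gets room for 2 metrics.
	l := root.findLevelOrCreate([]string{"emmy", "host123", "cpu", "0"}, 2)

	// A second call with the same selector returns the existing node.
	again := root.findLevelOrCreate([]string{"emmy", "host123", "cpu", "0"}, 2)
	_ = l == again // true
}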

// For aggregation over multiple values at different cpus/sockets/..., not time!
type AggregationStrategy int

const (
	NoAggregation AggregationStrategy = iota
	SumAggregation
	AvgAggregation
)

type MemoryStore struct {
	root    level // root of the tree structure
	metrics map[string]struct {
		offset      int
		aggregation AggregationStrategy
		frequency   int64
	}
}

// Return a new, initialized instance of a MemoryStore.
// Will panic if values in the metric configurations are invalid.
func NewMemoryStore(metrics map[string]MetricConfig) *MemoryStore {
	ms := make(map[string]struct {
		offset      int
		aggregation AggregationStrategy
		frequency   int64
	})

	offset := 0
	for key, config := range metrics {
		aggregation := NoAggregation
		if config.Aggregation == "sum" {
			aggregation = SumAggregation
		} else if config.Aggregation == "avg" {
			aggregation = AvgAggregation
		} else if config.Aggregation != "" {
			panic("invalid aggregation strategy: " + config.Aggregation)
		}

		ms[key] = struct {
			offset      int
			aggregation AggregationStrategy
			frequency   int64
		}{
			offset:      offset,
			aggregation: aggregation,
			frequency:   config.Frequency,
		}

		offset += 1
	}

	return &MemoryStore{
		root: level{
			metrics:  make([]*buffer, len(metrics)),
			children: make(map[string]*level),
		},
		metrics: ms,
	}
}
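
// exampleNewMemoryStore is an illustrative sketch (not part of the original
// code). It assumes MetricConfig, defined elsewhere in this package, exposes
// the Frequency and Aggregation fields used in NewMemoryStore above; the
// metric names and frequencies are made up. An empty Aggregation string means
// NoAggregation.
func exampleNewMemoryStore() *MemoryStore {
	return NewMemoryStore(map[string]MetricConfig{
		"flops":  {Frequency: 10, Aggregation: "avg"},
		"mem_bw": {Frequency: 10, Aggregation: "sum"},
		"clock":  {Frequency: 60, Aggregation: ""},
	})
}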

// Write all values in `metrics` to the level specified by `selector` for time `ts`.
// Look at `findLevelOrCreate` for how selectors work.
func (m *MemoryStore) Write(selector []string, ts int64, metrics []Metric) error {
	l := m.root.findLevelOrCreate(selector, len(m.metrics))
	l.lock.Lock()
	defer l.lock.Unlock()

	for _, metric := range metrics {
		minfo, ok := m.metrics[metric.Name]
		if !ok {
			// return errors.New("Unknown metric: " + metric.Name)
			continue
		}

		b := l.metrics[minfo.offset]
		if b == nil {
			// First write to this metric and level
			b = newBuffer(ts, minfo.frequency)
			l.metrics[minfo.offset] = b
		}

		nb, err := b.write(ts, metric.Value)
		if err != nil {
			return err
		}

		// Last write created a new buffer...
		if b != nb {
			l.metrics[minfo.offset] = nb
		}
	}
	return nil
}
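
// exampleWrite is an illustrative sketch (not part of the original code) of a
// single Write call. The Metric fields Name and Value are the ones used in
// Write above; the selector, timestamp, metric names and values are made up.
func exampleWrite(ms *MemoryStore) error {
	return ms.Write([]string{"emmy", "host123", "cpu", "0"}, 1632743400, []Metric{
		{Name: "flops", Value: 42.0},
		{Name: "mem_bw", Value: 1024.0},
	})
}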

// Returns all values for metric `metric` from `from` to `to` for the selected level(s).
// If the level does not hold the metric itself, the data will be aggregated recursively from the children.
// The second and third return values are the actual from/to for the data. Those can be different from
// the range asked for if no data was available.
func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]Float, int64, int64, error) {
	if from > to {
		return nil, 0, 0, errors.New("invalid time range")
	}

	minfo, ok := m.metrics[metric]
	if !ok {
		return nil, 0, 0, errors.New("unknown metric: " + metric)
	}

	n, data := 0, make([]Float, (to-from)/minfo.frequency+1)
	err := m.root.findBuffers(selector, minfo.offset, func(b *buffer) error {
		cdata, cfrom, cto, err := b.read(from, to, data)
		if err != nil {
			return err
		}

		if n == 0 {
			from, to = cfrom, cto
		} else if from != cfrom || to != cto || len(data) != len(cdata) {
			return ErrDataDoesNotAlign
		}

		data = cdata
		n += 1
		return nil
	})

	if err != nil {
		return nil, 0, 0, err
	} else if n == 0 {
		return nil, 0, 0, errors.New("metric not found")
	} else if n > 1 {
		if minfo.aggregation == AvgAggregation {
			normalize := 1. / Float(n)
			for i := 0; i < len(data); i++ {
				data[i] *= normalize
			}
		} else if minfo.aggregation != SumAggregation {
			return nil, 0, 0, errors.New("invalid aggregation")
		}
	}

	return data, from, to, nil
}
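
// exampleRead is an illustrative sketch (not part of the original code) of
// how Read's return values are meant to be used. How a Selector is built is
// defined elsewhere in this package, so it is taken as a parameter here; the
// metric name and time range are made up.
func exampleRead(ms *MemoryStore, sel Selector) {
	data, from, to, err := ms.Read(sel, "flops", 1632743400, 1632743700)
	if err != nil {
		return
	}

	// data[0] corresponds to `from`, each following value is one `frequency`
	// later; `from`/`to` may be narrower than requested if data was missing.
	_ = data
	_, _ = from, to
}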

// Release all buffers for the selected level and all its children that contain only
// values older than `t`.
func (m *MemoryStore) Free(selector Selector, t int64) (int, error) {
	n := 0
	err := m.root.findBuffers(selector, -1, func(b *buffer) error {
		m, err := b.free(t)
		n += m
		return err
	})
	return n, err
}