Mirror of https://github.com/ClusterCockpit/cc-metric-store.git (synced 2024-11-10 05:07:25 +01:00)
Refactor and simplify

commit 3619d7a157
parent 66be268b57
api.go (7 changed lines)
@@ -62,8 +62,8 @@ func (data *ApiMetricData) PadDataWithNull(from, to int64, metric string) {
         return
     }
 
-    if (data.From / minfo.frequency) > (from / minfo.frequency) {
-        padfront := int((data.From / minfo.frequency) - (from / minfo.frequency))
+    if (data.From / minfo.Frequency) > (from / minfo.Frequency) {
+        padfront := int((data.From / minfo.Frequency) - (from / minfo.Frequency))
         ndata := make([]Float, 0, padfront+len(data.Data))
         for i := 0; i < padfront; i++ {
             ndata = append(ndata, NaN)
@@ -101,7 +101,7 @@ func handleFree(rw http.ResponseWriter, r *http.Request) {
     }
 
     bodyDec := json.NewDecoder(r.Body)
-    var selectors []Selector
+    var selectors [][]string
     err = bodyDec.Decode(&selectors)
     if err != nil {
         http.Error(rw, err.Error(), http.StatusBadRequest)
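Note on the handleFree hunk above: the /api/free request body now decodes into a plain [][]string instead of []Selector. For illustration only (the concrete values are invented, and the cluster/host/... path layout is assumed from how selectors are used elsewhere in this commit), a body could look like:

    [
      ["testcluster", "host123", "cpu0"],
      ["testcluster", "host124"]
    ]

Each inner array is one exact path to free; the richer Selector elements are no longer needed here.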
@@ -137,6 +137,7 @@ func handleWrite(rw http.ResponseWriter, r *http.Request) {
 
     dec := lineprotocol.NewDecoderWithBytes(bytes)
     if err := decodeLine(dec, r.URL.Query().Get("cluster")); err != nil {
+        log.Printf("/api/write error: %s", err.Error())
         http.Error(rw, err.Error(), http.StatusBadRequest)
         return
     }
(next file in this commit; file name not shown in this view)

@@ -5,6 +5,7 @@ import (
     "context"
     "encoding/json"
     "flag"
+    "fmt"
     "log"
     "os"
     "os/signal"
@@ -15,12 +16,43 @@ import (
     "github.com/google/gops/agent"
 )
 
+// For aggregation over multiple values at different cpus/sockets/..., not time!
+type AggregationStrategy int
+
+const (
+    NoAggregation AggregationStrategy = iota
+    SumAggregation
+    AvgAggregation
+)
+
+func (as *AggregationStrategy) UnmarshalJSON(data []byte) error {
+    var str string
+    if err := json.Unmarshal(data, &str); err != nil {
+        return err
+    }
+
+    switch str {
+    case "":
+        *as = NoAggregation
+    case "sum":
+        *as = SumAggregation
+    case "avg":
+        *as = AvgAggregation
+    default:
+        return fmt.Errorf("invalid aggregation strategy: %#v", str)
+    }
+    return nil
+}
+
 type MetricConfig struct {
     // Interval in seconds at which measurements will arive.
     Frequency int64 `json:"frequency"`
 
     // Can be 'sum', 'avg' or null. Describes how to aggregate metrics from the same timestep over the hierarchy.
-    Aggregation string `json:"aggregation"`
+    Aggregation AggregationStrategy `json:"aggregation"`
+
+    // Private, used internally...
+    offset int
 }
 
 type HttpConfig struct {
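The new AggregationStrategy type keeps the on-disk config format unchanged: "sum", "avg", an empty string, or a missing/null "aggregation" field still work, they are simply mapped to the enum while the JSON is decoded, and an unknown string now fails at decode time instead of panicking later in NewMemoryStore. A minimal sketch (the literal values are made up for illustration):

    var mc MetricConfig
    err := json.Unmarshal([]byte(`{"frequency": 15, "aggregation": "avg"}`), &mc)
    // err == nil, mc.Frequency == 15, mc.Aggregation == AvgAggregation
    // "aggregation": "" or an omitted field yields NoAggregation;
    // an unknown value such as "max" makes Unmarshal return an error.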
@@ -107,7 +139,7 @@ func intervals(wg *sync.WaitGroup, ctx context.Context) {
         case <-ticks:
             t := time.Now().Add(-d)
             log.Printf("start freeing buffers (older than %s)...\n", t.Format(time.RFC3339))
-            freed, err := memoryStore.Free(Selector{}, t.Unix())
+            freed, err := memoryStore.Free(nil, t.Unix())
             if err != nil {
                 log.Printf("freeing up buffers failed: %s\n", err.Error())
             } else {
lineprotocol.go (106 changed lines)
@@ -114,9 +114,9 @@ func reorder(buf, prefix []byte) []byte {
 func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
     // Reduce allocations in loop:
     t := time.Now()
-    metrics := make([]Metric, 0, 10)
+    metric, metricBuf := Metric{}, make([]byte, 0, 16)
     selector := make([]string, 0, 4)
-    typeBuf, subTypeBuf := make([]byte, 0, 20), make([]byte, 0)
+    typeBuf, subTypeBuf := make([]byte, 0, 16), make([]byte, 0)
 
     // Optimize for the case where all lines in a "batch" are about the same
     // cluster and host. By using `WriteToLevel` (level = host), we do not need
@@ -124,24 +124,21 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
     var lvl *level = nil
     var prevCluster, prevHost string = "", ""
 
+    var ok bool
     for dec.Next() {
-        metrics = metrics[:0]
         rawmeasurement, err := dec.Measurement()
         if err != nil {
             return err
         }
 
-        // A more dense lp format if supported if the measurement is 'data'.
-        // In that case, the field keys are used as metric names.
-        if string(rawmeasurement) != "data" {
-            minfo, ok := memoryStore.metrics[string(rawmeasurement)]
-            if !ok {
-                continue
-            }
-
-            metrics = append(metrics, Metric{
-                minfo: minfo,
-            })
+        // Needs to be copied because another call to dec.* would
+        // invalidate the returned slice.
+        metricBuf = append(metricBuf[:0], rawmeasurement...)
+
+        // The go compiler optimizes map[string(byteslice)] lookups:
+        metric.mc, ok = memoryStore.metrics[string(rawmeasurement)]
+        if !ok {
+            continue
         }
 
         typeBuf, subTypeBuf := typeBuf[:0], subTypeBuf[:0]
@@ -176,6 +173,7 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
                     break
                 }
 
+                // We cannot be sure that the "type" tag comes before the "type-id" tag:
                 if len(typeBuf) == 0 {
                     typeBuf = append(typeBuf, val...)
                 } else {
@@ -184,6 +182,7 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
             case "type-id":
                 typeBuf = append(typeBuf, val...)
             case "subtype":
+                // We cannot be sure that the "subtype" tag comes before the "stype-id" tag:
                 if len(subTypeBuf) == 0 {
                     subTypeBuf = append(subTypeBuf, val...)
                 } else {
@@ -197,6 +196,7 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
             }
         }
 
+        // If the cluster or host changed, the lvl was set to nil
         if lvl == nil {
             selector = selector[:2]
             selector[0], selector[1] = cluster, host
@@ -204,6 +204,7 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
             prevCluster, prevHost = cluster, host
         }
 
+        // subtypes:
         selector = selector[:0]
         if len(typeBuf) > 0 {
             selector = append(selector, string(typeBuf)) // <- Allocation :(
@@ -212,71 +213,34 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
             }
         }
 
-        if len(metrics) == 0 {
-            for {
-                key, val, err := dec.NextField()
-                if err != nil {
-                    return err
-                }
-
-                if key == nil {
-                    break
-                }
-
-                var value Float
-                if val.Kind() == lineprotocol.Float {
-                    value = Float(val.FloatV())
-                } else if val.Kind() == lineprotocol.Int {
-                    value = Float(val.IntV())
-                } else {
-                    return fmt.Errorf("unsupported value type in message: %s", val.Kind().String())
-                }
-
-                minfo, ok := memoryStore.metrics[string(key)]
-                if !ok {
-                    continue
-                }
-
-                metrics = append(metrics, Metric{
-                    minfo: minfo,
-                    Value: value,
-                })
-            }
-        } else {
-            var value Float
-            for {
-                key, val, err := dec.NextField()
-                if err != nil {
-                    return err
-                }
-
-                if key == nil {
-                    break
-                }
-
-                if string(key) != "value" {
-                    return fmt.Errorf("unkown field: '%s' (value: %#v)", string(key), val)
-                }
-
-                if val.Kind() == lineprotocol.Float {
-                    value = Float(val.FloatV())
-                } else if val.Kind() == lineprotocol.Int {
-                    value = Float(val.IntV())
-                } else {
-                    return fmt.Errorf("unsupported value type in message: %s", val.Kind().String())
-                }
+        for {
+            key, val, err := dec.NextField()
+            if err != nil {
+                return err
             }
 
-            metrics[0].Value = value
+            if key == nil {
+                break
+            }
+
+            if string(key) != "value" {
+                return fmt.Errorf("unkown field: '%s' (value: %#v)", string(key), val)
+            }
+
+            if val.Kind() == lineprotocol.Float {
+                metric.Value = Float(val.FloatV())
+            } else if val.Kind() == lineprotocol.Int {
+                metric.Value = Float(val.IntV())
+            } else {
+                return fmt.Errorf("unsupported value type in message: %s", val.Kind().String())
+            }
         }
 
-        t, err = dec.Time(lineprotocol.Second, t)
-        if err != nil {
+        if t, err = dec.Time(lineprotocol.Second, t); err != nil {
             return err
         }
 
-        // log.Printf("write: %s (%v) -> %v\n", string(measurement), selector, value)
-        if err := memoryStore.WriteToLevel(lvl, selector, t.Unix(), metrics); err != nil {
+        if err := memoryStore.WriteToLevel(lvl, selector, t.Unix(), []Metric{metric}); err != nil {
             return err
         }
     }
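With the dense "data" measurement format removed, decodeLine now treats every line's measurement name as the metric name and expects exactly one field called "value"; lines for metrics that are not configured are silently skipped, and any other field name is an error. An illustrative line (all tag values are invented, and the cluster/hostname tag keys are assumed here, they are not visible in the hunks above):

    flops_any,cluster=testcluster,hostname=host123,type=cpu,type-id=0 value=42.0 1651135921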
memstore.go (145 changed lines)
@@ -192,35 +192,26 @@ func (b *buffer) read(from, to int64, data []Float) ([]Float, int64, int64, erro
     return data[:i], from, t, nil
 }
 
-// Free up and free all buffers in the chain only containing data
-// older than `t`.
-func (b *buffer) free(t int64) (int, error) {
-    end := b.end()
-    if end < t && b.next != nil {
-        b.next.prev = nil
-        n := 0
-        for b != nil {
-            prev := b.prev
-            if prev != nil && prev.start > b.start {
-                panic("time travel?")
-            }
-
-            n += 1
-            // Buffers that come from the
-            // archive should not be reused.
-            if cap(b.data) == BUFFER_CAP {
-                bufferPool.Put(b)
-            }
-            b = prev
-        }
-        return n, nil
-    }
-
+// Returns true if this buffer needs to be freed.
+func (b *buffer) free(t int64) (delme bool, n int) {
     if b.prev != nil {
-        return b.prev.free(t)
+        delme, m := b.prev.free(t)
+        n += m
+        if delme {
+            b.prev.next = nil
+            if cap(b.prev.data) == BUFFER_CAP {
+                bufferPool.Put(b.prev)
+            }
+            b.prev = nil
+        }
     }
 
-    return 0, nil
+    end := b.end()
+    if end < t {
+        return true, n + 1
+    }
+
+    return false, n
 }
 
 // Call `callback` on every buffer that contains data in the range from `from` to `to`.
@@ -300,52 +291,54 @@ func (l *level) findLevelOrCreate(selector []string, nMetrics int) *level {
     return child.findLevelOrCreate(selector[1:], nMetrics)
 }
 
-// For aggregation over multiple values at different cpus/sockets/..., not time!
-type AggregationStrategy int
-
-const (
-    NoAggregation AggregationStrategy = iota
-    SumAggregation
-    AvgAggregation
-)
-
-type metricInfo struct {
-    offset      int
-    aggregation AggregationStrategy
-    frequency   int64
+func (l *level) free(t int64) (int, error) {
+    l.lock.Lock()
+    defer l.lock.Unlock()
+
+    n := 0
+    for i, b := range l.metrics {
+        if b != nil {
+            delme, m := b.free(t)
+            n += m
+            if delme {
+                if cap(b.data) == BUFFER_CAP {
+                    bufferPool.Put(b)
+                }
+                l.metrics[i] = nil
+            }
+        }
+    }
+
+    for _, l := range l.children {
+        m, err := l.free(t)
+        n += m
+        if err != nil {
+            return n, err
+        }
+    }
+
+    return n, nil
 }
 
 type MemoryStore struct {
     root    level // root of the tree structure
-    metrics map[string]metricInfo
+    metrics map[string]MetricConfig
 }
 
 // Return a new, initialized instance of a MemoryStore.
 // Will panic if values in the metric configurations are invalid.
 func NewMemoryStore(metrics map[string]MetricConfig) *MemoryStore {
-    ms := make(map[string]metricInfo)
-
     offset := 0
     for key, config := range metrics {
-        aggregation := NoAggregation
-        if config.Aggregation == "sum" {
-            aggregation = SumAggregation
-        } else if config.Aggregation == "avg" {
-            aggregation = AvgAggregation
-        } else if config.Aggregation != "" {
-            panic("invalid aggregation strategy: " + config.Aggregation)
-        }
-
         if config.Frequency == 0 {
             panic("invalid frequency")
         }
 
-        ms[key] = metricInfo{
-            offset:      offset,
-            aggregation: aggregation,
-            frequency:   config.Frequency,
+        metrics[key] = MetricConfig{
+            Frequency:   config.Frequency,
+            Aggregation: config.Aggregation,
+            offset:      offset,
         }
 
         offset += 1
     }
 
@@ -354,7 +347,7 @@ func NewMemoryStore(metrics map[string]MetricConfig) *MemoryStore {
             metrics:  make([]*buffer, len(metrics)),
             children: make(map[string]*level),
         },
-        metrics: ms,
+        metrics: metrics,
     }
 }
 
@@ -363,10 +356,10 @@ func NewMemoryStore(metrics map[string]MetricConfig) *MemoryStore {
 func (m *MemoryStore) Write(selector []string, ts int64, metrics []Metric) error {
     var ok bool
     for i, metric := range metrics {
-        if metric.minfo.frequency == 0 {
-            metric.minfo, ok = m.metrics[metric.Name]
+        if metric.mc.Frequency == 0 {
+            metric.mc, ok = m.metrics[metric.Name]
             if !ok {
-                metric.minfo.frequency = 0
+                metric.mc.Frequency = 0
             }
             metrics[i] = metric
         }
@@ -386,15 +379,15 @@ func (m *MemoryStore) WriteToLevel(l *level, selector []string, ts int64, metric
     defer l.lock.Unlock()
 
     for _, metric := range metrics {
-        if metric.minfo.frequency == 0 {
+        if metric.mc.Frequency == 0 {
             continue
         }
 
-        b := l.metrics[metric.minfo.offset]
+        b := l.metrics[metric.mc.offset]
         if b == nil {
             // First write to this metric and level
-            b = newBuffer(ts, metric.minfo.frequency)
-            l.metrics[metric.minfo.offset] = b
+            b = newBuffer(ts, metric.mc.Frequency)
+            l.metrics[metric.mc.offset] = b
         }
 
         nb, err := b.write(ts, metric.Value)
@@ -404,7 +397,7 @@ func (m *MemoryStore) WriteToLevel(l *level, selector []string, ts int64, metric
 
         // Last write created a new buffer...
         if b != nb {
-            l.metrics[metric.minfo.offset] = nb
+            l.metrics[metric.mc.offset] = nb
         }
     }
     return nil
@@ -424,7 +417,7 @@ func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]
         return nil, 0, 0, errors.New("unkown metric: " + metric)
     }
 
-    n, data := 0, make([]Float, (to-from)/minfo.frequency+1)
+    n, data := 0, make([]Float, (to-from)/minfo.Frequency+1)
     err := m.root.findBuffers(selector, minfo.offset, func(b *buffer) error {
         cdata, cfrom, cto, err := b.read(from, to, data)
         if err != nil {
@@ -434,7 +427,17 @@ func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]
         if n == 0 {
             from, to = cfrom, cto
         } else if from != cfrom || to != cto || len(data) != len(cdata) {
-            return ErrDataDoesNotAlign
+            missingfront, missingback := int((from-cfrom)/minfo.Frequency), int((to-cto)/minfo.Frequency)
+            if missingfront != 0 {
+                return ErrDataDoesNotAlign
+            }
+
+            cdata = cdata[0 : len(cdata)-missingback]
+            if len(cdata) != len(data) {
+                return ErrDataDoesNotAlign
+            }
+
+            from, to = cfrom, cto
         }
 
         data = cdata
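The alignment change in Read above relaxes the old all-or-nothing check: a buffer may now end a few steps early and only the trailing part of cdata is cut off, while a buffer that starts too late still fails. With illustrative numbers (not from this commit), minfo.Frequency = 10, from = 1000 and to = 1100: a buffer reporting cfrom = 1000 and cto = 1090 yields missingfront = (1000-1000)/10 = 0 and missingback = (1100-1090)/10 = 1, so one trailing element is dropped before the length check; any nonzero missingfront still returns ErrDataDoesNotAlign.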
@@ -447,12 +450,12 @@ func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]
     } else if n == 0 {
         return nil, 0, 0, errors.New("metric or host not found")
     } else if n > 1 {
-        if minfo.aggregation == AvgAggregation {
+        if minfo.Aggregation == AvgAggregation {
             normalize := 1. / Float(n)
             for i := 0; i < len(data); i++ {
                 data[i] *= normalize
             }
-        } else if minfo.aggregation != SumAggregation {
+        } else if minfo.Aggregation != SumAggregation {
             return nil, 0, 0, errors.New("invalid aggregation")
         }
     }
@@ -462,14 +465,8 @@ func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]
 
 // Release all buffers for the selected level and all its children that contain only
 // values older than `t`.
-func (m *MemoryStore) Free(selector Selector, t int64) (int, error) {
-    n := 0
-    err := m.root.findBuffers(selector, -1, func(b *buffer) error {
-        m, err := b.free(t)
-        n += m
-        return err
-    })
-    return n, err
+func (m *MemoryStore) Free(selector []string, t int64) (int, error) {
+    return m.GetLevel(selector).free(t)
 }
 
 func (m *MemoryStore) FreeAll() error {
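MemoryStore.Free now takes a plain []string path and delegates to the new level.free via GetLevel, which is also why the offset == -1 branch in selector.go's findBuffers (further below) could be dropped. A usage sketch matching the updated test and the intervals goroutine (cutoff is a placeholder variable):

    // free buffers below a specific host that only hold data older than cutoff:
    freed, err := memoryStore.Free([]string{"cluster", "host"}, cutoff)

    // passing nil presumably selects the root level, freeing across all clusters:
    freed, err = memoryStore.Free(nil, cutoff)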
(test file in this commit; file name not shown in this view)

@@ -201,8 +201,8 @@ func TestMemoryStoreMissingDatapoints(t *testing.T) {
 func TestMemoryStoreAggregation(t *testing.T) {
     count := 3000
     store := NewMemoryStore(map[string]MetricConfig{
-        "a": {Frequency: 1, Aggregation: "sum"},
-        "b": {Frequency: 2, Aggregation: "avg"},
+        "a": {Frequency: 1, Aggregation: SumAggregation},
+        "b": {Frequency: 2, Aggregation: AvgAggregation},
     })
 
     for i := 0; i < count; i++ {
@@ -269,7 +269,7 @@ func TestMemoryStoreStats(t *testing.T) {
     count := 3000
     store := NewMemoryStore(map[string]MetricConfig{
         "a": {Frequency: 1},
-        "b": {Frequency: 1, Aggregation: "avg"},
+        "b": {Frequency: 1, Aggregation: AvgAggregation},
     })
 
     sel1 := []string{"cluster", "host1"}
@@ -415,7 +415,7 @@ func TestMemoryStoreFree(t *testing.T) {
         }
     }
 
-    n, err := store.Free(Selector{{String: "cluster"}, {String: "host"}}, int64(BUFFER_CAP*2)+100)
+    n, err := store.Free([]string{"cluster", "host"}, int64(BUFFER_CAP*2)+100)
     if err != nil {
         t.Fatal(err)
     }
@@ -501,7 +501,7 @@ func BenchmarkMemoryStoreAggregation(b *testing.B) {
     b.StopTimer()
     count := 2000
     store := NewMemoryStore(map[string]MetricConfig{
-        "flops_any": {Frequency: 1, Aggregation: "avg"},
+        "flops_any": {Frequency: 1, Aggregation: AvgAggregation},
     })
 
     sel := []string{"testcluster", "host123", "cpu0"}
selector.go (17 changed lines)
@@ -55,20 +55,9 @@ func (l *level) findBuffers(selector Selector, offset int, f func(b *buffer) err
     defer l.lock.RUnlock()
 
     if len(selector) == 0 {
-        if offset == -1 {
-            for _, b := range l.metrics {
-                if b != nil {
-                    err := f(b)
-                    if err != nil {
-                        return err
-                    }
-                }
-            }
-        } else {
-            b := l.metrics[offset]
-            if b != nil {
-                return f(b)
-            }
+        b := l.metrics[offset]
+        if b != nil {
+            return f(b)
         }
     }
 
     for _, lvl := range l.children {
stats.go (4 changed lines)
@@ -103,9 +103,9 @@ func (m *MemoryStore) Stats(selector Selector, metric string, from, to int64) (*
         return nil, 0, 0, ErrNoData
     }
 
-    if minfo.aggregation == AvgAggregation {
+    if minfo.Aggregation == AvgAggregation {
         avg /= Float(n)
-    } else if n > 1 && minfo.aggregation != SumAggregation {
+    } else if n > 1 && minfo.Aggregation != SumAggregation {
         return nil, 0, 0, errors.New("invalid aggregation")
     }
 