Mirror of https://github.com/ClusterCockpit/cc-metric-store.git (synced 2025-07-19 19:31:41 +02:00)

Compare commits: v0.0.1...store_unit (9 commits)
Commits (SHA1):
26335a2216
eb5b22e3d5
04cf55534f
2d77dae30c
5e5586f319
e71e1b123b
7c891e1593
051cba4666
89acbe8db2
Makefile (16 changed lines)

@@ -2,13 +2,13 @@
 APP = cc-metric-store
 GOSRC_APP := cc-metric-store.go
 GOSRC_FILES := api.go \
 	memstore.go \
 	archive.go \
 	debug.go \
 	float.go \
 	lineprotocol.go \
 	selector.go \
 	stats.go



@@ -18,7 +18,7 @@ BINDIR ?= bin
 .PHONY: all
 all: $(APP)

-$(APP): $(GOSRC)
+$(APP): $(GOSRC) $(GOSRC_APP)
 	go get
 	go build -o $(APP) $(GOSRC_APP) $(GOSRC_FILES)

api.go (4 changed lines)

@@ -30,6 +30,7 @@ type ApiMetricData struct {
 	Avg Float `json:"avg"`
 	Min Float `json:"min"`
 	Max Float `json:"max"`
+	Unit string `json:"unit,omitempty"`
 }

 // TODO: Optimize this, just like the stats endpoint!

@@ -182,6 +183,7 @@ type ApiQueryRequest struct {
 	From        int64 `json:"from"`
 	To          int64 `json:"to"`
 	WithStats   bool  `json:"with-stats"`
+	WithUnit    bool  `json:"with-unit,omitempty"`
 	WithData    bool  `json:"with-data"`
 	WithPadding bool  `json:"with-padding"`
 	Queries     []ApiQuery `json:"queries"`

@@ -281,7 +283,7 @@ func handleQuery(rw http.ResponseWriter, r *http.Request) {
 	res := make([]ApiMetricData, 0, len(sels))
 	for _, sel := range sels {
 		data := ApiMetricData{}
-		data.Data, data.From, data.To, err = memoryStore.Read(sel, query.Metric, req.From, req.To)
+		data.Data, data.From, data.To, data.Unit, err = memoryStore.Read(sel, query.Metric, req.From, req.To)
 		// log.Printf("data: %#v, %#v, %#v, %#v", data.Data, data.From, data.To, err)
 		if err != nil {
 			msg := err.Error()

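The two new fields let a client opt in to unit information per query: WithUnit on the request, and Unit on each ApiMetricData in the response (dropped from the JSON when empty). Below is a minimal, self-contained sketch of a request body with the new flag set; the struct only mirrors the ApiQueryRequest fields visible in this diff, while the real request also carries the queries themselves.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Local mirror of the ApiQueryRequest fields shown above (illustration only).
type queryRequest struct {
	From      int64 `json:"from"`
	To        int64 `json:"to"`
	WithStats bool  `json:"with-stats"`
	WithUnit  bool  `json:"with-unit,omitempty"`
	WithData  bool  `json:"with-data"`
}

func main() {
	body, err := json.Marshal(queryRequest{
		From:      1692628930,
		To:        1692629003,
		WithStats: true,
		WithUnit:  true, // ask the store to return each metric's unit
		WithData:  true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
	// {"from":1692628930,"to":1692629003,"with-stats":true,"with-unit":true,"with-data":true}
}
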
archive.go

@@ -24,6 +24,7 @@ import (
 type CheckpointMetrics struct {
 	Frequency int64   `json:"frequency"`
 	Start     int64   `json:"start"`
+	Unit      string  `json:"unit,omitempty"`
 	Data      []Float `json:"data"`
 }

@@ -36,6 +37,7 @@ func (cm *CheckpointMetrics) MarshalJSON() ([]byte, error) {
 	buf = strconv.AppendInt(buf, cm.Frequency, 10)
 	buf = append(buf, `,"start":`...)
 	buf = strconv.AppendInt(buf, cm.Start, 10)
+	buf = append(buf, fmt.Sprintf(`,"unit":"%s"`, cm.Unit)...)
 	buf = append(buf, `,"data":[`...)
 	for i, x := range cm.Data {
 		if i != 0 {

@@ -179,6 +181,7 @@ func (l *level) toCheckpointFile(from, to int64, m *MemoryStore) (*CheckpointFil
 			Frequency: b.frequency,
 			Start:     start,
 			Data:      data,
+			Unit:      b.unit,
 		}
 	}

@@ -309,6 +312,7 @@ func (l *level) loadFile(cf *CheckpointFile, m *MemoryStore) error {
 			frequency: metric.Frequency,
 			start:     metric.Start,
 			data:      metric.Data[0:n:n], // Space is wasted here :(
+			unit:      metric.Unit,
 			prev:      nil,
 			next:      nil,
 			archived:  true,

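A hedged, in-package sketch of what one CheckpointMetrics entry serializes to with the MarshalJSON change above; it assumes it sits in the same package (it uses the package's CheckpointMetrics and Float types), and the exact formatting of the data numbers depends on how Float is appended.

// Hedged sketch, same package as archive.go.
func demoCheckpointMetricsUnit() {
	cm := CheckpointMetrics{
		Frequency: 5,
		Start:     1692628930,
		Unit:      "MFLOP/s",
		Data:      []Float{0.3, 0.0, 28.2},
	}
	buf, err := cm.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
	// Roughly: {"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.3,0,28.2]}
}

Note that the Sprintf-based append does not JSON-escape the unit string; that is harmless for plain unit names like MFLOP/s, but a unit containing a double quote would produce invalid JSON.
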
archive_test.go (new file, 33 lines)

@@ -0,0 +1,33 @@
+package main
+
+import (
+	"bufio"
+	"log"
+	"os"
+	"testing"
+	"time"
+)
+
+func TestFromCheckpoint(t *testing.T) {
+	m := NewMemoryStore(map[string]MetricConfig{
+		"cpi":       {Frequency: 5, Aggregation: AvgAggregation},
+		"flops_any": {Frequency: 5, Aggregation: SumAggregation},
+		"flops_dp":  {Frequency: 5, Aggregation: SumAggregation},
+		"flops_sp":  {Frequency: 5, Aggregation: SumAggregation},
+	})
+
+	startupTime := time.Now()
+	files, err := m.FromCheckpoint("./testdata/checkpoints", 1692628930)
+	loadedData := m.SizeInBytes() / 1024 / 1024 // In MB
+	if err != nil {
+		t.Fatal(err)
+	} else {
+		log.Printf("Checkpoints loaded (%d files, %d MB, that took %fs)\n", files, loadedData, time.Since(startupTime).Seconds())
+	}
+
+	m.DebugDump(bufio.NewWriter(os.Stdout), nil)
+
+	if files != 2 {
+		t.Errorf("expected: %d, got: %d\n", 2, files)
+	}
+}

config.json (16 changed lines)

@@ -1,27 +1,23 @@
 {
   "metrics": {
+    "cpi": { "frequency": 15, "aggregation": "avg" },
     "flops_any": { "frequency": 15, "aggregation": "sum" },
     "flops_dp": { "frequency": 15, "aggregation": "sum" },
-    "flops_sp": { "frequency": 15, "aggregation": "sum" },
-    "mem_bw": { "frequency": 15, "aggregation": "sum" },
-    "load_one": { "frequency": 15, "aggregation": null },
-    "load_five": { "frequency": 15, "aggregation": null }
+    "flops_sp": { "frequency": 15, "aggregation": "sum" }
   },
   "checkpoints": {
     "interval": "12h",
-    "directory": "./var/checkpoints",
+    "directory": "./testdata/checkpoints",
     "restore": "48h"
   },
   "archive": {
     "interval": "168h",
-    "directory": "./var/archive"
+    "directory": "./testdata/archive"
   },
   "http-api": {
-    "address": "0.0.0.0:8081",
+    "address": "127.0.0.1:8081",
     "https-cert-file": null,
     "https-key-file": null
   },
-  "retention-in-memory": "48h",
-  "nats": null,
-  "jwt-public-key": "kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0="
+  "retention-in-memory": "48h"
 }

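For illustration, a self-contained sketch (not the project's own config loader) that reads the trimmed "metrics" section and prints each metric's sampling frequency and aggregation; the struct names here are local to the example.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// Local mirror of one "metrics" entry in config.json (illustration only).
type metricEntry struct {
	Frequency   int64   `json:"frequency"`
	Aggregation *string `json:"aggregation"` // "avg", "sum", or null
}

func main() {
	raw, err := os.ReadFile("config.json")
	if err != nil {
		log.Fatal(err)
	}
	var cfg struct {
		Metrics map[string]metricEntry `json:"metrics"`
	}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	for name, m := range cfg.Metrics {
		agg := "null"
		if m.Aggregation != nil {
			agg = *m.Aggregation
		}
		fmt.Printf("%s: every %ds, aggregation=%s\n", name, m.Frequency, agg)
	}
}
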
debug.go (3 changed lines)

@@ -21,6 +21,9 @@ func (b *buffer) debugDump(buf []byte) []byte {
 	if b.archived {
 		buf = append(buf, `,"saved":true`...)
 	}
+	if b.unit != "" {
+		buf = append(buf, fmt.Sprintf(`,"unit":"%s"`, b.unit)...)
+	}
 	if b.next != nil {
 		buf = append(buf, `},`...)
 	} else {

lineprotocol.go

@@ -16,6 +16,7 @@ import (
 type Metric struct {
 	Name  string
 	Value Float
+	Unit  string

 	mc MetricConfig
 }

@@ -206,7 +207,7 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
 	}

 	typeBuf, subTypeBuf := typeBuf[:0], subTypeBuf[:0]
-	cluster, host := clusterDefault, ""
+	cluster, host, unit := clusterDefault, "", ""
 	for {
 		key, val, err := dec.NextTag()
 		if err != nil {

@@ -232,6 +233,8 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
 				host = string(val)
 				lvl = nil
 			}
+		case "unit":
+			unit = string(val)
 		case "type":
 			if string(val) == "node" {
 				break

@@ -300,6 +303,7 @@ func decodeLine(dec *lineprotocol.Decoder, clusterDefault string) error {
 		} else {
 			return fmt.Errorf("unsupported value type in message: %s", val.Kind().String())
 		}
+		metric.Unit = unit
 	}

 	if t, err = dec.Time(lineprotocol.Second, t); err != nil {

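With the new case above, a metric's unit travels as an ordinary line-protocol tag. Below is a hedged, self-contained sketch of such an input line and of pulling the tag out with the influxdata line-protocol v2 decoder; this is not the project's decodeLine, which additionally resolves cluster, host, and type selectors, and the tag names in the example message are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/line-protocol/v2/lineprotocol"
)

func main() {
	// Example message: the unit is just another tag on the metric line.
	msg := []byte("flops_any,cluster=fritz,hostname=f0131,type=hwthread,type-id=0,unit=MFLOP/s value=42.5 1692628930\n")

	dec := lineprotocol.NewDecoderWithBytes(msg)
	for dec.Next() {
		name, err := dec.Measurement()
		if err != nil {
			log.Fatal(err)
		}
		unit := ""
		for {
			key, val, err := dec.NextTag()
			if err != nil {
				log.Fatal(err)
			}
			if key == nil {
				break // no more tags on this line
			}
			if string(key) == "unit" {
				unit = string(val)
			}
		}
		// Fields and timestamp would be decoded next; skipped here for brevity.
		fmt.Printf("metric=%s unit=%q\n", name, unit)
	}
}
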
memstore.go (24 changed lines)

@@ -35,6 +35,7 @@ var (
 type buffer struct {
 	frequency int64 // Time between two "slots"
 	start int64 // Timestamp of when `data[0]` was written.
+	unit string // Unit for the data in this buffer
 	data []Float // The slice should never reallocacte as `cap(data)` is respected.
 	prev, next *buffer // `prev` contains older data, `next` newer data.
 	archived bool // If true, this buffer is already archived

@@ -50,12 +51,13 @@ type buffer struct {
 	*/
 }

-func newBuffer(ts, freq int64) *buffer {
+func newBuffer(ts, freq int64, unit string) *buffer {
 	b := bufferPool.Get().(*buffer)
 	b.frequency = freq
 	b.start = ts - (freq / 2)
 	b.prev = nil
 	b.next = nil
+	b.unit = unit
 	b.archived = false
 	b.closed = false
 	b.data = b.data[:0]

@@ -74,7 +76,7 @@ func (b *buffer) write(ts int64, value Float) (*buffer, error) {
 	// idx := int((ts - b.start + (b.frequency / 3)) / b.frequency)
 	idx := int((ts - b.start) / b.frequency)
 	if idx >= cap(b.data) {
-		newbuf := newBuffer(ts, b.frequency)
+		newbuf := newBuffer(ts, b.frequency, b.unit)
 		newbuf.prev = b
 		b.next = newbuf
 		b.close()

@@ -412,7 +414,7 @@ func (m *MemoryStore) WriteToLevel(l *level, selector []string, ts int64, metric
 	b := l.metrics[metric.mc.offset]
 	if b == nil {
 		// First write to this metric and level
-		b = newBuffer(ts, metric.mc.Frequency)
+		b = newBuffer(ts, metric.mc.Frequency, metric.Unit)
 		l.metrics[metric.mc.offset] = b
 	}

@@ -433,14 +435,15 @@ func (m *MemoryStore) WriteToLevel(l *level, selector []string, ts int64, metric
 // If the level does not hold the metric itself, the data will be aggregated recursively from the children.
 // The second and third return value are the actual from/to for the data. Those can be different from
 // the range asked for if no data was available.
-func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]Float, int64, int64, error) {
+func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]Float, int64, int64, string, error) {
+	var unit string = ""
 	if from > to {
-		return nil, 0, 0, errors.New("invalid time range")
+		return nil, 0, 0, "", errors.New("invalid time range")
 	}

 	minfo, ok := m.metrics[metric]
 	if !ok {
-		return nil, 0, 0, errors.New("unkown metric: " + metric)
+		return nil, 0, 0, "", errors.New("unkown metric: " + metric)
 	}

 	n, data := 0, make([]Float, (to-from)/minfo.Frequency+1)

@@ -449,6 +452,7 @@ func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]
 		if err != nil {
 			return err
 		}
+		unit = b.unit

 		if n == 0 {
 			from, to = cfrom, cto

@@ -476,9 +480,9 @@ func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]
 	})

 	if err != nil {
-		return nil, 0, 0, err
+		return nil, 0, 0, "", err
 	} else if n == 0 {
-		return nil, 0, 0, errors.New("metric or host not found")
+		return nil, 0, 0, "", errors.New("metric or host not found")
 	} else if n > 1 {
 		if minfo.Aggregation == AvgAggregation {
 			normalize := 1. / Float(n)

@@ -486,11 +490,11 @@ func (m *MemoryStore) Read(selector Selector, metric string, from, to int64) ([]
 				data[i] *= normalize
 			}
 		} else if minfo.Aggregation != SumAggregation {
-			return nil, 0, 0, errors.New("invalid aggregation")
+			return nil, 0, 0, "", errors.New("invalid aggregation")
 		}
 	}

-	return data, from, to, nil
+	return data, from, to, unit, nil
 }

 // Release all buffers for the selected level and all its children that contain only

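Seen from a caller, the extended signature now also hands back the unit of the buffers that were read. A hedged, in-package sketch follows (it assumes an already populated *MemoryStore); callers that do not need the unit can discard it with a blank identifier, as the updated tests below do.

// Hedged sketch, same package; assumes `store` has been filled with data.
func printFlopsAny(store *MemoryStore) {
	sel := Selector{{String: "fritz"}, {String: "f0131"}, {String: "hwthread0"}}
	data, from, to, unit, err := store.Read(sel, "flops_any", 1692628930, 1692629003)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d points from %d to %d, unit=%q\n", len(data), from, to, unit)
}
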
memstore_test.go

@@ -28,12 +28,12 @@ func TestMemoryStoreBasics(t *testing.T) {
 	}

 	sel := Selector{{String: "testhost"}}
-	adata, from, to, err := store.Read(sel, "a", start, start+count*frequency)
-	if err != nil || from != start || to != start+count*frequency {
+	adata, from, to, unit, err := store.Read(sel, "a", start, start+count*frequency)
+	if err != nil || from != start || to != start+count*frequency || unit != "" {
 		t.Error(err)
 		return
 	}
-	bdata, _, _, err := store.Read(sel, "b", start, start+count*frequency)
+	bdata, _, _, unit, err := store.Read(sel, "b", start, start+count*frequency)
 	if err != nil {
 		t.Error(err)
 		return

@@ -83,23 +83,23 @@ func TestMemoryStoreTooMuchWrites(t *testing.T) {
 	}

 	end := start + int64(count)*frequency
-	data, from, to, err := store.Read(Selector{{String: "test"}}, "a", start, end)
-	if len(data) != count || from != start || to != end || err != nil {
+	data, from, to, unit, err := store.Read(Selector{{String: "test"}}, "a", start, end)
+	if len(data) != count || from != start || to != end || err != nil || unit != "" {
 		t.Fatalf("a: err=%#v, from=%d, to=%d, data=%#v\n", err, from, to, data)
 	}

-	data, from, to, err = store.Read(Selector{{String: "test"}}, "b", start, end)
-	if len(data) != count/2 || from != start || to != end || err != nil {
+	data, from, to, unit, err = store.Read(Selector{{String: "test"}}, "b", start, end)
+	if len(data) != count/2 || from != start || to != end || err != nil || unit != "" {
 		t.Fatalf("b: err=%#v, from=%d, to=%d, data=%#v\n", err, from, to, data)
 	}

-	data, from, to, err = store.Read(Selector{{String: "test"}}, "c", start, end)
-	if len(data) != count*2-1 || from != start || to != end-frequency/2 || err != nil {
+	data, from, to, unit, err = store.Read(Selector{{String: "test"}}, "c", start, end)
+	if len(data) != count*2-1 || from != start || to != end-frequency/2 || err != nil || unit != "" {
 		t.Fatalf("c: err=%#v, from=%d, to=%d, data=%#v\n", err, from, to, data)
 	}

-	data, from, to, err = store.Read(Selector{{String: "test"}}, "d", start, end)
-	if len(data) != count/3+1 || from != start || to != end+frequency*2 || err != nil {
+	data, from, to, unit, err = store.Read(Selector{{String: "test"}}, "d", start, end)
+	if len(data) != count/3+1 || from != start || to != end+frequency*2 || err != nil || unit != "" {
 		t.Errorf("expected: err=nil, from=%d, to=%d, len(data)=%d\n", start, end+frequency*2, count/3)
 		t.Fatalf("d: err=%#v, from=%d, to=%d, data=%#v\n", err, from, to, data)
 	}

@@ -121,7 +121,7 @@ func TestMemoryStoreOutOfBounds(t *testing.T) {
 	}

 	sel := Selector{{String: "cluster"}, {String: "host"}, {String: "cpu"}}
-	data, from, to, err := store.Read(sel, "a", 500, int64(toffset+count*60+500))
+	data, from, to, unit, err := store.Read(sel, "a", 500, int64(toffset+count*60+500))
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -136,14 +136,14 @@ func TestMemoryStoreOutOfBounds(t *testing.T) {
 	}

 	testfrom, testlen := int64(100000000), int64(10000)
-	data, from, to, err = store.Read(sel, "a", testfrom, testfrom+testlen)
-	if len(data) != 0 || from != testfrom || to != testfrom || err != nil {
+	data, from, to, unit, err = store.Read(sel, "a", testfrom, testfrom+testlen)
+	if len(data) != 0 || from != testfrom || to != testfrom || err != nil || unit != "" {
 		t.Fatal("Unexpected data returned when reading range after valid data")
 	}

 	testfrom, testlen = 0, 10
-	data, from, to, err = store.Read(sel, "a", testfrom, testfrom+testlen)
-	if len(data) != 0 || from/60 != int64(toffset)/60 || to/60 != int64(toffset)/60 || err != nil {
+	data, from, to, unit, err = store.Read(sel, "a", testfrom, testfrom+testlen)
+	if len(data) != 0 || from/60 != int64(toffset)/60 || to/60 != int64(toffset)/60 || err != nil || unit != "" {
 		t.Fatal("Unexpected data returned when reading range before valid data")
 	}
 }

@@ -169,7 +169,7 @@ func TestMemoryStoreMissingDatapoints(t *testing.T) {
 	}

 	sel := Selector{{String: "testhost"}}
-	adata, _, _, err := store.Read(sel, "a", 0, int64(count))
+	adata, _, _, _, err := store.Read(sel, "a", 0, int64(count))
 	if err != nil {
 		t.Error(err)
 		return

@@ -219,7 +219,7 @@ func TestMemoryStoreAggregation(t *testing.T) {
 		}
 	}

-	adata, from, to, err := store.Read(Selector{{String: "host0"}}, "a", int64(0), int64(count))
+	adata, from, to, _, err := store.Read(Selector{{String: "host0"}}, "a", int64(0), int64(count))
 	if err != nil {
 		t.Error(err)
 		return

@@ -352,7 +352,7 @@ func TestMemoryStoreArchive(t *testing.T) {
 	}

 	sel := Selector{{String: "cluster"}, {String: "host"}, {String: "cpu0"}}
-	adata, from, to, err := store2.Read(sel, "a", 100, int64(100+count))
+	adata, from, to, _, err := store2.Read(sel, "a", 100, int64(100+count))
 	if err != nil {
 		t.Error(err)
 		return

@@ -398,7 +398,7 @@ func TestMemoryStoreFree(t *testing.T) {
 		t.Fatal("two buffers expected to be released")
 	}

-	adata, from, to, err := store.Read(Selector{{String: "cluster"}, {String: "host"}, {String: "1"}}, "a", 0, int64(count))
+	adata, from, to, _, err := store.Read(Selector{{String: "cluster"}, {String: "host"}, {String: "1"}}, "a", 0, int64(count))
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -451,7 +451,7 @@ func BenchmarkMemoryStoreConcurrentWrites(b *testing.B) {
 	for g := 0; g < goroutines; g++ {
 		host := fmt.Sprintf("host%d", g)
 		sel := Selector{{String: "cluster"}, {String: host}, {String: "cpu0"}}
-		adata, _, _, err := store.Read(sel, "a", 0, int64(count)*frequency)
+		adata, _, _, _, err := store.Read(sel, "a", 0, int64(count)*frequency)
 		if err != nil {
 			b.Error(err)
 			return

@@ -500,7 +500,7 @@ func BenchmarkMemoryStoreAggregation(b *testing.B) {

 	b.StartTimer()
 	for n := 0; n < b.N; n++ {
-		data, from, to, err := store.Read(Selector{{String: "testcluster"}, {String: "host123"}}, "flops_any", 0, int64(count))
+		data, from, to, _, err := store.Read(Selector{{String: "testcluster"}, {String: "host123"}}, "flops_any", 0, int64(count))
 		if err != nil {
 			b.Fatal(err)
 		}

testdata/checkpoints/fritz/f0131/1692628930.json (new file, vendored, 1 line)

@@ -0,0 +1 @@
{"from":1692628930,"to":1692629003,"metrics":{},"children":{"hwthread0":{"from":1692628930,"to":1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[2.2,0.7,2.1,1.6,2.3,0.7,0.9,1.8,0.7,1.7,1.5,1.8,0.9,1.6]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.3,0.0,0.0,0.0,0.0,28.2,5.1,4.7,0.1,0.3,0.0,0.2,0.0,0.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,0.5,0.1,0.0,0.0,0.0,0.0,0.0,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.3,0.0,0.0,0.0,0.0,27.2,4.8,4.7,0.0,0.3,0.0,0.2,0.0,0.0]}},"children":{}},"hwthread1":{"from":1692628930,"to":1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[0.9,4.2,4.2,3.8,3.1,0.7,1.3,0.7,1.8,1.6,5.0,3.0,4.9,5.2]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.5,0.0,0.0,0.0,0.0,49.8,5.0,0.1,0.0,0.0,0.0,0.0,0.0,0.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,0.4,0.1,0.0,0.0,0.0,0.0,0.0,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.5,0.0,0.0,0.0,0.0,49.1,4.9,0.1,0.0,0.0,0.0,0.0,0.0,0.0]}},"children":{}},"hwthread2":{"from":1692628930,"to":1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[1.8,0.9,2.2,2.1,4.3,1.4,0.8,1.0,1.1,1.6,3.9,4.9,2.7,5.1]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,16.2,4.4,0.0,0.1,0.0,0.0,0.0,0.0,0.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,0.1,0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,16.1,3.9,0.0,0.1,0.0,0.0,0.0,0.0,0.0]}},"children":{}},"hwthread3":{"from":1692628930,"to":1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[2.0,0.6,6.0,3.5,2.3,0.9,1.2,1.2,1.1,1.0,2.0,0.7,1.1,3.6]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,1.6,11.5,0.0,0.0,0.0,0.0,0.2,0.0,0.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,0.1,0.4,0.0,0.0,0.0,0.0,0.1,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,1.4,10.6,0.0,0.0,0.0,0.0,0.0,0.0,0.0]}},"children":{}},"hwthread4":{"from":1692628930,"to":1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[1.0,1.5,1.3,0.9,0.8,0.8,1.2,1.2,1.4,1.0,0.8,2.3,2.7,0.8]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.8,0.0,0.0,0.0,0.0,3.4,1.1,2.2,0.4,0.0,0.0,0.0,0.0,0.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,0.7,0.3,0.2,0.0,0.0,0.0,0.0,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.8,0.0,0.0,0.0,0.0,2.0,0.5,1.8,0.4,0.0,0.0,0.0,0.0,0.0]}},"children":{}},"hwthread5":{"from":1692628930,"to":1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[0.9,2.3,1.1,0.7,0.8,1.5,1.0,0.9,0.9,1.0,0.8,1.1,1.0,2.0]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.5,0.0,1.9,0.1,0.3,3.0,13.8,0.0,1.7,1.1,0.0,0.0,0.5,1.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.3,0.0,0.1,0.0,0.2,0.1,0.9,0.0,0.8,0.6,0.0,0.0,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,1.7,0.0,0.0,2.9,12.0,0.0,0.0,0.0,0.0,0.0,0.5,1.0]}},"children":{}},"hwthread6":{"from":1692628930,"to"
:1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[1.4,0.8,0.8,1.2,1.4,0.8,1.2,1.1,1.5,0.9,1.5,1.5,1.4,0.8]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.1,0.0,3.9,10.6,3.0,1.4,7.0,0.0,1.0,0.2,0.0,0.0,0.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.0,0.1,0.0,0.0,0.0,0.1,0.1,0.2,0.0,0.4,0.0,0.0,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,0.0,3.9,10.6,2.9,1.2,6.6,0.0,0.1,0.2,0.0,0.0,0.0]}},"children":{}},"hwthread7":{"from":1692628930,"to":1692629003,"metrics":{"cpi":{"frequency":5,"start":1692628930,"unit":"","data":[1.2,1.2,1.3,0.8,1.2,1.5,1.0,0.7,1.7,1.8,3.5,0.7,1.5,4.6]},"flops_any":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.3,0.0,2.3,4.7,0.0,12.5,0.7,0.2,0.2,0.0,0.0]},"flops_dp":{"frequency":5,"start":1692628931,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.0,0.0,0.1,0.9,0.0,0.0,0.0,0.0,0.0,0.0,0.0]},"flops_sp":{"frequency":5,"start":1692628930,"unit":"MFLOP/s","data":[0.0,0.0,0.0,0.3,0.0,2.2,2.8,0.0,12.5,0.7,0.2,0.2,0.0,0.0]}},"children":{}}}}