Mirror of https://github.com/ClusterCockpit/cc-metric-store.git
Optionally provide statistics on the timeseries endpoint
This is not useful for ClusterCockpit at the moment, but it can speed things up in the future, for example when archiving a job or for the job view.
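As a rough illustration (not part of this commit), a client could opt into the new statistics as sketched below. The with-stats=true query parameter and the avg/min/max response fields come from the diff further down; the host, endpoint path and request body are placeholder assumptions and may not match the actual cc-metric-store API.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// Mirrors the JSON tags of ApiMetricData from the diff below; avg/min/max
// are pointers because they are only filled in when requested.
type metricData struct {
	From int64      `json:"from"`
	To   int64      `json:"to"`
	Data []*float64 `json:"data"` // pointer elements tolerate null gaps in the series
	Avg  *float64   `json:"avg"`
	Min  *float64   `json:"min"`
	Max  *float64   `json:"max"`
}

func main() {
	// Hypothetical endpoint path and request body; only the
	// "with-stats=true" query parameter is taken from this commit.
	url := "http://localhost:8080/api/1640995200/1641081600/timeseries?with-stats=true"
	body := `{"selectors": [["testcluster", "host1"]], "metrics": ["load_one"]}`

	resp, err := http.Post(url, "application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// handleTimeseries answers with one metrics map per requested selector.
	var res []map[string]metricData
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		panic(err)
	}

	for _, metrics := range res {
		for name, md := range metrics {
			if md.Avg != nil && md.Min != nil && md.Max != nil {
				fmt.Printf("%s: avg=%g min=%g max=%g (%d samples)\n",
					name, *md.Avg, *md.Min, *md.Max, len(md.Data))
			}
		}
	}
}

If the parameter is omitted or not "true", the server skips the extra pass over the data and avg/min/max stay null.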
parent 579a05e4df
commit 38d605b6c4
api.go (37 changed lines)
@@ -9,6 +9,7 @@ import (
     "errors"
     "fmt"
     "log"
+    "math"
     "net/http"
     "strconv"
     "strings"
@@ -34,6 +35,34 @@ type ApiMetricData struct {
     From int64   `json:"from"`
     To   int64   `json:"to"`
     Data []Float `json:"data"`
+    Avg  *float64 `json:"avg"`
+    Min  *float64 `json:"min"`
+    Max  *float64 `json:"max"`
+}
+
+// TODO: Optimize this, just like the stats endpoint!
+func (data *ApiMetricData) AddStats() {
+    if len(data.Data) == 0 || data.Error != nil {
+        return
+    }
+
+    n := 0
+    sum, min, max := 0.0, float64(data.Data[0]), float64(data.Data[0])
+    for _, x := range data.Data {
+        if x.IsNaN() {
+            continue
+        }
+
+        n += 1
+        sum += float64(x)
+        min = math.Min(min, float64(x))
+        max = math.Max(max, float64(x))
+    }
+
+    avg := sum / float64(n)
+    data.Avg = &avg
+    data.Min = &min
+    data.Max = &max
 }
 
 type ApiStatsData struct {
@@ -64,6 +93,8 @@ func handleTimeseries(rw http.ResponseWriter, r *http.Request) {
         return
     }
 
+    withStats := r.URL.Query().Get("with-stats") == "true"
+
     bodyDec := json.NewDecoder(r.Body)
     var reqBody ApiRequestBody
     err = bodyDec.Decode(&reqBody)
@@ -84,11 +115,15 @@ func handleTimeseries(rw http.ResponseWriter, r *http.Request) {
                 continue
             }
 
-            metrics[metric] = ApiMetricData{
+            amd := ApiMetricData{
                 From: f,
                 To:   t,
                 Data: data,
             }
+            if withStats {
+                amd.AddStats()
+            }
+            metrics[metric] = amd
         }
         res = append(res, metrics)
     }
@@ -215,11 +215,12 @@ func main() {
     memoryStore = NewMemoryStore(conf.Metrics)
 
     restoreFrom := startupTime.Add(-time.Duration(conf.Checkpoints.Restore) * time.Second)
+    log.Printf("Loading checkpoints newer than %s\n", restoreFrom.Format(time.RFC3339))
     files, err := memoryStore.FromCheckpoint(conf.Checkpoints.RootDir, restoreFrom.Unix())
     if err != nil {
         log.Fatalf("Loading checkpoints failed: %s\n", err.Error())
     } else {
-        log.Printf("Checkpoints loaded (%d files, from %s on)\n", files, restoreFrom.Format(time.RFC3339))
+        log.Printf("Checkpoints loaded (%d files)\n", files)
     }
 
     ctx, shutdown := context.WithCancel(context.Background())
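For context, here is a self-contained sketch of the statistics logic added above, with minimal stand-ins for the Float and ApiMetricData types (whose real definitions live elsewhere in the repository; Float is assumed to be a float64 alias with an IsNaN helper). It shows that NaN samples are skipped when computing the statistics.

package main

import (
	"fmt"
	"math"
)

// Stand-in for the repository's Float type (assumption: float64 alias).
type Float float64

func (f Float) IsNaN() bool { return math.IsNaN(float64(f)) }

// Stand-in carrying only the fields AddStats touches.
type ApiMetricData struct {
	Error *string
	Data  []Float
	Avg   *float64
	Min   *float64
	Max   *float64
}

// Same statistics logic as the AddStats method added in this commit:
// NaN samples are skipped while summing and tracking min/max.
func (data *ApiMetricData) AddStats() {
	if len(data.Data) == 0 || data.Error != nil {
		return
	}

	n := 0
	sum, min, max := 0.0, float64(data.Data[0]), float64(data.Data[0])
	for _, x := range data.Data {
		if x.IsNaN() {
			continue
		}

		n += 1
		sum += float64(x)
		min = math.Min(min, float64(x))
		max = math.Max(max, float64(x))
	}

	avg := sum / float64(n)
	data.Avg, data.Min, data.Max = &avg, &min, &max
}

func main() {
	d := ApiMetricData{Data: []Float{10, 20, Float(math.NaN()), 30}}
	d.AddStats()
	fmt.Println(*d.Avg, *d.Min, *d.Max) // 20 10 30
}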