Mirror of https://github.com/ClusterCockpit/cc-metric-store.git (synced 2024-11-10 05:07:25 +01:00)
Optionally provide statistics on the timeseries endpoint

This is not useful for ClusterCockpit at the moment, but it can speed things up in the future when archiving a job or rendering the job view.
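As a quick illustration of what the optional statistics add, here is a minimal, self-contained sketch of the new behaviour. The ApiMetricData struct and the AddStats body are taken from the diff below; the Float stand-in (and its IsNaN method) is an assumption made only so the example compiles on its own and is not the store's actual definition.

package main

import (
	"fmt"
	"math"
)

// Float is a local stand-in for the store's sample type, assumed here only
// so that this sketch compiles on its own.
type Float float64

func (f Float) IsNaN() bool { return math.IsNaN(float64(f)) }

// ApiMetricData mirrors the struct from the diff below; Avg/Min/Max are
// pointers so they stay null in the JSON output unless statistics were added.
type ApiMetricData struct {
	Error *string  `json:"error"`
	From  int64    `json:"from"`
	To    int64    `json:"to"`
	Data  []Float  `json:"data"`
	Avg   *float64 `json:"avg"`
	Min   *float64 `json:"min"`
	Max   *float64 `json:"max"`
}

// AddStats averages over all non-NaN samples, as in the commit.
func (data *ApiMetricData) AddStats() {
	if len(data.Data) == 0 || data.Error != nil {
		return
	}

	n := 0
	sum, min, max := 0.0, float64(data.Data[0]), float64(data.Data[0])
	for _, x := range data.Data {
		if x.IsNaN() {
			continue
		}
		n += 1
		sum += float64(x)
		min = math.Min(min, float64(x))
		max = math.Max(max, float64(x))
	}

	avg := sum / float64(n)
	data.Avg = &avg
	data.Min = &min
	data.Max = &max
}

func main() {
	amd := ApiMetricData{From: 0, To: 60, Data: []Float{1, 3, Float(math.NaN()), 5}}
	amd.AddStats() // the handler only does this when the client sent with-stats=true
	fmt.Printf("avg=%g min=%g max=%g\n", *amd.Avg, *amd.Min, *amd.Max)
	// prints: avg=3 min=1 max=5
}

Because Avg, Min, and Max are pointer fields without omitempty, they serialize to null whenever the client did not ask for statistics: the keys appear in the response, but carry no values.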
This commit is contained in:
parent 579a05e4df
commit 38d605b6c4
api.go (45 changed lines)
@@ -9,6 +9,7 @@ import (
 	"errors"
 	"fmt"
 	"log"
+	"math"
 	"net/http"
 	"strconv"
 	"strings"
@@ -30,10 +31,38 @@ type ApiRequestBody struct {
 }
 
 type ApiMetricData struct {
-	Error *string `json:"error"`
-	From  int64   `json:"from"`
-	To    int64   `json:"to"`
-	Data  []Float `json:"data"`
+	Error *string  `json:"error"`
+	From  int64    `json:"from"`
+	To    int64    `json:"to"`
+	Data  []Float  `json:"data"`
+	Avg   *float64 `json:"avg"`
+	Min   *float64 `json:"min"`
+	Max   *float64 `json:"max"`
 }
 
+// TODO: Optimize this, just like the stats endpoint!
+func (data *ApiMetricData) AddStats() {
+	if len(data.Data) == 0 || data.Error != nil {
+		return
+	}
+
+	n := 0
+	sum, min, max := 0.0, float64(data.Data[0]), float64(data.Data[0])
+	for _, x := range data.Data {
+		if x.IsNaN() {
+			continue
+		}
+
+		n += 1
+		sum += float64(x)
+		min = math.Min(min, float64(x))
+		max = math.Max(max, float64(x))
+	}
+
+	avg := sum / float64(n)
+	data.Avg = &avg
+	data.Min = &min
+	data.Max = &max
+}
+
 type ApiStatsData struct {
@@ -64,6 +93,8 @@ func handleTimeseries(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	withStats := r.URL.Query().Get("with-stats") == "true"
+
 	bodyDec := json.NewDecoder(r.Body)
 	var reqBody ApiRequestBody
 	err = bodyDec.Decode(&reqBody)
@@ -84,11 +115,15 @@ func handleTimeseries(rw http.ResponseWriter, r *http.Request) {
 				continue
 			}
 
-			metrics[metric] = ApiMetricData{
+			amd := ApiMetricData{
 				From: f,
 				To:   t,
 				Data: data,
 			}
+			if withStats {
+				amd.AddStats()
+			}
+			metrics[metric] = amd
 		}
 		res = append(res, metrics)
 	}
@@ -215,11 +215,12 @@ func main() {
 	memoryStore = NewMemoryStore(conf.Metrics)
 
 	restoreFrom := startupTime.Add(-time.Duration(conf.Checkpoints.Restore) * time.Second)
+	log.Printf("Loading checkpoints newer than %s\n", restoreFrom.Format(time.RFC3339))
 	files, err := memoryStore.FromCheckpoint(conf.Checkpoints.RootDir, restoreFrom.Unix())
 	if err != nil {
 		log.Fatalf("Loading checkpoints failed: %s\n", err.Error())
 	} else {
-		log.Printf("Checkpoints loaded (%d files, from %s on)\n", files, restoreFrom.Format(time.RFC3339))
+		log.Printf("Checkpoints loaded (%d files)\n", files)
 	}
 
 	ctx, shutdown := context.WithCancel(context.Background())
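For completeness, the opt-in is a plain URL query flag. The fragment below is a stand-alone sketch of that pattern; the demo handler and its printed output are hypothetical placeholders rather than the store's real routing, but the with-stats check is the same one added to handleTimeseries above.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// demoHandler is a hypothetical stand-in for handleTimeseries; it only shows
// how the with-stats query flag from the diff is evaluated.
func demoHandler(rw http.ResponseWriter, r *http.Request) {
	withStats := r.URL.Query().Get("with-stats") == "true"
	// In the real handler, withStats gates the amd.AddStats() call before
	// the ApiMetricData is put into the response map.
	fmt.Fprintf(rw, "withStats=%v", withStats)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(demoHandler))
	defer srv.Close()

	for _, q := range []string{"", "?with-stats=true"} {
		resp, err := http.Get(srv.URL + q)
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%q -> %s\n", q, body)
	}
	// "" -> withStats=false
	// "?with-stats=true" -> withStats=true
}

Keeping the flag opt-in means callers that do not need the statistics pay nothing extra, which matches the commit message's note that ClusterCockpit itself does not use them yet.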