Mirror of https://github.com/ClusterCockpit/cc-metric-store.git
Optionally provide statistics on the timeseries endpoint

This is not useful for ClusterCockpit at the moment, but when archiving a job or rendering the job view, it can speed things up in the future.
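As a rough usage sketch (the base URL, route, and request-body fields below are assumptions for illustration; only the with-stats query parameter and the avg/min/max response fields come from this change), a client could opt in like this:

// Hedged client-side sketch: route and body shape are hypothetical placeholders.
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Hypothetical endpoint served by handleTimeseries; adjust to the real route.
	url := "http://localhost:8081/api/1630000000/1630003600/timeseries?with-stats=true"

	// Hypothetical request body selecting one host and one metric.
	body := `{"selectors": [["testcluster", "host123"]], "metrics": ["flops_any"]}`

	resp, err := http.Post(url, "application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// With with-stats=true, each metric object carries "avg", "min" and "max" next to "data".
	raw, _ := io.ReadAll(resp.Body)
	fmt.Println(string(raw))
}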
api.go (37 changes)
@@ -9,6 +9,7 @@ import (
 	"errors"
 	"fmt"
 	"log"
+	"math"
 	"net/http"
 	"strconv"
 	"strings"
@@ -34,6 +35,34 @@ type ApiMetricData struct {
 	From  int64    `json:"from"`
 	To    int64    `json:"to"`
 	Data  []Float  `json:"data"`
+	Avg   *float64 `json:"avg"`
+	Min   *float64 `json:"min"`
+	Max   *float64 `json:"max"`
 }
+
+// TODO: Optimize this, just like the stats endpoint!
+func (data *ApiMetricData) AddStats() {
+	if len(data.Data) == 0 || data.Error != nil {
+		return
+	}
+
+	n := 0
+	sum, min, max := 0.0, float64(data.Data[0]), float64(data.Data[0])
+	for _, x := range data.Data {
+		if x.IsNaN() {
+			continue
+		}
+
+		n += 1
+		sum += float64(x)
+		min = math.Min(min, float64(x))
+		max = math.Max(max, float64(x))
+	}
+
+	avg := sum / float64(n)
+	data.Avg = &avg
+	data.Min = &min
+	data.Max = &max
+}
 
 type ApiStatsData struct {
@@ -64,6 +93,8 @@ func handleTimeseries(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	withStats := r.URL.Query().Get("with-stats") == "true"
+
 	bodyDec := json.NewDecoder(r.Body)
 	var reqBody ApiRequestBody
 	err = bodyDec.Decode(&reqBody)
@@ -84,11 +115,15 @@ func handleTimeseries(rw http.ResponseWriter, r *http.Request) {
 				continue
 			}
 
-			metrics[metric] = ApiMetricData{
+			amd := ApiMetricData{
 				From: f,
 				To:   t,
 				Data: data,
 			}
+			if withStats {
+				amd.AddStats()
+			}
+			metrics[metric] = amd
 		}
 		res = append(res, metrics)
 	}
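For reference, the statistics added by AddStats boil down to the following standalone sketch (not the repository's code; plain float64 stands in for the store's Float type). NaN samples are skipped, and avg/min/max are derived from the remaining values:

package main

import (
	"fmt"
	"math"
)

func main() {
	data := []float64{1.0, 4.0, math.NaN(), 2.5, 0.5}

	n := 0
	sum, min, max := 0.0, data[0], data[0]
	for _, x := range data {
		if math.IsNaN(x) {
			continue // NaN samples do not contribute to the statistics
		}
		n += 1
		sum += x
		min = math.Min(min, x)
		max = math.Max(max, x)
	}

	avg := sum / float64(n)
	fmt.Printf("avg=%g min=%g max=%g\n", avg, min, max) // avg=2 min=0.5 max=4
}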
@@ -215,11 +215,12 @@ func main() {
 	memoryStore = NewMemoryStore(conf.Metrics)
 
 	restoreFrom := startupTime.Add(-time.Duration(conf.Checkpoints.Restore) * time.Second)
+	log.Printf("Loading checkpoints newer than %s\n", restoreFrom.Format(time.RFC3339))
 	files, err := memoryStore.FromCheckpoint(conf.Checkpoints.RootDir, restoreFrom.Unix())
 	if err != nil {
 		log.Fatalf("Loading checkpoints failed: %s\n", err.Error())
 	} else {
-		log.Printf("Checkpoints loaded (%d files, from %s on)\n", files, restoreFrom.Format(time.RFC3339))
+		log.Printf("Checkpoints loaded (%d files)\n", files)
 	}
 
 	ctx, shutdown := context.WithCancel(context.Background())
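The restore window logged above is simply the startup time shifted back by the configured number of seconds; a minimal sketch, with a made-up 48-hour restore value standing in for conf.Checkpoints.Restore:

package main

import (
	"fmt"
	"time"
)

func main() {
	startupTime := time.Now()
	restoreSeconds := 48 * 60 * 60 // stand-in for conf.Checkpoints.Restore

	// Checkpoints older than this timestamp are ignored on startup.
	restoreFrom := startupTime.Add(-time.Duration(restoreSeconds) * time.Second)
	fmt.Printf("Loading checkpoints newer than %s\n", restoreFrom.Format(time.RFC3339))
}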