Optionally delete checkpoints instead of archiving

This commit is contained in:
Lou Knauer 2022-03-14 08:48:22 +01:00
parent 0b66f7c5d7
commit fdbf94f2a1
4 changed files with 60 additions and 12 deletions

View File

@ -449,7 +449,7 @@ func findFiles(direntries []fs.DirEntry, t int64, findMoreRecentFiles bool) ([]s
// ZIP all checkpoint files older than `from` together and write them to the `archiveDir`, // ZIP all checkpoint files older than `from` together and write them to the `archiveDir`,
// deleting them from the `checkpointsDir`. // deleting them from the `checkpointsDir`.
func ArchiveCheckpoints(checkpointsDir, archiveDir string, from int64) (int, error) { func ArchiveCheckpoints(checkpointsDir, archiveDir string, from int64, deleteInstead bool) (int, error) {
entries1, err := os.ReadDir(checkpointsDir) entries1, err := os.ReadDir(checkpointsDir)
if err != nil { if err != nil {
return 0, err return 0, err
@ -469,7 +469,7 @@ func ArchiveCheckpoints(checkpointsDir, archiveDir string, from int64) (int, err
go func() { go func() {
defer wg.Done() defer wg.Done()
for workItem := range work { for workItem := range work {
m, err := archiveCheckpoints(workItem.cdir, workItem.adir, from) m, err := archiveCheckpoints(workItem.cdir, workItem.adir, from, deleteInstead)
if err != nil { if err != nil {
log.Printf("error while archiving %s/%s: %s", workItem.cluster, workItem.host, err.Error()) log.Printf("error while archiving %s/%s: %s", workItem.cluster, workItem.host, err.Error())
atomic.AddInt32(&errs, 1) atomic.AddInt32(&errs, 1)
@ -509,7 +509,7 @@ func ArchiveCheckpoints(checkpointsDir, archiveDir string, from int64) (int, err
} }
// Helper function for `ArchiveCheckpoints`. // Helper function for `ArchiveCheckpoints`.
func archiveCheckpoints(dir string, archiveDir string, from int64) (int, error) { func archiveCheckpoints(dir string, archiveDir string, from int64, deleteInstead bool) (int, error) {
entries, err := os.ReadDir(dir) entries, err := os.ReadDir(dir)
if err != nil { if err != nil {
return 0, err return 0, err
@ -520,6 +520,18 @@ func archiveCheckpoints(dir string, archiveDir string, from int64) (int, error)
return 0, err return 0, err
} }
if deleteInstead {
n := 0
for _, checkpoint := range files {
filename := filepath.Join(dir, checkpoint)
if err = os.Remove(filename); err != nil {
return n, err
}
n += 1
}
return n, nil
}
filename := filepath.Join(archiveDir, fmt.Sprintf("%d.zip", from)) filename := filepath.Join(archiveDir, fmt.Sprintf("%d.zip", from))
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644) f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil && os.IsNotExist(err) { if err != nil && os.IsNotExist(err) {
@ -538,15 +550,15 @@ func archiveCheckpoints(dir string, archiveDir string, from int64) (int, error)
defer zw.Close() defer zw.Close()
n := 0 n := 0
for _, jsonFile := range files { for _, checkpoint := range files {
filename := filepath.Join(dir, jsonFile) filename := filepath.Join(dir, checkpoint)
r, err := os.Open(filename) r, err := os.Open(filename)
if err != nil { if err != nil {
return n, err return n, err
} }
defer r.Close() defer r.Close()
w, err := zw.Create(jsonFile) w, err := zw.Create(checkpoint)
if err != nil { if err != nil {
return n, err return n, err
} }

View File

@ -99,6 +99,7 @@ type Config struct {
Archive struct { Archive struct {
Interval string `json:"interval"` Interval string `json:"interval"`
RootDir string `json:"directory"` RootDir string `json:"directory"`
DeleteInstead bool `json:"delete-instead"`
} `json:"archive"` } `json:"archive"`
} }
@ -200,7 +201,7 @@ func intervals(wg *sync.WaitGroup, ctx context.Context) {
case <-ticks: case <-ticks:
t := time.Now().Add(-d) t := time.Now().Add(-d)
log.Printf("start archiving checkpoints (older than %s)...\n", t.Format(time.RFC3339)) log.Printf("start archiving checkpoints (older than %s)...\n", t.Format(time.RFC3339))
n, err := ArchiveCheckpoints(conf.Checkpoints.RootDir, conf.Archive.RootDir, t.Unix()) n, err := ArchiveCheckpoints(conf.Checkpoints.RootDir, conf.Archive.RootDir, t.Unix(), conf.Archive.DeleteInstead)
if err != nil { if err != nil {
log.Printf("archiving failed: %s\n", err.Error()) log.Printf("archiving failed: %s\n", err.Error())
} else { } else {
@ -236,14 +237,18 @@ func main() {
restoreFrom := startupTime.Add(-d) restoreFrom := startupTime.Add(-d)
log.Printf("Loading checkpoints newer than %s\n", restoreFrom.Format(time.RFC3339)) log.Printf("Loading checkpoints newer than %s\n", restoreFrom.Format(time.RFC3339))
files, err := memoryStore.FromCheckpoint(conf.Checkpoints.RootDir, restoreFrom.Unix()) files, err := memoryStore.FromCheckpoint(conf.Checkpoints.RootDir, restoreFrom.Unix())
loadedData := memoryStore.SizeInBytes() / 1024 / 1024 // In MB
if err != nil { if err != nil {
log.Fatalf("Loading checkpoints failed: %s\n", err.Error()) log.Fatalf("Loading checkpoints failed: %s\n", err.Error())
} else { } else {
log.Printf("Checkpoints loaded (%d files, that took %dms)\n", files, time.Since(startupTime).Milliseconds()) log.Printf("Checkpoints loaded (%d files, %d MB, that took %dms)\n", files, loadedData, time.Since(startupTime).Milliseconds())
} }
runtime.GC() runtime.GC()
if loadedData > 1000 {
debug.SetGCPercent(20) debug.SetGCPercent(20)
}
ctx, shutdown := context.WithCancel(context.Background()) ctx, shutdown := context.WithCancel(context.Background())
var wg sync.WaitGroup var wg sync.WaitGroup

View File

@ -44,7 +44,7 @@ func (l *level) debugDump(w *bufio.Writer, m *MemoryStore, indent string) error
} }
func (m *MemoryStore) DebugDump(w *bufio.Writer) error { func (m *MemoryStore) DebugDump(w *bufio.Writer) error {
fmt.Fprintf(w, "MemoryStore:\n") fmt.Fprintf(w, "MemoryStore (%d MB):\n", m.SizeInBytes()/1024/1024)
m.root.debugDump(w, m, " ") m.root.debugDump(w, m, " ")
return w.Flush() return w.Flush()
} }

View File

@ -3,6 +3,7 @@ package main
import ( import (
"errors" "errors"
"sync" "sync"
"unsafe"
) )
// Default buffer capacity. // Default buffer capacity.
@ -231,6 +232,14 @@ func (b *buffer) iterFromTo(from, to int64, callback func(b *buffer) error) erro
return nil return nil
} }
// count returns the number of Float slots held in this buffer plus all
// of its predecessors in the chain (walked via the prev pointers).
func (b *buffer) count() int64 {
	total := int64(0)
	for buf := b; buf != nil; buf = buf.prev {
		total += int64(len(buf.data))
	}
	return total
}
// Could also be called "node" as this forms a node in a tree structure. // Could also be called "node" as this forms a node in a tree structure.
// Called level because "node" might be confusing here. // Called level because "node" might be confusing here.
// Can be both a leaf or a inner node. In this tree structue, inner nodes can // Can be both a leaf or a inner node. In this tree structue, inner nodes can
@ -320,6 +329,24 @@ func (l *level) free(t int64) (int, error) {
return n, nil return n, nil
} }
// sizeInBytes reports the approximate number of bytes of metric data
// stored at this level and, recursively, in all of its children.
// The level's read lock is held while its own maps are inspected.
func (l *level) sizeInBytes() int64 {
	l.lock.RLock()
	defer l.lock.RUnlock()

	// One Float element's size; hoisted out of the loop (loop-invariant).
	elemSize := int64(unsafe.Sizeof(Float(0)))

	var total int64
	for _, buf := range l.metrics {
		if buf == nil {
			continue
		}
		total += buf.count() * elemSize
	}
	for _, child := range l.children {
		total += child.sizeInBytes()
	}
	return total
}
type MemoryStore struct { type MemoryStore struct {
root level // root of the tree structure root level // root of the tree structure
metrics map[string]MetricConfig metrics map[string]MetricConfig
@ -477,6 +504,10 @@ func (m *MemoryStore) FreeAll() error {
return nil return nil
} }
// SizeInBytes returns the estimated total memory footprint, in bytes, of
// all metric data currently held in the store (delegates to the root
// level's recursive sizeInBytes).
func (m *MemoryStore) SizeInBytes() int64 {
	return m.root.sizeInBytes()
}
// Given a selector, return a list of all children of the level selected. // Given a selector, return a list of all children of the level selected.
func (m *MemoryStore) ListChildren(selector []string) []string { func (m *MemoryStore) ListChildren(selector []string) []string {
lvl := &m.root lvl := &m.root