Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2026-01-16 09:41:47 +01:00)

Commit: Merge branch 'dev' of https://github.com/ClusterCockpit/cc-backend into dev
@@ -271,6 +271,7 @@ func initSubsystems() error {
// Initialize job archive
archiveCfg := ccconf.GetPackageConfig("archive")
if archiveCfg == nil {
cclog.Debug("Archive configuration not found, using default archive configuration")
archiveCfg = json.RawMessage(defaultArchiveConfig)
}
if err := archive.Init(archiveCfg, config.Keys.DisableArchive); err != nil {
@@ -319,8 +320,13 @@ func runServer(ctx context.Context) error {
mscfg := ccconf.GetPackageConfig("metric-store")
if mscfg != nil {
metricstore.Init(mscfg, &wg)

// Inject repository as NodeProvider to break import cycle
ms := metricstore.GetMemoryStore()
jobRepo := repository.GetJobRepository()
ms.SetNodeProvider(jobRepo)
} else {
cclog.Debug("Metric store configuration not found, skipping metricstore initialization")
return fmt.Errorf("missing metricstore configuration")
}

// Start archiver and task manager
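For context, the injection above is the usual way to break an import cycle in Go: the consumer (metricstore) declares the interface it needs, and the concrete type (the job repository) is handed in at startup. A minimal, self-contained sketch of that shape; every name except NodeProvider/SetNodeProvider/GetUsedNodes is illustrative, not the project's actual API:

package main

import "fmt"

// nodeProvider mirrors the interface the metric store declares for itself,
// so it never has to import the repository package.
type nodeProvider interface {
	GetUsedNodes(ts int64) (map[string][]string, error)
}

// store stands in for metricstore.MemoryStore.
type store struct{ provider nodeProvider }

func (s *store) SetNodeProvider(p nodeProvider) { s.provider = p }

// fakeRepo stands in for the job repository and satisfies the interface.
type fakeRepo struct{}

func (fakeRepo) GetUsedNodes(ts int64) (map[string][]string, error) {
	return map[string][]string{"fritz": {"f0201"}}, nil
}

func main() {
	s := &store{}
	s.SetNodeProvider(fakeRepo{}) // wiring done by the caller, as in runServer above
	nodes, _ := s.provider.GetUsedNodes(0)
	fmt.Println(nodes)
}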
@@ -375,22 +381,37 @@ func runServer(ctx context.Context) error {
}
runtime.SystemdNotify(true, "running")

// Wait for completion or error
waitDone := make(chan struct{})
go func() {
wg.Wait()
close(waitDone)
}()

go func() {
<-waitDone
close(errChan)
}()

// Check for server startup errors
// Wait for either:
// 1. An error from server startup
// 2. Completion of all goroutines (normal shutdown or crash)
select {
case err := <-errChan:
// errChan will be closed when waitDone is closed, which happens
// when all goroutines complete (either from normal shutdown or error)
if err != nil {
return err
}
case <-time.After(100 * time.Millisecond):
// Give the server 100ms to start and report any immediate startup errors
// After that, just wait for normal shutdown completion
select {
case err := <-errChan:
if err != nil {
return err
}
case <-time.After(100 * time.Millisecond):
// Server started successfully, wait for completion
if err := <-errChan; err != nil {
return err
case <-waitDone:
// Normal shutdown completed
}
}

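The nested selects above give the HTTP server a short window to report an immediate startup failure before the process settles into waiting for shutdown. A stripped-down sketch of the same pattern, detached from cc-backend; durations, channel names and the simulated failure are illustrative:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

func run() error {
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	wg.Add(1)
	go func() {
		defer wg.Done()
		// Simulate a server that fails right away; replace with srv.ListenAndServe().
		errChan <- errors.New("listen tcp :8080: address already in use")
	}()

	// Close errChan once every goroutine has finished, so the reads below can unblock.
	waitDone := make(chan struct{})
	go func() { wg.Wait(); close(waitDone) }()
	go func() { <-waitDone; close(errChan) }()

	// Give startup a short grace period to surface an immediate error.
	select {
	case err := <-errChan:
		if err != nil {
			return err
		}
	case <-time.After(100 * time.Millisecond):
		// Startup looked fine; now just wait for normal completion.
		if err, ok := <-errChan; ok && err != nil {
			return err
		}
	}
	return nil
}

func main() { fmt.Println(run()) }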
@@ -1,55 +1,22 @@
{
"main": {
"addr": "127.0.0.1:8080",
"short-running-jobs-duration": 300,
"resampling": {
"minimumPoints": 600,
"trigger": 300,
"resolutions": [240, 60]
},
"apiAllowedIPs": ["*"],
"emission-constant": 317
"apiAllowedIPs": ["*"]
},
"cron": {
"commit-job-worker": "2m",
"duration-worker": "5m",
"footprint-worker": "10m"
},
"archive": {
"kind": "file",
"path": "./var/job-archive"
"commit-job-worker": "1m",
"duration-worker": "3m",
"footprint-worker": "5m"
},
"auth": {
"jwts": {
"max-age": "2000h"
}
},
"nats": {
"address": "nats://0.0.0.0:4222",
"username": "root",
"password": "root"
},
"metric-store": {
"checkpoints": {
"file-format": "avro",
"interval": "1h",
"directory": "./var/checkpoints",
"restore": "48h"
"interval": "12h"
},
"archive": {
"interval": "1h",
"directory": "./var/archive"
},
"retention-in-memory": "48h",
"subscriptions": [
{
"subscribe-to": "hpc-nats",
"cluster-tag": "fritz"
},
{
"subscribe-to": "hpc-nats",
"cluster-tag": "alex"
}
]
"retention-in-memory": "48h"
}
}

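The diff above trims the demo configuration down to per-package blocks ("main", "cron", "archive", "metric-store", ...). For reference, this is roughly how one such block is pulled out of the combined file as a json.RawMessage and replaced by a default when absent; the helper below only mimics ccconf.GetPackageConfig and is not the real implementation:

package main

import (
	"encoding/json"
	"fmt"
)

const combined = `{"archive": {"kind": "file", "path": "./var/job-archive"}}`

// getPackageConfig mimics (but is not) ccconf.GetPackageConfig: it returns the
// raw JSON for one top-level key, or nil if the key is absent.
func getPackageConfig(raw []byte, key string) json.RawMessage {
	var m map[string]json.RawMessage
	if err := json.Unmarshal(raw, &m); err != nil {
		return nil
	}
	return m[key]
}

func main() {
	const defaultArchiveConfig = `{"kind": "file", "path": "./var/job-archive"}`

	archiveCfg := getPackageConfig([]byte(combined), "metric-store")
	if archiveCfg == nil {
		// Same fallback idea as initSubsystems above: fall back to a built-in default.
		archiveCfg = json.RawMessage(defaultArchiveConfig)
	}
	fmt.Println(string(archiveCfg))
}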
@@ -5,9 +5,9 @@
"https-key-file": "/etc/letsencrypt/live/url/privkey.pem",
"user": "clustercockpit",
"group": "clustercockpit",
"validate": false,
"apiAllowedIPs": ["*"],
"short-running-jobs-duration": 300,
"enable-job-taggers": true,
"resampling": {
"minimumPoints": 600,
"trigger": 180,
@@ -18,13 +18,48 @@
"subjectNodeState": "cc.node.state"
}
},
"nats": {
"address": "nats://0.0.0.0:4222",
"username": "root",
"password": "root"
},
"auth": {
"jwts": {
"max-age": "2000h"
}
},
"cron": {
"commit-job-worker": "1m",
"duration-worker": "5m",
"footprint-worker": "10m"
},
"archive": {
"kind": "file",
"path": "./var/job-archive"
"kind": "s3",
"endpoint": "http://x.x.x.x",
"bucket": "jobarchive",
"accessKey": "xx",
"secretKey": "xx",
"retention": {
"policy": "move",
"age": 365,
"location": "./var/archive"
}
},
"metric-store": {
"checkpoints": {
"interval": "12h"
},
"retention-in-memory": "48h",
"nats-subscriptions": [
{
"subscribe-to": "hpc-nats",
"cluster-tag": "fritz"
},
{
"subscribe-to": "hpc-nats",
"cluster-tag": "alex"
}
]
},
"ui-file": "ui-config.json"
}

@@ -616,9 +616,9 @@ func securedCheck(user *schema.User, r *http.Request) error {
}
// If SplitHostPort fails, IPAddress is already just a host (no port)

// If nothing declared in config: deny all request to this api endpoint
// If nothing declared in config: Continue
if len(config.Keys.APIAllowedIPs) == 0 {
return fmt.Errorf("missing configuration key ApiAllowedIPs")
return nil
}
// If wildcard declared in config: Continue
if config.Keys.APIAllowedIPs[0] == "*" {

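The behaviour change above flips the empty-allow-list case from "reject" to "allow". A compact sketch of the resulting check order (empty list passes, wildcard passes, otherwise the caller's host must be listed); the helper is illustrative and not the actual securedCheck:

package main

import (
	"fmt"
	"net"
	"slices"
)

// checkAPIAccess mirrors the logic after the change: no configured IPs or a
// leading "*" means continue, otherwise the remote host must be listed.
func checkAPIAccess(allowed []string, remoteAddr string) error {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		// Address already has no port component.
		host = remoteAddr
	}
	if len(allowed) == 0 || allowed[0] == "*" {
		return nil
	}
	if slices.Contains(allowed, host) {
		return nil
	}
	return fmt.Errorf("IP is not allowed to access this API endpoint: %s", host)
}

func main() {
	fmt.Println(checkAPIAccess(nil, "10.0.0.5:443"))                  // <nil>
	fmt.Println(checkAPIAccess([]string{"*"}, "10.0.0.5:443"))        // <nil>
	fmt.Println(checkAPIAccess([]string{"127.0.0.1"}, "10.0.0.5:443")) // error
}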
@@ -6,7 +6,7 @@
package config

var configSchema = `
{
{
"type": "object",
"properties": {
"addr": {
@@ -135,6 +135,5 @@ var configSchema = `
},
"required": ["subjectJobEvent", "subjectNodeState"]
}
},
"required": ["apiAllowedIPs"]
}`
}
}`

@@ -24,7 +24,7 @@ import (
func Archiving(wg *sync.WaitGroup, ctx context.Context) {
go func() {
defer wg.Done()
d, err := time.ParseDuration(Keys.Archive.Interval)
d, err := time.ParseDuration(Keys.Archive.ArchiveInterval)
if err != nil {
cclog.Fatalf("[METRICSTORE]> error parsing archive interval duration: %v\n", err)
}

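The renamed field still carries a Go duration string; the goroutine parses it once and then runs on a ticker. A minimal sketch of that loop, under the assumption that the interval comes from the archive config and the work function is a placeholder:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func archiving(wg *sync.WaitGroup, ctx context.Context, interval string) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		d, err := time.ParseDuration(interval) // e.g. "1h" from the archive config
		if err != nil {
			fmt.Println("invalid archive interval:", err)
			return
		}
		ticks := time.NewTicker(d)
		defer ticks.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticks.C:
				fmt.Println("archive checkpoint files now") // placeholder for the real work
			}
		}
	}()
}

func main() {
	var wg sync.WaitGroup
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	archiving(&wg, ctx, "10ms")
	wg.Wait()
}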
@@ -30,8 +30,51 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
for {
select {
case <-ctx.Done():
// Drain any remaining messages in channel before exiting
for {
select {
case val, ok := <-LineProtocolMessages:
if !ok {
// Channel closed
return
case val := <-LineProtocolMessages:
}
// Process remaining message
freq, err := GetMetricFrequency(val.MetricName)
if err != nil {
continue
}

metricName := ""
for _, selectorName := range val.Selector {
metricName += selectorName + SelectorDelimiter
}
metricName += val.MetricName

var selector []string
selector = append(selector, val.Cluster, val.Node, strconv.FormatInt(freq, 10))

if !stringSlicesEqual(oldSelector, selector) {
avroLevel = avroStore.root.findAvroLevelOrCreate(selector)
if avroLevel == nil {
cclog.Errorf("Error creating or finding the level with cluster : %s, node : %s, metric : %s\n", val.Cluster, val.Node, val.MetricName)
}
oldSelector = slices.Clone(selector)
}

if avroLevel != nil {
avroLevel.addMetric(metricName, val.Value, val.Timestamp, int(freq))
}
default:
// No more messages, exit
return
}
}
case val, ok := <-LineProtocolMessages:
if !ok {
// Channel closed, exit gracefully
return
}

// Fetch the frequency of the metric from the global configuration
freq, err := GetMetricFrequency(val.MetricName)
if err != nil {
@@ -65,9 +108,11 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
oldSelector = slices.Clone(selector)
}

if avroLevel != nil {
avroLevel.addMetric(metricName, val.Value, val.Timestamp, int(freq))
}
}
}
}()
}

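The enlarged ctx.Done() branch above drains whatever is still queued in LineProtocolMessages before returning, instead of dropping it. The drain idiom in isolation, a non-blocking inner loop with a default case, looks like this; channel and payload types are illustrative only:

package main

import (
	"context"
	"fmt"
)

func consume(ctx context.Context, msgs <-chan string) {
	for {
		select {
		case <-ctx.Done():
			// Shutdown requested: flush what is already buffered, then exit.
			for {
				select {
				case m, ok := <-msgs:
					if !ok {
						return // channel closed
					}
					fmt.Println("flushing:", m)
				default:
					return // nothing left to flush
				}
			}
		case m, ok := <-msgs:
			if !ok {
				return
			}
			fmt.Println("processing:", m)
		}
	}
}

func main() {
	msgs := make(chan string, 4)
	msgs <- "a"
	msgs <- "b"
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // immediate shutdown: both buffered messages are still handled
	consume(ctx, msgs)
}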
@@ -408,7 +408,6 @@ func (m *MemoryStore) FromCheckpointFiles(dir string, from int64) (int, error) {
return m.FromCheckpoint(dir, from, altFormat)
}

cclog.Print("[METRICSTORE]> No valid checkpoint files found in the directory")
return 0, nil
}

@@ -19,36 +19,49 @@ const (
DefaultAvroCheckpointInterval = time.Minute
)

type MetricStoreConfig struct {
// Number of concurrent workers for checkpoint and archive operations.
// If not set or 0, defaults to min(runtime.NumCPU()/2+1, 10)
NumWorkers int `json:"num-workers"`
Checkpoints struct {
type Checkpoints struct {
FileFormat string `json:"file-format"`
Interval string `json:"interval"`
RootDir string `json:"directory"`
Restore string `json:"restore"`
} `json:"checkpoints"`
Debug struct {
}

type Debug struct {
DumpToFile string `json:"dump-to-file"`
EnableGops bool `json:"gops"`
} `json:"debug"`
RetentionInMemory string `json:"retention-in-memory"`
Archive struct {
Interval string `json:"interval"`
}

type Archive struct {
ArchiveInterval string `json:"interval"`
RootDir string `json:"directory"`
DeleteInstead bool `json:"delete-instead"`
} `json:"archive"`
Subscriptions []struct {
}

type Subscriptions []struct {
// Channel name
SubscribeTo string `json:"subscribe-to"`

// Allow lines without a cluster tag, use this as default, optional
ClusterTag string `json:"cluster-tag"`
} `json:"subscriptions"`
}

var Keys MetricStoreConfig
type MetricStoreConfig struct {
// Number of concurrent workers for checkpoint and archive operations.
// If not set or 0, defaults to min(runtime.NumCPU()/2+1, 10)
NumWorkers int `json:"num-workers"`
RetentionInMemory string `json:"retention-in-memory"`
MemoryCap int `json:"memory-cap"`
Checkpoints Checkpoints `json:"checkpoints"`
Debug *Debug `json:"debug"`
Archive *Archive `json:"archive"`
Subscriptions *Subscriptions `json:"nats-subscriptions"`
}

var Keys MetricStoreConfig = MetricStoreConfig{
Checkpoints: Checkpoints{
FileFormat: "avro",
RootDir: "./var/checkpoints",
},
}

// AggregationStrategy for aggregation over multiple values at different cpus/sockets/..., not time!
type AggregationStrategy int

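With Checkpoints, Debug, Archive and Subscriptions promoted to named types and Keys given defaults, the metric-store block decodes along these lines. A self-contained sketch: the JSON sample mirrors the demo config above, and the structs are reduced to the fields visible in the diff:

package main

import (
	"encoding/json"
	"fmt"
)

type Checkpoints struct {
	FileFormat string `json:"file-format"`
	Interval   string `json:"interval"`
	RootDir    string `json:"directory"`
	Restore    string `json:"restore"`
}

type Archive struct {
	ArchiveInterval string `json:"interval"`
	RootDir         string `json:"directory"`
	DeleteInstead   bool   `json:"delete-instead"`
}

type MetricStoreConfig struct {
	RetentionInMemory string      `json:"retention-in-memory"`
	Checkpoints       Checkpoints `json:"checkpoints"`
	Archive           *Archive    `json:"archive"`
}

// Keys starts out with the same defaults the diff introduces.
var Keys = MetricStoreConfig{
	Checkpoints: Checkpoints{FileFormat: "avro", RootDir: "./var/checkpoints"},
}

func main() {
	raw := []byte(`{"checkpoints": {"interval": "12h"}, "retention-in-memory": "48h"}`)
	if err := json.Unmarshal(raw, &Keys); err != nil {
		panic(err)
	}
	// Fields absent from the JSON keep their defaults (avro, ./var/checkpoints).
	fmt.Printf("%+v\n", Keys)
}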
@@ -9,6 +9,10 @@ const configSchema = `{
"type": "object",
"description": "Configuration specific to built-in metric-store.",
"properties": {
"num-workers": {
"description": "Number of concurrent workers for checkpoint and archive operations",
"type": "integer"
},
"checkpoints": {
"description": "Configuration for checkpointing the metrics within metric-store",
"type": "object",
@@ -24,12 +28,9 @@ const configSchema = `{
"directory": {
"description": "Specify the parent directy in which the checkpointed files should be placed.",
"type": "string"
}
},
"restore": {
"description": "When cc-backend starts up, look for checkpointed files that are less than X hours old and load metrics from these selected checkpoint files.",
"type": "string"
}
}
"required": ["interval"]
},
"archive": {
"description": "Configuration for archiving the already checkpointed files.",
@@ -40,38 +41,21 @@ const configSchema = `{
"type": "string"
},
"directory": {
"description": "Specify the parent directy in which the archived files should be placed.",
"description": "Specify the directy in which the archived files should be placed.",
"type": "string"
}
}
},
"required": ["interval", "directory"]
},
"retention-in-memory": {
"description": "Keep the metrics within memory for given time interval. Retention for X hours, then the metrics would be freed.",
"type": "string"
},
"nats": {
"description": "Configuration for accepting published data through NATS.",
"type": "array",
"items": {
"type": "object",
"properties": {
"address": {
"description": "Address of the NATS server.",
"type": "string"
"memory-cap": {
"description": "Upper memory capacity limit used by metricstore in GB",
"type": "integer"
},
"username": {
"description": "Optional: If configured with username/password method.",
"type": "string"
},
"password": {
"description": "Optional: If configured with username/password method.",
"type": "string"
},
"creds-file-path": {
"description": "Optional: If configured with Credential File method. Path to your NATS cred file.",
"type": "string"
},
"subscriptions": {
"nats-subscriptions": {
"description": "Array of various subscriptions. Allows to subscibe to different subjects and publishers.",
"type": "array",
"items": {
@@ -88,8 +72,6 @@ const configSchema = `{
}
}
}
}
}
}
}
},
"required": ["checkpoints", "retention-in-memory"]
}`

@@ -29,29 +29,30 @@ func ReceiveNats(ms *MemoryStore,
}

var wg sync.WaitGroup

msgs := make(chan []byte, workers*2)

for _, sc := range Keys.Subscriptions {
for _, sc := range *Keys.Subscriptions {
clusterTag := sc.ClusterTag
if workers > 1 {
wg.Add(workers)

for range workers {
go func() {
defer wg.Done()
for m := range msgs {
dec := lineprotocol.NewDecoderWithBytes(m)
if err := DecodeLine(dec, ms, clusterTag); err != nil {
cclog.Errorf("error: %s", err.Error())
}
}

wg.Done()
}()
}

nc.Subscribe(sc.SubscribeTo, func(subject string, data []byte) {
msgs <- data
select {
case msgs <- data:
case <-ctx.Done():
}
})
} else {
nc.Subscribe(sc.SubscribeTo, func(subject string, data []byte) {
@@ -64,7 +65,11 @@ func ReceiveNats(ms *MemoryStore,
cclog.Infof("NATS subscription to '%s' established", sc.SubscribeTo)
}

go func() {
<-ctx.Done()
close(msgs)
}()

wg.Wait()

return nil

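Two things change in ReceiveNats: the per-message send into msgs now also selects on ctx.Done() so a subscription callback can no longer block after shutdown, and msgs is closed once the context is cancelled so the workers' range loops end and wg.Wait() returns. The skeleton of that pattern, detached from NATS and with a stand-in producer:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	workers := 3

	var wg sync.WaitGroup
	msgs := make(chan []byte, workers*2)

	wg.Add(workers)
	for range workers {
		go func() {
			defer wg.Done()
			for m := range msgs { // ends when msgs is closed
				fmt.Println("decoded:", string(m))
			}
		}()
	}

	// Producer (stand-in for the NATS subscription callback): never block past shutdown.
	publish := func(data []byte) {
		select {
		case msgs <- data:
		case <-ctx.Done():
		}
	}

	publish([]byte("node1 cpu_load=1.0"))
	publish([]byte("node2 cpu_load=0.5"))

	// On shutdown, closing msgs lets every worker drain its loop and exit.
	go func() { <-ctx.Done(); close(msgs) }()
	time.AfterFunc(10*time.Millisecond, cancel)
	wg.Wait()
}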
@@ -25,6 +25,7 @@ import (
"encoding/json"
"errors"
"runtime"
"slices"
"sync"
"time"

@@ -44,6 +45,15 @@ var (
shutdownFunc context.CancelFunc
)

// NodeProvider provides information about nodes currently in use by running jobs.
// This interface allows metricstore to query job information without directly
// depending on the repository package, breaking the import cycle.
type NodeProvider interface {
// GetUsedNodes returns a map of cluster names to sorted lists of unique hostnames
// that are currently in use by jobs that started before the given timestamp.
GetUsedNodes(ts int64) (map[string][]string, error)
}

type Metric struct {
Name string
Value schema.Float
@@ -53,6 +63,7 @@ type Metric struct {
type MemoryStore struct {
Metrics map[string]MetricConfig
root Level
nodeProvider NodeProvider // Injected dependency for querying running jobs
}

func Init(rawConfig json.RawMessage, wg *sync.WaitGroup) {
@@ -61,7 +72,7 @@ func Init(rawConfig json.RawMessage, wg *sync.WaitGroup) {
if rawConfig != nil {
config.Validate(configSchema, rawConfig)
dec := json.NewDecoder(bytes.NewReader(rawConfig))
// dec.DisallowUnknownFields()
dec.DisallowUnknownFields()
if err := dec.Decode(&Keys); err != nil {
cclog.Abortf("[METRICSTORE]> Metric Store Config Init: Could not decode config file '%s'.\nError: %s\n", rawConfig, err.Error())
}
@@ -103,7 +114,7 @@ func Init(rawConfig json.RawMessage, wg *sync.WaitGroup) {

ms := GetMemoryStore()

d, err := time.ParseDuration(Keys.Checkpoints.Restore)
d, err := time.ParseDuration(Keys.RetentionInMemory)
if err != nil {
cclog.Fatal(err)
}
@@ -128,7 +139,13 @@ func Init(rawConfig json.RawMessage, wg *sync.WaitGroup) {

ctx, shutdown := context.WithCancel(context.Background())

wg.Add(4)
retentionGoroutines := 1
checkpointingGoroutines := 1
dataStagingGoroutines := 1
archivingGoroutines := 1

totalGoroutines := retentionGoroutines + checkpointingGoroutines + dataStagingGoroutines + archivingGoroutines
wg.Add(totalGoroutines)

Retention(wg, ctx)
Checkpointing(wg, ctx)
@@ -141,10 +158,12 @@ func Init(rawConfig json.RawMessage, wg *sync.WaitGroup) {
// Store the shutdown function for later use by Shutdown()
shutdownFunc = shutdown

if Keys.Subscriptions != nil {
err = ReceiveNats(ms, 1, ctx)
if err != nil {
cclog.Fatal(err)
}
}
}

// InitMetrics creates a new, initialized instance of a MemoryStore.
@@ -183,12 +202,23 @@ func GetMemoryStore() *MemoryStore {
return msInstance
}

// SetNodeProvider sets the NodeProvider implementation for the MemoryStore.
// This must be called during initialization to provide job state information
// for selective buffer retention during Free operations.
// If not set, the Free function will fall back to freeing all buffers.
func (ms *MemoryStore) SetNodeProvider(provider NodeProvider) {
ms.nodeProvider = provider
}

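On the repository side, any type with a matching GetUsedNodes method now satisfies metricstore.NodeProvider. A hypothetical in-memory implementation, only to illustrate the contract (cluster mapped to sorted, unique hostnames of nodes used by jobs started before ts); the real provider queries the job database:

package main

import (
	"fmt"
	"slices"
)

type job struct {
	cluster   string
	startTime int64
	nodes     []string
}

// memRepo is a stand-in for the job repository; it is not the project's type.
type memRepo struct{ jobs []job }

// GetUsedNodes returns cluster -> sorted unique hostnames for jobs started before ts.
func (r *memRepo) GetUsedNodes(ts int64) (map[string][]string, error) {
	used := map[string][]string{}
	for _, j := range r.jobs {
		if j.startTime >= ts {
			continue
		}
		for _, n := range j.nodes {
			if !slices.Contains(used[j.cluster], n) {
				used[j.cluster] = append(used[j.cluster], n)
			}
		}
	}
	for c := range used {
		slices.Sort(used[c])
	}
	return used, nil
}

func main() {
	repo := &memRepo{jobs: []job{
		{cluster: "fritz", startTime: 100, nodes: []string{"f0202", "f0201"}},
		{cluster: "alex", startTime: 500, nodes: []string{"a0122"}},
	}}
	nodes, _ := repo.GetUsedNodes(200) // only the fritz job started before ts=200
	fmt.Println(nodes)                 // map[fritz:[f0201 f0202]]
}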
func Shutdown() {
// Cancel the context to signal all background goroutines to stop
if shutdownFunc != nil {
shutdownFunc()
}

if Keys.Checkpoints.FileFormat != "json" {
close(LineProtocolMessages)
}

cclog.Infof("[METRICSTORE]> Writing to '%s'...\n", Keys.Checkpoints.RootDir)
var files int
var err error
@@ -199,7 +229,6 @@ func Shutdown() {
files, err = ms.ToCheckpoint(Keys.Checkpoints.RootDir, lastCheckpoint.Unix(), time.Now().Unix())
} else {
files, err = GetAvroStore().ToCheckpoint(Keys.Checkpoints.RootDir, true)
close(LineProtocolMessages)
}

if err != nil {
@@ -248,18 +277,15 @@ func Retention(wg *sync.WaitGroup, ctx context.Context) {
}

func Free(ms *MemoryStore, t time.Time) (int, error) {
// jobRepo := repository.GetJobRepository()
// excludeSelectors, err := jobRepo.GetUsedNodes(t.Unix())
// if err != nil {
// return 0, err
// }
// If no NodeProvider is configured, free all buffers older than t
if ms.nodeProvider == nil {
return ms.Free(nil, t.Unix())
}

excludeSelectors := make(map[string][]string, 0)

// excludeSelectors := map[string][]string{
// "alex": {"a0122", "a0123", "a0225"},
// "fritz": {"f0201", "f0202"},
// }
excludeSelectors, err := ms.nodeProvider.GetUsedNodes(t.Unix())
if err != nil {
return 0, err
}

switch lenMap := len(excludeSelectors); lenMap {

@@ -314,11 +340,8 @@ func GetSelectors(ms *MemoryStore, excludeSelectors map[string][]string) [][]str
// Check if the key exists in our exclusion map
if excludedValues, exists := excludeSelectors[key]; exists {
// The key exists, now check if the specific value is in the exclusion list
for _, ev := range excludedValues {
if ev == value {
if slices.Contains(excludedValues, value) {
exclude = true
break
}
}
}

@@ -327,9 +350,6 @@ func GetSelectors(ms *MemoryStore, excludeSelectors map[string][]string) [][]str
}
}

// fmt.Printf("All selectors: %#v\n\n", allSelectors)
// fmt.Printf("filteredSelectors: %#v\n\n", filteredSelectors)

return filteredSelectors
}

@@ -23,7 +23,8 @@ func RegisterLdapSyncService(ds string) {

auth := auth.GetAuthInstance()

cclog.Info("Register LDAP sync service")
cclog.Infof("register ldap sync service with %s interval", ds)

s.NewJob(gocron.DurationJob(interval),
gocron.NewTask(
func() {
@@ -32,6 +33,5 @@ func RegisterLdapSyncService(ds string) {
if err := auth.LdapAuth.Sync(); err != nil {
cclog.Errorf("ldap sync failed: %s", err.Error())
}
cclog.Print("ldap sync done")
}))
}

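For reference, the surrounding code registers this as a gocron/v2 duration job. A stripped-down sketch of that registration; it assumes the scheduler is created with gocron.NewScheduler() as elsewhere in the task manager, and the sync body is replaced by a print:

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron/v2"
)

func main() {
	s, err := gocron.NewScheduler()
	if err != nil {
		panic(err)
	}

	interval := 2 * time.Second // stand-in for the parsed LDAP sync interval "ds"
	_, err = s.NewJob(gocron.DurationJob(interval),
		gocron.NewTask(func() {
			// Here RegisterLdapSyncService would call auth.LdapAuth.Sync().
			fmt.Println("ldap sync done")
		}))
	if err != nil {
		panic(err)
	}

	s.Start()
	time.Sleep(5 * time.Second)
	_ = s.Shutdown()
}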
@@ -17,6 +17,10 @@ import (
"github.com/go-co-op/gocron/v2"
)

const (
DefaultCompressOlderThan = 7
)

// Retention defines the configuration for job retention policies.
type Retention struct {
Policy string `json:"policy"`
@@ -60,6 +64,38 @@ func parseDuration(s string) (time.Duration, error) {
return interval, nil
}

func initArchiveServices(config json.RawMessage) {
var cfg struct {
Retention Retention `json:"retention"`
Compression int `json:"compression"`
}
cfg.Retention.IncludeDB = true

if err := json.Unmarshal(config, &cfg); err != nil {
cclog.Errorf("error while unmarshaling raw config json: %v", err)
}

switch cfg.Retention.Policy {
case "delete":
RegisterRetentionDeleteService(
cfg.Retention.Age,
cfg.Retention.IncludeDB,
cfg.Retention.OmitTagged)
case "move":
RegisterRetentionMoveService(
cfg.Retention.Age,
cfg.Retention.IncludeDB,
cfg.Retention.Location,
cfg.Retention.OmitTagged)
}

if cfg.Compression > 0 {
RegisterCompressionService(cfg.Compression)
} else {
RegisterCompressionService(DefaultCompressOlderThan)
}
}

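The new initArchiveServices helper takes the raw archive block directly, so the retention and compression wiring can be exercised on its own. A hedged decoding sketch with the retention example from the demo config above; the JSON tags beyond "policy" and the service-registration calls are assumptions, replaced here by prints:

package main

import (
	"encoding/json"
	"fmt"
)

const DefaultCompressOlderThan = 7

// Retention mirrors the struct above; tags other than "policy" are assumed.
type Retention struct {
	Policy    string `json:"policy"`
	Age       int    `json:"age"`
	Location  string `json:"location"`
	IncludeDB bool   `json:"includeDB"`
}

func main() {
	raw := json.RawMessage(`{
		"retention": {"policy": "move", "age": 365, "location": "./var/archive"},
		"compression": 0
	}`)

	var cfg struct {
		Retention   Retention `json:"retention"`
		Compression int       `json:"compression"`
	}
	cfg.Retention.IncludeDB = true // default that survives unless the JSON overrides it

	if err := json.Unmarshal(raw, &cfg); err != nil {
		fmt.Println("error while unmarshaling raw config json:", err)
	}

	fmt.Printf("retention: %+v\n", cfg.Retention)
	if cfg.Compression > 0 {
		fmt.Println("compress jobs older than", cfg.Compression, "days")
	} else {
		fmt.Println("compress jobs older than", DefaultCompressOlderThan, "days (default)")
	}
}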
// Start initializes the task manager, parses configurations, and registers background tasks.
// It starts the gocron scheduler.
func Start(cronCfg, archiveConfig json.RawMessage) {
@@ -80,32 +116,11 @@ func Start(cronCfg, archiveConfig json.RawMessage) {
cclog.Errorf("error while decoding cron config: %v", err)
}

var cfg struct {
Retention Retention `json:"retention"`
Compression int `json:"compression"`
}
cfg.Retention.IncludeDB = true

if err := json.Unmarshal(archiveConfig, &cfg); err != nil {
cclog.Warn("Error while unmarshaling raw config json")
}

switch cfg.Retention.Policy {
case "delete":
RegisterRetentionDeleteService(
cfg.Retention.Age,
cfg.Retention.IncludeDB,
cfg.Retention.OmitTagged)
case "move":
RegisterRetentionMoveService(
cfg.Retention.Age,
cfg.Retention.IncludeDB,
cfg.Retention.Location,
cfg.Retention.OmitTagged)
}

if cfg.Compression > 0 {
RegisterCompressionService(cfg.Compression)
if archiveConfig != nil {
initArchiveServices(archiveConfig)
} else {
// Always enable compression
RegisterCompressionService(DefaultCompressOlderThan)
}

lc := auth.Keys.LdapConfig

@@ -77,7 +77,7 @@ type MessageHandler func(subject string, data []byte)
func Connect() {
clientOnce.Do(func() {
if Keys.Address == "" {
cclog.Warn("NATS: no address configured, skipping connection")
cclog.Info("NATS: no address configured, skipping connection")
return
}

@@ -77,14 +77,14 @@ type PlotConfiguration struct {
var UIDefaults = WebConfig{
JobList: JobListConfig{
UsePaging: false,
ShowFootprint: true,
ShowFootprint: false,
},
NodeList: NodeListConfig{
UsePaging: true,
},
JobView: JobViewConfig{
ShowPolarPlot: true,
ShowFootprint: true,
ShowFootprint: false,
ShowRoofline: true,
ShowStatTable: true,
},