diff --git a/cmd/cc-backend/server.go b/cmd/cc-backend/server.go
index 4e6120e9..b37d3450 100644
--- a/cmd/cc-backend/server.go
+++ b/cmd/cc-backend/server.go
@@ -18,6 +18,7 @@ import (
"net/http"
"os"
"strings"
+ "sync"
"time"
"github.com/99designs/gqlgen/graphql"
@@ -399,16 +400,6 @@ func (s *Server) Start(ctx context.Context) error {
return fmt.Errorf("dropping privileges: %w", err)
}
- // Handle context cancellation for graceful shutdown
- go func() {
- <-ctx.Done()
- shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- defer cancel()
- if err := s.server.Shutdown(shutdownCtx); err != nil {
- cclog.Errorf("Server shutdown error: %v", err)
- }
- }()
-
if err = s.server.Serve(listener); err != nil && err != http.ErrServerClosed {
return fmt.Errorf("server failed: %w", err)
}
@@ -416,8 +407,7 @@ func (s *Server) Start(ctx context.Context) error {
}
func (s *Server) Shutdown(ctx context.Context) {
- // Create a shutdown context with timeout
- shutdownCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+ shutdownCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
nc := nats.GetClient()
@@ -425,20 +415,36 @@ func (s *Server) Shutdown(ctx context.Context) {
nc.Close()
}
- // First shut down the server gracefully (waiting for all ongoing requests)
if err := s.server.Shutdown(shutdownCtx); err != nil {
cclog.Errorf("Server shutdown error: %v", err)
}
- // Archive all the metric store data
- ms := metricstore.GetMemoryStore()
+ // Run metricstore and archiver shutdown concurrently.
+ // They are independent: metricstore writes .bin snapshots,
+ // archiver flushes pending job archives.
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ var wg sync.WaitGroup
- if ms != nil {
- metricstore.Shutdown()
- }
+ if ms := metricstore.GetMemoryStore(); ms != nil {
+ wg.Go(func() {
+ metricstore.Shutdown()
+ })
+ }
- // Shutdown archiver with 10 second timeout for fast shutdown
- if err := archiver.Shutdown(10 * time.Second); err != nil {
- cclog.Warnf("Archiver shutdown: %v", err)
+ wg.Go(func() {
+ if err := archiver.Shutdown(10 * time.Second); err != nil {
+ cclog.Warnf("Archiver shutdown: %v", err)
+ }
+ })
+
+ wg.Wait()
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ cclog.Warn("Shutdown deadline exceeded, forcing exit")
}
}
diff --git a/internal/repository/jobQuery.go b/internal/repository/jobQuery.go
index 36c5892e..5256bacb 100644
--- a/internal/repository/jobQuery.go
+++ b/internal/repository/jobQuery.go
@@ -280,11 +280,11 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
// buildIntCondition creates clauses for integer range filters, using BETWEEN only if required.
func buildIntCondition(field string, cond *config.IntRange, query sq.SelectBuilder) sq.SelectBuilder {
- if cond.From != 1 && cond.To != 0 {
+ if cond.From > 0 && cond.To > 0 {
return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
- } else if cond.From != 1 && cond.To == 0 {
+ } else if cond.From > 0 && cond.To == 0 {
return query.Where(field+" >= ?", cond.From)
- } else if cond.From == 1 && cond.To != 0 {
+ } else if cond.From == 0 && cond.To > 0 {
return query.Where(field+" <= ?", cond.To)
} else {
return query
@@ -293,11 +293,11 @@ func buildIntCondition(field string, cond *config.IntRange, query sq.SelectBuild
// buildFloatCondition creates a clauses for float range filters, using BETWEEN only if required.
func buildFloatCondition(field string, cond *model.FloatRange, query sq.SelectBuilder) sq.SelectBuilder {
- if cond.From != 1.0 && cond.To != 0.0 {
+ if cond.From > 0.0 && cond.To > 0.0 {
return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
- } else if cond.From != 1.0 && cond.To == 0.0 {
+ } else if cond.From > 0.0 && cond.To == 0.0 {
return query.Where(field+" >= ?", cond.From)
- } else if cond.From == 1.0 && cond.To != 0.0 {
+ } else if cond.From == 0.0 && cond.To > 0.0 {
return query.Where(field+" <= ?", cond.To)
} else {
return query
@@ -339,11 +339,11 @@ func buildTimeCondition(field string, cond *config.TimeRange, query sq.SelectBui
// buildFloatJSONCondition creates a filter on a numeric field within the footprint JSON column, using BETWEEN only if required.
func buildFloatJSONCondition(jsonField string, cond *model.FloatRange, query sq.SelectBuilder) sq.SelectBuilder {
query = query.Where("JSON_VALID(footprint)")
- if cond.From != 1.0 && cond.To != 0.0 {
+ if cond.From > 0.0 && cond.To > 0.0 {
return query.Where("JSON_EXTRACT(footprint, \"$."+jsonField+"\") BETWEEN ? AND ?", cond.From, cond.To)
- } else if cond.From != 1.0 && cond.To == 0.0 {
+ } else if cond.From > 0.0 && cond.To == 0.0 {
return query.Where("JSON_EXTRACT(footprint, \"$."+jsonField+"\") >= ?", cond.From)
- } else if cond.From == 1.0 && cond.To != 0.0 {
+ } else if cond.From == 0.0 && cond.To > 0.0 {
return query.Where("JSON_EXTRACT(footprint, \"$."+jsonField+"\") <= ?", cond.To)
} else {
return query
diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go
index e24038e2..78bab931 100644
--- a/internal/routerConfig/routes.go
+++ b/internal/routerConfig/routes.go
@@ -308,7 +308,7 @@ func buildFilterPresets(query url.Values) map[string]any {
if parts[0] == "lessthan" {
lt, lte := strconv.Atoi(parts[1])
if lte == nil {
- filterPresets["numNodes"] = map[string]int{"from": 1, "to": lt}
+ filterPresets["numNodes"] = map[string]int{"from": 0, "to": lt}
}
} else if parts[0] == "morethan" {
mt, mte := strconv.Atoi(parts[1])
@@ -330,7 +330,7 @@ func buildFilterPresets(query url.Values) map[string]any {
if parts[0] == "lessthan" {
lt, lte := strconv.Atoi(parts[1])
if lte == nil {
- filterPresets["numHWThreads"] = map[string]int{"from": 1, "to": lt}
+ filterPresets["numHWThreads"] = map[string]int{"from": 0, "to": lt}
}
} else if parts[0] == "morethan" {
mt, mte := strconv.Atoi(parts[1])
@@ -352,7 +352,7 @@ func buildFilterPresets(query url.Values) map[string]any {
if parts[0] == "lessthan" {
lt, lte := strconv.Atoi(parts[1])
if lte == nil {
- filterPresets["numAccelerators"] = map[string]int{"from": 1, "to": lt}
+ filterPresets["numAccelerators"] = map[string]int{"from": 0, "to": lt}
}
} else if parts[0] == "morethan" {
mt, mte := strconv.Atoi(parts[1])
@@ -408,7 +408,7 @@ func buildFilterPresets(query url.Values) map[string]any {
if parts[0] == "lessthan" {
lt, lte := strconv.Atoi(parts[1])
if lte == nil {
- filterPresets["energy"] = map[string]int{"from": 1, "to": lt}
+ filterPresets["energy"] = map[string]int{"from": 0, "to": lt}
}
} else if parts[0] == "morethan" {
mt, mte := strconv.Atoi(parts[1])
@@ -434,7 +434,7 @@ func buildFilterPresets(query url.Values) map[string]any {
if lte == nil {
statEntry := map[string]any{
"field": parts[0],
- "from": 1,
+ "from": 0,
"to": lt,
}
statList = append(statList, statEntry)
diff --git a/pkg/metricstore/walCheckpoint.go b/pkg/metricstore/walCheckpoint.go
index 38585cf5..57064e5f 100644
--- a/pkg/metricstore/walCheckpoint.go
+++ b/pkg/metricstore/walCheckpoint.go
@@ -69,6 +69,7 @@ import (
"strings"
"sync"
"sync/atomic"
+ "time"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema"
@@ -111,10 +112,16 @@ type walRotateReq struct {
// walFileState holds an open WAL file handle and buffered writer for one host directory.
type walFileState struct {
- f *os.File
- w *bufio.Writer
+ f *os.File
+ w *bufio.Writer
+ dirty bool
}
+// walFlushInterval controls how often dirty WAL files are flushed to disk.
+// Batching flushes on a timer instead of per-message amortizes syscall cost,
+// at the price of losing up to one interval of buffered WAL data on a crash.
+const walFlushInterval = 5 * time.Second
+
// walShardIndex computes which shard a message belongs to based on cluster+node.
// Uses FNV-1a hash for fast, well-distributed mapping.
func walShardIndex(cluster, node string) int {
@@ -222,6 +229,7 @@ func WALStaging(wg *sync.WaitGroup, ctx context.Context) {
if err := writeWALRecordDirect(ws.w, msg); err != nil {
cclog.Errorf("[METRICSTORE]> WAL: write record: %v", err)
}
+ ws.dirty = true
}
processRotate := func(req walRotateReq) {
@@ -238,10 +246,11 @@ func WALStaging(wg *sync.WaitGroup, ctx context.Context) {
close(req.done)
}
- flushAll := func() {
+ flushDirty := func() {
for _, ws := range hostFiles {
- if ws.f != nil {
+ if ws.dirty {
ws.w.Flush()
+ ws.dirty = false
}
}
}
@@ -257,12 +266,35 @@ func WALStaging(wg *sync.WaitGroup, ctx context.Context) {
case req := <-rotateCh:
processRotate(req)
default:
- flushAll()
+ flushDirty()
return
}
}
}
+ ticker := time.NewTicker(walFlushInterval)
+ defer ticker.Stop()
+
+ // drainBatch processes up to 4096 pending messages (and any interleaved
+ // rotate requests) without blocking. Returns false if msgCh was closed.
+ drainBatch := func() bool {
+ for range 4096 {
+ select {
+ case msg, ok := <-msgCh:
+ if !ok {
+ flushDirty()
+ return false
+ }
+ processMsg(msg)
+ case req := <-rotateCh:
+ processRotate(req)
+ default:
+ return true
+ }
+ }
+ return true
+ }
+
for {
select {
case <-ctx.Done():
@@ -273,23 +305,12 @@ func WALStaging(wg *sync.WaitGroup, ctx context.Context) {
return
}
processMsg(msg)
-
- // Drain up to 256 more messages without blocking to batch writes.
- for range 256 {
- select {
- case msg, ok := <-msgCh:
- if !ok {
- return
- }
- processMsg(msg)
- case req := <-rotateCh:
- processRotate(req)
- default:
- goto flushed
- }
+ if !drainBatch() {
+ return
}
- flushed:
- flushAll()
+ // No flush here — timer handles periodic flushing.
+ case <-ticker.C:
+ flushDirty()
case req := <-rotateCh:
processRotate(req)
}
@@ -413,69 +434,6 @@ func writeWALRecordDirect(w *bufio.Writer, msg *WALMessage) error {
return err
}
-// buildWALPayload encodes a WALMessage into a binary payload (without magic/length/CRC).
-func buildWALPayload(msg *WALMessage) []byte {
- size := 8 + 2 + len(msg.MetricName) + 1 + 4
- for _, s := range msg.Selector {
- size += 1 + len(s)
- }
-
- buf := make([]byte, 0, size)
-
- // Timestamp (8 bytes, little-endian int64)
- var ts [8]byte
- binary.LittleEndian.PutUint64(ts[:], uint64(msg.Timestamp))
- buf = append(buf, ts[:]...)
-
- // Metric name (2-byte length prefix + bytes)
- var mLen [2]byte
- binary.LittleEndian.PutUint16(mLen[:], uint16(len(msg.MetricName)))
- buf = append(buf, mLen[:]...)
- buf = append(buf, msg.MetricName...)
-
- // Selector count (1 byte)
- buf = append(buf, byte(len(msg.Selector)))
-
- // Selectors (1-byte length prefix + bytes each)
- for _, sel := range msg.Selector {
- buf = append(buf, byte(len(sel)))
- buf = append(buf, sel...)
- }
-
- // Value (4 bytes, float32 bit representation)
- var val [4]byte
- binary.LittleEndian.PutUint32(val[:], math.Float32bits(float32(msg.Value)))
- buf = append(buf, val[:]...)
-
- return buf
-}
-
-// writeWALRecord appends a binary WAL record to the writer.
-// Format: [4B magic][4B payload_len][payload][4B CRC32]
-func writeWALRecord(w io.Writer, msg *WALMessage) error {
- payload := buildWALPayload(msg)
- crc := crc32.ChecksumIEEE(payload)
-
- record := make([]byte, 0, 4+4+len(payload)+4)
-
- var magic [4]byte
- binary.LittleEndian.PutUint32(magic[:], walRecordMagic)
- record = append(record, magic[:]...)
-
- var pLen [4]byte
- binary.LittleEndian.PutUint32(pLen[:], uint32(len(payload)))
- record = append(record, pLen[:]...)
-
- record = append(record, payload...)
-
- var crcBytes [4]byte
- binary.LittleEndian.PutUint32(crcBytes[:], crc)
- record = append(record, crcBytes[:]...)
-
- _, err := w.Write(record)
- return err
-}
-
// readWALRecord reads one WAL record from the reader.
// Returns (nil, nil) on clean EOF. Returns error on data corruption.
// A CRC mismatch indicates a truncated trailing record (expected on crash).
diff --git a/web/frontend/src/generic/Filters.svelte b/web/frontend/src/generic/Filters.svelte
index 02f801a0..3c0091ad 100644
--- a/web/frontend/src/generic/Filters.svelte
+++ b/web/frontend/src/generic/Filters.svelte
@@ -166,12 +166,12 @@
items.push({ project: { [filters.projectMatch]: filters.project } });
if (filters.user)
items.push({ user: { [filters.userMatch]: filters.user } });
- if (filters.numNodes.from != null || filters.numNodes.to != null) {
+ if (filters.numNodes.from != null && filters.numNodes.to != null) {
items.push({
numNodes: { from: filters.numNodes.from, to: filters.numNodes.to },
});
}
- if (filters.numAccelerators.from != null || filters.numAccelerators.to != null) {
+ if (filters.numAccelerators.from != null && filters.numAccelerators.to != null) {
items.push({
numAccelerators: {
from: filters.numAccelerators.from,
@@ -179,7 +179,7 @@
},
});
}
- if (filters.numHWThreads.from != null || filters.numHWThreads.to != null) {
+ if (filters.numHWThreads.from != null && filters.numHWThreads.to != null) {
items.push({
numHWThreads: {
from: filters.numHWThreads.from,
@@ -206,14 +206,21 @@
items.push({ duration: { to: filters.duration.lessThan, from: 0 } });
if (filters.duration.moreThan)
items.push({ duration: { to: 0, from: filters.duration.moreThan } });
- if (filters.energy.from != null || filters.energy.to != null)
+ if (filters.energy.from != null && filters.energy.to != null)
items.push({
energy: { from: filters.energy.from, to: filters.energy.to },
});
if (filters.jobId)
items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } });
- if (filters.stats.length != 0)
- items.push({ metricStats: filters.stats.map((st) => { return { metricName: st.field, range: { from: st.from, to: st.to }} }) });
+ if (filters.stats.length != 0) {
+ const metricStats = [];
+ filters.stats.forEach((st) => {
+ if (st.from != null && st.to != null)
+ metricStats.push({ metricName: st.field, range: { from: st.from, to: st.to }});
+ });
+ if (metricStats.length != 0)
+ items.push({metricStats})
+ };
if (filters.node) items.push({ node: { [filters.nodeMatch]: filters.node } });
if (filters.jobName) items.push({ jobName: { contains: filters.jobName } });
if (filters.schedule) items.push({ schedule: filters.schedule });
@@ -280,40 +287,40 @@
opts.push(`duration=morethan-${filters.duration.moreThan}`);
if (filters.tags.length != 0)
for (let tag of filters.tags) opts.push(`tag=${tag}`);
- if (filters.numNodes.from > 1 && filters.numNodes.to > 0)
+ if (filters.numNodes.from > 0 && filters.numNodes.to > 0)
opts.push(`numNodes=${filters.numNodes.from}-${filters.numNodes.to}`);
- else if (filters.numNodes.from > 1 && filters.numNodes.to == 0)
+ else if (filters.numNodes.from > 0 && filters.numNodes.to == 0)
opts.push(`numNodes=morethan-${filters.numNodes.from}`);
- else if (filters.numNodes.from == 1 && filters.numNodes.to > 0)
+ else if (filters.numNodes.from == 0 && filters.numNodes.to > 0)
opts.push(`numNodes=lessthan-${filters.numNodes.to}`);
- if (filters.numHWThreads.from > 1 && filters.numHWThreads.to > 0)
+ if (filters.numHWThreads.from > 0 && filters.numHWThreads.to > 0)
opts.push(`numHWThreads=${filters.numHWThreads.from}-${filters.numHWThreads.to}`);
- else if (filters.numHWThreads.from > 1 && filters.numHWThreads.to == 0)
+ else if (filters.numHWThreads.from > 0 && filters.numHWThreads.to == 0)
opts.push(`numHWThreads=morethan-${filters.numHWThreads.from}`);
- else if (filters.numHWThreads.from == 1 && filters.numHWThreads.to > 0)
+ else if (filters.numHWThreads.from == 0 && filters.numHWThreads.to > 0)
opts.push(`numHWThreads=lessthan-${filters.numHWThreads.to}`);
- if (filters.numAccelerators.from && filters.numAccelerators.to)
+ if (filters.numAccelerators.from > 0 && filters.numAccelerators.to > 0)
opts.push(`numAccelerators=${filters.numAccelerators.from}-${filters.numAccelerators.to}`);
- else if (filters.numAccelerators.from > 1 && filters.numAccelerators.to == 0)
+ else if (filters.numAccelerators.from > 0 && filters.numAccelerators.to == 0)
opts.push(`numAccelerators=morethan-${filters.numAccelerators.from}`);
- else if (filters.numAccelerators.from == 1 && filters.numAccelerators.to > 0)
+ else if (filters.numAccelerators.from == 0 && filters.numAccelerators.to > 0)
opts.push(`numAccelerators=lessthan-${filters.numAccelerators.to}`);
if (filters.node) opts.push(`node=${filters.node}`);
if (filters.node && filters.nodeMatch != "eq") // "eq" is default-case
opts.push(`nodeMatch=${filters.nodeMatch}`);
- if (filters.energy.from > 1 && filters.energy.to > 0)
+ if (filters.energy.from > 0 && filters.energy.to > 0)
opts.push(`energy=${filters.energy.from}-${filters.energy.to}`);
- else if (filters.energy.from > 1 && filters.energy.to == 0)
+ else if (filters.energy.from > 0 && filters.energy.to == 0)
opts.push(`energy=morethan-${filters.energy.from}`);
- else if (filters.energy.from == 1 && filters.energy.to > 0)
+ else if (filters.energy.from == 0 && filters.energy.to > 0)
opts.push(`energy=lessthan-${filters.energy.to}`);
if (filters.stats.length > 0)
for (let stat of filters.stats) {
- if (stat.from > 1 && stat.to > 0)
+ if (stat.from > 0 && stat.to > 0)
opts.push(`stat=${stat.field}-${stat.from}-${stat.to}`);
- else if (stat.from > 1 && stat.to == 0)
+ else if (stat.from > 0 && stat.to == 0)
opts.push(`stat=${stat.field}-morethan-${stat.from}`);
- else if (stat.from == 1 && stat.to > 0)
+ else if (stat.from == 0 && stat.to > 0)
opts.push(`stat=${stat.field}-lessthan-${stat.to}`);
}
// Build && Return
@@ -511,43 +518,43 @@
{/if}
- {#if filters.numNodes.from > 1 && filters.numNodes.to > 0}
+ {#if filters.numNodes.from > 0 && filters.numNodes.to > 0}
(isResourcesOpen = true)}>
Nodes: {filters.numNodes.from} - {filters.numNodes.to}
- {:else if filters.numNodes.from > 1 && filters.numNodes.to == 0}
+ {:else if filters.numNodes.from > 0 && filters.numNodes.to == 0}
(isResourcesOpen = true)}>
≥ {filters.numNodes.from} Node(s)
- {:else if filters.numNodes.from == 1 && filters.numNodes.to > 0}
+ {:else if filters.numNodes.from == 0 && filters.numNodes.to > 0}
(isResourcesOpen = true)}>
≤ {filters.numNodes.to} Node(s)
{/if}
- {#if filters.numHWThreads.from > 1 && filters.numHWThreads.to > 0}
+ {#if filters.numHWThreads.from > 0 && filters.numHWThreads.to > 0}
(isResourcesOpen = true)}>
HWThreads: {filters.numHWThreads.from} - {filters.numHWThreads.to}
- {:else if filters.numHWThreads.from > 1 && filters.numHWThreads.to == 0}
+ {:else if filters.numHWThreads.from > 0 && filters.numHWThreads.to == 0}
(isResourcesOpen = true)}>
≥ {filters.numHWThreads.from} HWThread(s)
- {:else if filters.numHWThreads.from == 1 && filters.numHWThreads.to > 0}
+ {:else if filters.numHWThreads.from == 0 && filters.numHWThreads.to > 0}
(isResourcesOpen = true)}>
≤ {filters.numHWThreads.to} HWThread(s)
{/if}
- {#if filters.numAccelerators.from > 1 && filters.numAccelerators.to > 0}
+ {#if filters.numAccelerators.from > 0 && filters.numAccelerators.to > 0}
(isResourcesOpen = true)}>
Accelerators: {filters.numAccelerators.from} - {filters.numAccelerators.to}
- {:else if filters.numAccelerators.from > 1 && filters.numAccelerators.to == 0}
+ {:else if filters.numAccelerators.from > 0 && filters.numAccelerators.to == 0}
(isResourcesOpen = true)}>
≥ {filters.numAccelerators.from} Acc(s)
- {:else if filters.numAccelerators.from == 1 && filters.numAccelerators.to > 0}
+ {:else if filters.numAccelerators.from == 0 && filters.numAccelerators.to > 0}
(isResourcesOpen = true)}>
≤ {filters.numAccelerators.to} Acc(s)
@@ -559,15 +566,15 @@
{/if}
- {#if filters.energy.from > 1 && filters.energy.to > 0}
+ {#if filters.energy.from > 0 && filters.energy.to > 0}
(isEnergyOpen = true)}>
Total Energy: {filters.energy.from} - {filters.energy.to} kWh
- {:else if filters.energy.from > 1 && filters.energy.to == 0}
+ {:else if filters.energy.from > 0 && filters.energy.to == 0}
(isEnergyOpen = true)}>
Total Energy ≥ {filters.energy.from} kWh
- {:else if filters.energy.from == 1 && filters.energy.to > 0}
+ {:else if filters.energy.from == 0 && filters.energy.to > 0}
(isEnergyOpen = true)}>
Total Energy ≤ {filters.energy.to} kWh
@@ -575,15 +582,15 @@
{#if filters.stats.length > 0}
{#each filters.stats as stat}
- {#if stat.from > 1 && stat.to > 0}
+ {#if stat.from > 0 && stat.to > 0}
(isStatsOpen = true)}>
{stat.field}: {stat.from} - {stat.to} {stat.unit}
- {:else if stat.from > 1 && stat.to == 0}
+ {:else if stat.from > 0 && stat.to == 0}
(isStatsOpen = true)}>
{stat.field} ≥ {stat.from} {stat.unit}
- {:else if stat.from == 1 && stat.to > 0}
+ {:else if stat.from == 0 && stat.to > 0}
(isStatsOpen = true)}>
{stat.field} ≤ {stat.to} {stat.unit}
diff --git a/web/frontend/src/generic/filters/Energy.svelte b/web/frontend/src/generic/filters/Energy.svelte
index dc532c86..c8013b1b 100644
--- a/web/frontend/src/generic/filters/Energy.svelte
+++ b/web/frontend/src/generic/filters/Energy.svelte
@@ -28,31 +28,29 @@
} = $props();
/* Const */
- const minEnergyPreset = 1;
+ const minEnergyPreset = 0;
const maxEnergyPreset = 100;
/* Derived */
// Pending
let pendingEnergyState = $derived({
- from: presetEnergy?.from ? presetEnergy.from : minEnergyPreset,
- to: !(presetEnergy.to == null || presetEnergy.to == 0) ? presetEnergy.to : maxEnergyPreset,
+ from: presetEnergy?.from || minEnergyPreset,
+ to: (presetEnergy.to == 0) ? null : presetEnergy.to,
});
// Changable
let energyState = $derived({
- from: presetEnergy?.from ? presetEnergy.from : minEnergyPreset,
- to: !(presetEnergy.to == null || presetEnergy.to == 0) ? presetEnergy.to : maxEnergyPreset,
+ from: presetEnergy?.from || minEnergyPreset,
+ to: (presetEnergy.to == 0) ? null : presetEnergy.to,
});
- const energyActive = $derived(!(JSON.stringify(energyState) === JSON.stringify({ from: minEnergyPreset, to: maxEnergyPreset })));
- // Block Apply if null
- const disableApply = $derived(energyState.from === null || energyState.to === null);
+ const energyActive = $derived(!(JSON.stringify(energyState) === JSON.stringify({ from: minEnergyPreset, to: null })));
/* Function */
function setEnergy() {
if (energyActive) {
pendingEnergyState = {
- from: energyState.from,
- to: (energyState.to == maxEnergyPreset) ? 0 : energyState.to
+ from: (!energyState?.from) ? 0 : energyState.from,
+ to: (energyState.to === null) ? 0 : energyState.to
};
} else {
pendingEnergyState = { from: null, to: null};
@@ -86,7 +84,6 @@