Mirror of https://github.com/ClusterCockpit/cc-backend
Run go fix
@@ -16,6 +16,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -692,13 +693,7 @@ func (fsa *FsArchive) StoreClusterCfg(name string, config *schema.Cluster) error
 	}

 	// Update clusters list if new
-	found := false
-	for _, c := range fsa.clusters {
-		if c == name {
-			found = true
-			break
-		}
-	}
+	found := slices.Contains(fsa.clusters, name)
 	if !found {
 		fsa.clusters = append(fsa.clusters, name)
 	}
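Illustrative note (not part of the commit): the hand-rolled membership loop and slices.Contains (Go 1.21+) are equivalent for a plain string slice. A minimal standalone sketch; the cluster names below are made-up example data, not taken from the repository.

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	clusters := []string{"fritz", "alex"} // hypothetical example data
	name := "woody"

	// Hand-rolled membership test, as in the old code.
	found := false
	for _, c := range clusters {
		if c == name {
			found = true
			break
		}
	}

	// Equivalent one-liner used after the rewrite.
	if found != slices.Contains(clusters, name) {
		panic("should never differ")
	}

	if !slices.Contains(clusters, name) {
		clusters = append(clusters, name)
	}
	fmt.Println(clusters) // [fritz alex woody]
}
```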
@@ -39,18 +39,18 @@ func (m *memTarget) WriteFile(name string, data []byte) error {

 func makeTestJob(jobID int64) (*schema.Job, *schema.JobData) {
 	meta := &schema.Job{
-		JobID:      jobID,
-		Cluster:    "testcluster",
-		SubCluster: "sc0",
-		Project:    "testproject",
-		User:       "testuser",
-		State:      schema.JobStateCompleted,
-		StartTime:  1700000000,
-		Duration:   3600,
-		Walltime:   7200,
-		NumNodes:   2,
+		JobID:        jobID,
+		Cluster:      "testcluster",
+		SubCluster:   "sc0",
+		Project:      "testproject",
+		User:         "testuser",
+		State:        schema.JobStateCompleted,
+		StartTime:    1700000000,
+		Duration:     3600,
+		Walltime:     7200,
+		NumNodes:     2,
 		NumHWThreads: 16,
-		SMT:        1,
+		SMT:          1,
 		Resources: []*schema.Resource{
 			{Hostname: "node001"},
 			{Hostname: "node002"},
@@ -141,7 +141,7 @@ func TestParquetWriterSingleBatch(t *testing.T) {
 	target := newMemTarget()
 	pw := NewParquetWriter(target, 512)

-	for i := int64(0); i < 5; i++ {
+	for i := range int64(5) {
 		meta, data := makeTestJob(i)
 		row, err := JobToParquetRow(meta, data)
 		if err != nil {
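Illustrative note (not part of the commit): a minimal sketch of the Go 1.22 range-over-int form that go fix substitutes for the counting loop. Both variants visit 0 through 4, and the loop variable keeps the int64 type of the range expression.

```go
package main

import "fmt"

func main() {
	// Classic counting loop, as in the old test code.
	var a []int64
	for i := int64(0); i < 5; i++ {
		a = append(a, i)
	}

	// Go 1.22+ range-over-int form used after the rewrite.
	// i is typed int64 because the range expression is int64(5).
	var b []int64
	for i := range int64(5) {
		b = append(b, i)
	}

	fmt.Println(a, b) // [0 1 2 3 4] [0 1 2 3 4]
}
```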
@@ -179,7 +179,7 @@ func TestParquetWriterBatching(t *testing.T) {
 	pw := NewParquetWriter(target, 0) // 0 MB means every job triggers a flush
 	pw.maxSizeBytes = 1 // Force flush after every row

-	for i := int64(0); i < 3; i++ {
+	for i := range int64(3) {
 		meta, data := makeTestJob(i)
 		row, err := JobToParquetRow(meta, data)
 		if err != nil {
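Illustrative note (not part of the commit): the batching behavior this test exercises, flushing whenever the accumulated size reaches a threshold so that a 1-byte threshold forces a flush after every row, can be sketched generically as below. This is a hypothetical illustration, not the repository's ParquetWriter implementation.

```go
package main

import "fmt"

// batchWriter is a hypothetical sketch of size-threshold batching:
// rows accumulate until their combined size reaches maxSizeBytes.
type batchWriter struct {
	maxSizeBytes int
	pending      []string
	pendingSize  int
	flushes      int
}

func (w *batchWriter) Add(row string) {
	w.pending = append(w.pending, row)
	w.pendingSize += len(row)
	if w.pendingSize >= w.maxSizeBytes {
		w.Flush()
	}
}

func (w *batchWriter) Flush() {
	if len(w.pending) == 0 {
		return
	}
	w.flushes++
	w.pending = w.pending[:0]
	w.pendingSize = 0
}

func main() {
	w := &batchWriter{maxSizeBytes: 1} // force a flush after every row
	for i := range int64(3) {
		w.Add(fmt.Sprintf("job-%d", i))
	}
	fmt.Println("flushes:", w.flushes) // 3
}
```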
@@ -263,7 +263,7 @@ func TestClusterAwareParquetWriter(t *testing.T) {
 	cw.SetClusterConfig("alex", &schema.Cluster{Name: "alex"})

 	// Add jobs from different clusters
-	for i := int64(0); i < 3; i++ {
+	for i := range int64(3) {
 		meta, data := makeTestJobForCluster(i, "fritz")
 		row, err := JobToParquetRow(meta, data)
 		if err != nil {
@@ -9,6 +9,7 @@ import (
 	"context"
 	"slices"
 	"strconv"
+	"strings"
 	"sync"

 	cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
@@ -44,11 +45,11 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
				continue
			}

-			metricName := ""
+			var metricName strings.Builder
			for _, selectorName := range val.Selector {
-				metricName += selectorName + SelectorDelimiter
+				metricName.WriteString(selectorName + SelectorDelimiter)
			}
-			metricName += val.MetricName
+			metricName.WriteString(val.MetricName)

			var selector []string
			selector = append(selector, val.Cluster, val.Node, strconv.FormatInt(freq, 10))
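Illustrative note (not part of the commit): the pattern being introduced here builds the delimited metric name with strings.Builder instead of repeated concatenation, which avoids allocating a new string on every +=. A self-contained sketch; the selector values and delimiter below are made-up stand-ins for val.Selector and SelectorDelimiter.

```go
package main

import (
	"fmt"
	"strings"
)

const selectorDelimiter = ":" // stand-in for the package's SelectorDelimiter

func main() {
	selector := []string{"testcluster", "node001"} // stand-in for val.Selector
	metric := "cpu_load"                           // stand-in for val.MetricName

	// Old style: repeated concatenation allocates a new string on every +=.
	name := ""
	for _, s := range selector {
		name += s + selectorDelimiter
	}
	name += metric

	// New style: strings.Builder grows a single internal buffer instead.
	var b strings.Builder
	for _, s := range selector {
		b.WriteString(s + selectorDelimiter)
	}
	b.WriteString(metric)

	fmt.Println(name == b.String(), b.String()) // true testcluster:node001:cpu_load
}
```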
@@ -62,7 +63,7 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
			}

			if avroLevel != nil {
-				avroLevel.addMetric(metricName, val.Value, val.Timestamp, int(freq))
+				avroLevel.addMetric(metricName.String(), val.Value, val.Timestamp, int(freq))
			}
		default:
			// No more messages, exit
@@ -82,13 +83,13 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
				continue
			}

-			metricName := ""
+			var metricName strings.Builder

			for _, selectorName := range val.Selector {
-				metricName += selectorName + SelectorDelimiter
+				metricName.WriteString(selectorName + SelectorDelimiter)
			}

-			metricName += val.MetricName
+			metricName.WriteString(val.MetricName)

			// Create a new selector for the Avro level
			// The selector is a slice of strings that represents the path to the
@@ -109,7 +110,7 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
			}

			if avroLevel != nil {
-				avroLevel.addMetric(metricName, val.Value, val.Timestamp, int(freq))
+				avroLevel.addMetric(metricName.String(), val.Value, val.Timestamp, int(freq))
			}
		}
	}
@@ -143,7 +143,7 @@ func TestHealthCheck(t *testing.T) {
	// Setup test data for node003 - some metrics missing (no buffer)
	node003 := ms.root.findLevelOrCreate([]string{"testcluster", "node003"}, len(metrics))
	// Only create buffers for first two metrics
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
		node003.metrics[i] = newBuffer(startTime, 10)
		for ts := startTime; ts <= now; ts += 10 {
			node003.metrics[i].write(ts, schema.Float(float64(i+1)))
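Illustrative note (not part of the commit): the range-over-int rewrite only applies to loops that count from 0 upward by 1, which is why the inner timestamp loop above keeps its three-clause form. A minimal sketch:

```go
package main

import "fmt"

func main() {
	// Eligible for the rewrite: counts 0, 1 with step 1.
	for i := range 2 {
		fmt.Println("buffer index", i)
	}

	// Not eligible: starts at an arbitrary value and steps by 10,
	// so it stays a classic three-clause for loop.
	startTime, now := int64(1700000000), int64(1700000030)
	for ts := startTime; ts <= now; ts += 10 {
		fmt.Println("timestamp", ts)
	}
}
```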