Run go fix
@@ -49,7 +49,7 @@ func TestRateLimiterBehavior(t *testing.T) {
 	limiter := getIPUserLimiter(ip, username)
 
 	// Should allow first 5 attempts
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		if !limiter.Allow() {
 			t.Errorf("Request %d should be allowed within rate limit", i+1)
 		}
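Note: the loop rewrites in this commit use Go 1.22's range-over-int form, where ranging over an integer n yields i = 0, 1, ..., n-1. An illustrative sketch of the equivalence (not part of the commit):

package main

import "fmt"

func main() {
	// Since Go 1.22, "range 5" iterates i = 0 through 4,
	// exactly like the replaced three-clause loop.
	for i := range 5 {
		fmt.Printf("request %d allowed\n", i+1)
	}
}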
@@ -9,6 +9,7 @@ import (
 	"database/sql"
 	"errors"
 	"fmt"
+	"strings"
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
@@ -80,11 +81,12 @@ func extractNameFromClaims(claims jwt.MapClaims) string {
 			return ""
 		}
 
-		name := fmt.Sprintf("%v", vals[0])
+		var name strings.Builder
+		name.WriteString(fmt.Sprintf("%v", vals[0]))
 		for i := 1; i < len(vals); i++ {
-			name += fmt.Sprintf(" %v", vals[i])
+			name.WriteString(fmt.Sprintf(" %v", vals[i]))
 		}
-		return name
+		return name.String()
 	}
 }
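Switching extractNameFromClaims to strings.Builder avoids allocating a new string on every += in the loop. A self-contained sketch of the same pattern, using hypothetical claim values:

package main

import (
	"fmt"
	"strings"
)

// joinValues concatenates the formatted values separated by spaces;
// the Builder grows one backing buffer instead of copying strings repeatedly.
func joinValues(vals []any) string {
	if len(vals) == 0 {
		return ""
	}
	var name strings.Builder
	name.WriteString(fmt.Sprintf("%v", vals[0]))
	for i := 1; i < len(vals); i++ {
		name.WriteString(fmt.Sprintf(" %v", vals[i]))
	}
	return name.String()
}

func main() {
	fmt.Println(joinValues([]any{"Jane", "Q.", "Doe"})) // "Jane Q. Doe"
}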
@@ -38,7 +38,7 @@ import (
 func HandleImportFlag(flag string) error {
 	r := repository.GetJobRepository()
 
-	for _, pair := range strings.Split(flag, ",") {
+	for pair := range strings.SplitSeq(flag, ",") {
 		files := strings.Split(pair, ":")
 		if len(files) != 2 {
 			return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
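strings.SplitSeq (added in Go 1.24) returns an iterator over the substrings instead of allocating a slice, so it is a drop-in replacement when the result is only ranged over, as in HandleImportFlag. An illustrative sketch with a made-up flag value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	flag := "meta1.json:data1.json,meta2.json:data2.json" // hypothetical import flag
	// SplitSeq yields each comma-separated pair lazily; no intermediate slice is built.
	for pair := range strings.SplitSeq(flag, ",") {
		files := strings.Split(pair, ":")
		fmt.Println("meta:", files[0], "data:", files[1])
	}
}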
@@ -62,7 +62,7 @@ func (r *JobRepository) TransactionEnd(t *Transaction) error {
 func (r *JobRepository) TransactionAddNamed(
 	t *Transaction,
 	query string,
-	args ...interface{},
+	args ...any,
 ) (int64, error) {
 	if t.tx == nil {
 		return 0, fmt.Errorf("transaction is nil or already completed")
@@ -82,7 +82,7 @@ func (r *JobRepository) TransactionAddNamed(
 }
 
 // TransactionAdd executes a query within the transaction.
-func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) {
+func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...any) (int64, error) {
 	if t.tx == nil {
 		return 0, fmt.Errorf("transaction is nil or already completed")
 	}
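Since Go 1.18, any is a predeclared alias for interface{}, so rewriting variadic parameters and map or slice element types is purely textual and needs no caller changes. A tiny sketch:

package main

import "fmt"

// any is an alias for interface{}; both spellings denote the same type.
func describe(args ...any) {
	for _, a := range args {
		fmt.Printf("%T: %v\n", a, a)
	}
}

func main() {
	describe(42, "tag", true)
	var m map[string]any = map[string]interface{}{"scope": "global"} // assignable: identical types
	fmt.Println(m)
}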
@@ -189,7 +189,7 @@ func TestTransactionAddNamed(t *testing.T) {
 		tx := &Transaction{tx: nil}
 
 		_, err := r.TransactionAddNamed(tx, "INSERT INTO tag (tag_type, tag_name, tag_scope) VALUES (:type, :name, :scope)",
-			map[string]interface{}{"type": "test", "name": "test", "scope": "global"})
+			map[string]any{"type": "test", "name": "test", "scope": "global"})
 		assert.Error(t, err, "Should error on nil transaction")
 		assert.Contains(t, err.Error(), "transaction is nil or already completed")
 	})
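The test above exercises named parameters (:type, :name, :scope) bound from the keys of a map[string]any. Assuming the repository delegates to jmoiron/sqlx (a sketch of the idea only, not the actual TransactionAddNamed body):

package repository // hypothetical placement; sketch under the sqlx assumption

import "github.com/jmoiron/sqlx"

// addNamedSketch shows how a :name-style insert could run inside a transaction.
func addNamedSketch(tx *sqlx.Tx, query string, args any) (int64, error) {
	res, err := tx.NamedExec(query, args) // binds :type, :name, :scope from the map keys
	if err != nil {
		return 0, err
	}
	return res.LastInsertId()
}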
@@ -204,7 +204,7 @@ func TestTransactionMultipleOperations(t *testing.T) {
 	defer tx.Rollback()
 
 	// Insert multiple tags
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		_, err = r.TransactionAdd(tx,
 			"INSERT INTO tag (tag_type, tag_name, tag_scope) VALUES (?, ?, ?)",
 			"test_type", "test_multi_"+string(rune('a'+i)), "global")
@@ -230,7 +230,7 @@ func TestTransactionMultipleOperations(t *testing.T) {
 	require.NoError(t, err)
 
 	// Insert multiple tags
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
 		_, err = r.TransactionAdd(tx,
 			"INSERT INTO tag (tag_type, tag_name, tag_scope) VALUES (?, ?, ?)",
 			"test_type", "test_rollback_"+string(rune('a'+i)), "global")
@@ -126,7 +126,7 @@ func (r *UserRepository) AddUser(user *schema.User) error {
 	projectsJson, _ := json.Marshal(user.Projects)
 
 	cols := []string{"username", "roles", "projects"}
-	vals := []interface{}{user.Username, string(rolesJson), string(projectsJson)}
+	vals := []any{user.Username, string(rolesJson), string(projectsJson)}
 
 	if user.Name != "" {
 		cols = append(cols, "name")
@@ -392,7 +392,7 @@ func (r *UserRepository) RemoveProject(ctx context.Context, username string, pro
 	}
 
 	if exists {
-		var result interface{}
+		var result any
 		if len(newprojects) == 0 {
 			result = "[]"
 		} else {
@@ -23,7 +23,7 @@ import (
 	"github.com/go-chi/chi/v5"
 )
 
-type InfoType map[string]interface{}
+type InfoType map[string]any
 
 type Route struct {
 	Route string
@@ -193,7 +193,7 @@ func setupAnalysisRoute(i InfoType, r *http.Request) InfoType {
 func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
 	jobRepo := repository.GetJobRepository()
 	tags, counts, err := jobRepo.CountTags(repository.GetUserFromContext(r.Context()))
-	tagMap := make(map[string][]map[string]interface{})
+	tagMap := make(map[string][]map[string]any)
 	if err != nil {
 		cclog.Warnf("GetTags failed: %s", err.Error())
 		i["tagmap"] = tagMap
@@ -204,7 +204,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
 	// Uses tag.ID as second Map-Key component to differentiate tags with identical names
 	if userAuthlevel >= 4 { // Support+ : Show tags for all scopes, regardless of count
 		for _, tag := range tags {
-			tagItem := map[string]interface{}{
+			tagItem := map[string]any{
 				"id":    tag.ID,
 				"name":  tag.Name,
 				"scope": tag.Scope,
@@ -216,7 +216,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
 		for _, tag := range tags {
 			tagCount := counts[fmt.Sprint(tag.Type, tag.Name, tag.ID)]
 			if ((tag.Scope == "global" || tag.Scope == "admin") && tagCount >= 1) || (tag.Scope != "global" && tag.Scope != "admin") {
-				tagItem := map[string]interface{}{
+				tagItem := map[string]any{
 					"id":    tag.ID,
 					"name":  tag.Name,
 					"scope": tag.Scope,
@@ -232,8 +232,8 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
 }
 
 // FIXME: Lots of redundant code. Needs refactoring
-func buildFilterPresets(query url.Values) map[string]interface{} {
-	filterPresets := map[string]interface{}{}
+func buildFilterPresets(query url.Values) map[string]any {
+	filterPresets := map[string]any{}
 
 	if query.Get("cluster") != "" {
 		filterPresets["cluster"] = query.Get("cluster")
@@ -377,14 +377,14 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
 		}
 	}
 	if len(query["stat"]) != 0 {
-		statList := make([]map[string]interface{}, 0)
+		statList := make([]map[string]any, 0)
 		for _, statEntry := range query["stat"] {
 			parts := strings.Split(statEntry, "-")
 			if len(parts) == 3 { // Metric Footprint Stat Field, from - to
 				a, e1 := strconv.ParseInt(parts[1], 10, 64)
 				b, e2 := strconv.ParseInt(parts[2], 10, 64)
 				if e1 == nil && e2 == nil {
-					statEntry := map[string]interface{}{
+					statEntry := map[string]any{
 						"field": parts[0],
 						"from":  a,
 						"to":    b,
@@ -401,7 +401,6 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
 func SetupRoutes(router chi.Router, buildInfo web.Build) {
 	userCfgRepo := repository.GetUserCfgRepo()
 	for _, route := range routes {
-		route := route
 		router.HandleFunc(route.Route, func(rw http.ResponseWriter, r *http.Request) {
 			conf, err := userCfgRepo.GetUIConfig(repository.GetUserFromContext(r.Context()))
 			if err != nil {
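The removed route := route copy was a workaround for pre-1.22 loop-variable capture; since Go 1.22 each iteration declares a fresh variable, so closures registered inside the loop keep the value they saw. A minimal sketch of the behavior, with a placeholder route list:

package main

import "fmt"

func main() {
	routes := []string{"/jobs", "/users", "/tags"} // placeholder route list
	var handlers []func()
	for _, route := range routes {
		// Before Go 1.22 a "route := route" copy was needed here, because all
		// iterations shared one variable; now each closure captures its own.
		handlers = append(handlers, func() { fmt.Println(route) })
	}
	for _, h := range handlers {
		h() // prints /jobs, /users, /tags
	}
}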
@@ -410,7 +409,7 @@ func SetupRoutes(router chi.Router, buildInfo web.Build) {
 			}
 
 			title := route.Title
-			infos := route.Setup(map[string]interface{}{}, r)
+			infos := route.Setup(map[string]any{}, r)
 			if id, ok := infos["id"]; ok {
 				title = strings.Replace(route.Title, "<ID>", id.(string), 1)
 				if sid, ok := infos["sid"]; ok { // 2nd ID element
@@ -45,13 +45,13 @@ func createTargetBackend(cfg Retention) (archive.ArchiveBackend, error) {
 
 	switch cfg.TargetKind {
 	case "s3":
-		raw, err = json.Marshal(map[string]interface{}{
+		raw, err = json.Marshal(map[string]any{
 			"kind":           "s3",
 			"endpoint":       cfg.TargetEndpoint,
 			"bucket":         cfg.TargetBucket,
 			"access-key":     cfg.TargetAccessKey,
 			"secret-key":     cfg.TargetSecretKey,
 			"region":         cfg.TargetRegion,
 			"use-path-style": cfg.TargetUsePathStyle,
 		})
 	default:
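Marshaling the target settings through map[string]any yields the same JSON as the interface{} spelling; encoding/json emits map keys in sorted order either way. A small sketch with placeholder endpoint and bucket values:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Placeholder values; the real ones come from the Retention target settings.
	raw, err := json.Marshal(map[string]any{
		"kind":           "s3",
		"endpoint":       "https://s3.example.org",
		"bucket":         "cc-archive",
		"use-path-style": true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // map keys are emitted in sorted order
}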
@@ -16,6 +16,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -692,13 +693,7 @@ func (fsa *FsArchive) StoreClusterCfg(name string, config *schema.Cluster) error
 	}
 
 	// Update clusters list if new
-	found := false
-	for _, c := range fsa.clusters {
-		if c == name {
-			found = true
-			break
-		}
-	}
+	found := slices.Contains(fsa.clusters, name)
 	if !found {
 		fsa.clusters = append(fsa.clusters, name)
 	}
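slices.Contains (Go 1.21) performs the same linear membership scan as the removed loop. Sketch with a hypothetical cluster list:

package main

import (
	"fmt"
	"slices"
)

func main() {
	clusters := []string{"fritz", "alex"} // hypothetical existing clusters
	name := "woody"
	// slices.Contains does the linear scan the hand-written loop used to do.
	if !slices.Contains(clusters, name) {
		clusters = append(clusters, name)
	}
	fmt.Println(clusters) // [fritz alex woody]
}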
@@ -39,18 +39,18 @@ func (m *memTarget) WriteFile(name string, data []byte) error {
 func makeTestJob(jobID int64) (*schema.Job, *schema.JobData) {
 	meta := &schema.Job{
-		JobID: jobID,
-		Cluster: "testcluster",
-		SubCluster: "sc0",
-		Project: "testproject",
-		User: "testuser",
-		State: schema.JobStateCompleted,
-		StartTime: 1700000000,
-		Duration: 3600,
-		Walltime: 7200,
-		NumNodes: 2,
+		JobID:        jobID,
+		Cluster:      "testcluster",
+		SubCluster:   "sc0",
+		Project:      "testproject",
+		User:         "testuser",
+		State:        schema.JobStateCompleted,
+		StartTime:    1700000000,
+		Duration:     3600,
+		Walltime:     7200,
+		NumNodes:     2,
 		NumHWThreads: 16,
-		SMT: 1,
+		SMT:          1,
 		Resources: []*schema.Resource{
 			{Hostname: "node001"},
 			{Hostname: "node002"},
@@ -141,7 +141,7 @@ func TestParquetWriterSingleBatch(t *testing.T) {
 	target := newMemTarget()
 	pw := NewParquetWriter(target, 512)
 
-	for i := int64(0); i < 5; i++ {
+	for i := range int64(5) {
 		meta, data := makeTestJob(i)
 		row, err := JobToParquetRow(meta, data)
 		if err != nil {
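Range over an integer also accepts typed operands: with range int64(5) the loop variable is an int64, so makeTestJob(i) keeps compiling without a conversion. A tiny sketch:

package main

import "fmt"

func main() {
	// The loop variable adopts the type of the range operand, here int64.
	for i := range int64(3) {
		var jobID int64 = i // no conversion needed
		fmt.Println("job", jobID)
	}
}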
@@ -179,7 +179,7 @@ func TestParquetWriterBatching(t *testing.T) {
 	pw := NewParquetWriter(target, 0) // 0 MB means every job triggers a flush
 	pw.maxSizeBytes = 1               // Force flush after every row
 
-	for i := int64(0); i < 3; i++ {
+	for i := range int64(3) {
 		meta, data := makeTestJob(i)
 		row, err := JobToParquetRow(meta, data)
 		if err != nil {
@@ -263,7 +263,7 @@ func TestClusterAwareParquetWriter(t *testing.T) {
 	cw.SetClusterConfig("alex", &schema.Cluster{Name: "alex"})
 
 	// Add jobs from different clusters
-	for i := int64(0); i < 3; i++ {
+	for i := range int64(3) {
 		meta, data := makeTestJobForCluster(i, "fritz")
 		row, err := JobToParquetRow(meta, data)
 		if err != nil {
@@ -9,6 +9,7 @@ import (
 	"context"
 	"slices"
 	"strconv"
+	"strings"
 	"sync"
 
 	cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
@@ -44,11 +45,11 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
 				continue
 			}
 
-			metricName := ""
+			var metricName strings.Builder
 			for _, selectorName := range val.Selector {
-				metricName += selectorName + SelectorDelimiter
+				metricName.WriteString(selectorName + SelectorDelimiter)
 			}
-			metricName += val.MetricName
+			metricName.WriteString(val.MetricName)
 
 			var selector []string
 			selector = append(selector, val.Cluster, val.Node, strconv.FormatInt(freq, 10))
@@ -62,7 +63,7 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
 			}
 
 			if avroLevel != nil {
-				avroLevel.addMetric(metricName, val.Value, val.Timestamp, int(freq))
+				avroLevel.addMetric(metricName.String(), val.Value, val.Timestamp, int(freq))
 			}
 		default:
 			// No more messages, exit
@@ -82,13 +83,13 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
 				continue
 			}
 
-			metricName := ""
+			var metricName strings.Builder
 
 			for _, selectorName := range val.Selector {
-				metricName += selectorName + SelectorDelimiter
+				metricName.WriteString(selectorName + SelectorDelimiter)
 			}
 
-			metricName += val.MetricName
+			metricName.WriteString(val.MetricName)
 
 			// Create a new selector for the Avro level
 			// The selector is a slice of strings that represents the path to the
@@ -109,7 +110,7 @@ func DataStaging(wg *sync.WaitGroup, ctx context.Context) {
 			}
 
 			if avroLevel != nil {
-				avroLevel.addMetric(metricName, val.Value, val.Timestamp, int(freq))
+				avroLevel.addMetric(metricName.String(), val.Value, val.Timestamp, int(freq))
 			}
 		}
 	}
@@ -143,7 +143,7 @@ func TestHealthCheck(t *testing.T) {
 	// Setup test data for node003 - some metrics missing (no buffer)
 	node003 := ms.root.findLevelOrCreate([]string{"testcluster", "node003"}, len(metrics))
 	// Only create buffers for first two metrics
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
 		node003.metrics[i] = newBuffer(startTime, 10)
 		for ts := startTime; ts <= now; ts += 10 {
 			node003.metrics[i].write(ts, schema.Float(float64(i+1)))