Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2026-02-11 13:31:45 +01:00)

Merge pull request #485 from ClusterCockpit/change-web-framework
Change web framework
@@ -14,7 +14,6 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
@@ -36,8 +35,9 @@ import (
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/nats"
"github.com/ClusterCockpit/cc-lib/v2/runtime"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/cors"
httpSwagger "github.com/swaggo/http-swagger"
)

@@ -50,7 +50,7 @@ const (

// Server encapsulates the HTTP server state and dependencies
type Server struct {
router *mux.Router
router chi.Router
server *http.Server
restAPIHandle *api.RestAPI
natsAPIHandle *api.NatsAPI
@@ -70,7 +70,7 @@ func NewServer(version, commit, buildDate string) (*Server, error) {
buildInfo = web.Build{Version: version, Hash: commit, Buildtime: buildDate}

s := &Server{
router: mux.NewRouter(),
router: chi.NewRouter(),
}

if err := s.init(); err != nil {
@@ -106,6 +106,27 @@ func (s *Server) init() error {

authHandle := auth.GetAuthInstance()

// Middleware must be defined before routes in chi
s.router.Use(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
start := time.Now()
ww := middleware.NewWrapResponseWriter(rw, r.ProtoMajor)
next.ServeHTTP(ww, r)
cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
r.Method, r.URL.RequestURI(),
ww.Status(), float32(ww.BytesWritten())/1024,
time.Since(start).Milliseconds())
})
})
s.router.Use(middleware.Compress(5))
s.router.Use(middleware.Recoverer)
s.router.Use(cors.Handler(cors.Options{
AllowCredentials: true,
AllowedHeaders: []string{"X-Requested-With", "Content-Type", "Authorization", "Origin"},
AllowedMethods: []string{"GET", "POST", "HEAD", "OPTIONS"},
AllowedOrigins: []string{"*"},
}))

s.restAPIHandle = api.New()

info := map[string]any{}
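Note on the "middleware must be defined before routes" comment above: chi's mux panics if Use is called after the first route is registered, whereas gorilla/mux middleware could be attached at any point (the old code wired the handlers.* wrappers after the routes and the logging handler at server start). A minimal sketch of the required ordering, with a placeholder route:

package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

func main() {
	r := chi.NewRouter()

	// 1. Middleware first: calling Use() after a route has been
	// registered makes chi panic at startup.
	r.Use(middleware.Recoverer)
	r.Use(middleware.Compress(5))

	// 2. Routes afterwards.
	r.Get("/healthz", func(rw http.ResponseWriter, _ *http.Request) {
		rw.WriteHeader(http.StatusOK)
	})

	http.ListenAndServe(":8080", r)
}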
@@ -117,11 +138,11 @@ func (s *Server) init() error {
info["hasOpenIDConnect"] = true
}

s.router.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
s.router.Get("/login", func(rw http.ResponseWriter, r *http.Request) {
rw.Header().Add("Content-Type", "text/html; charset=utf-8")
cclog.Debugf("##%v##", info)
web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo, Infos: info})
}).Methods(http.MethodGet)
})
s.router.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) {
rw.Header().Add("Content-Type", "text/html; charset=utf-8")
web.RenderTemplate(rw, "imprint.tmpl", &web.Page{Title: "Imprint", Build: buildInfo})
@@ -131,13 +152,6 @@ func (s *Server) init() error {
web.RenderTemplate(rw, "privacy.tmpl", &web.Page{Title: "Privacy", Build: buildInfo})
})

secured := s.router.PathPrefix("/").Subrouter()
securedapi := s.router.PathPrefix("/api").Subrouter()
userapi := s.router.PathPrefix("/userapi").Subrouter()
configapi := s.router.PathPrefix("/config").Subrouter()
frontendapi := s.router.PathPrefix("/frontend").Subrouter()
metricstoreapi := s.router.PathPrefix("/api").Subrouter()

if !config.Keys.DisableAuthentication {
// Create login failure handler (used by both /login and /jwt-login)
loginFailureHandler := func(rw http.ResponseWriter, r *http.Request, err error) {
@@ -152,10 +166,10 @@ func (s *Server) init() error {
})
}

s.router.Handle("/login", authHandle.Login(loginFailureHandler)).Methods(http.MethodPost)
s.router.Handle("/jwt-login", authHandle.Login(loginFailureHandler))
s.router.Post("/login", authHandle.Login(loginFailureHandler).ServeHTTP)
s.router.HandleFunc("/jwt-login", authHandle.Login(loginFailureHandler).ServeHTTP)

s.router.Handle("/logout", authHandle.Logout(
s.router.Post("/logout", authHandle.Logout(
http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Header().Add("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
@@ -166,86 +180,97 @@ func (s *Server) init() error {
Build: buildInfo,
Infos: info,
})
}))).Methods(http.MethodPost)

secured.Use(func(next http.Handler) http.Handler {
return authHandle.Auth(
// On success;
next,

// On failure:
func(rw http.ResponseWriter, r *http.Request, err error) {
rw.WriteHeader(http.StatusUnauthorized)
web.RenderTemplate(rw, "login.tmpl", &web.Page{
Title: "Authentication failed - ClusterCockpit",
MsgType: "alert-danger",
Message: err.Error(),
Build: buildInfo,
Infos: info,
Redirect: r.RequestURI,
})
})
})

securedapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthAPI(
// On success;
next,
// On failure: JSON Response
onFailureResponse)
})

userapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthUserAPI(
// On success;
next,
// On failure: JSON Response
onFailureResponse)
})

metricstoreapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthMetricStoreAPI(
// On success;
next,
// On failure: JSON Response
onFailureResponse)
})

configapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthConfigAPI(
// On success;
next,
// On failure: JSON Response
onFailureResponse)
})

frontendapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthFrontendAPI(
// On success;
next,
// On failure: JSON Response
onFailureResponse)
})
})).ServeHTTP)
}

if flagDev {
s.router.Handle("/playground", playground.Handler("GraphQL playground", "/query"))
s.router.PathPrefix("/swagger/").Handler(httpSwagger.Handler(
httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet)
s.router.Get("/swagger/*", httpSwagger.Handler(
httpSwagger.URL("http://"+config.Keys.Addr+"/swagger/doc.json")))
}
secured.Handle("/query", graphQLServer)

// Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project.
secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) {
routerConfig.HandleSearchBar(rw, r, buildInfo)
// Secured routes (require authentication)
s.router.Group(func(secured chi.Router) {
if !config.Keys.DisableAuthentication {
secured.Use(func(next http.Handler) http.Handler {
return authHandle.Auth(
next,
func(rw http.ResponseWriter, r *http.Request, err error) {
rw.WriteHeader(http.StatusUnauthorized)
web.RenderTemplate(rw, "login.tmpl", &web.Page{
Title: "Authentication failed - ClusterCockpit",
MsgType: "alert-danger",
Message: err.Error(),
Build: buildInfo,
Infos: info,
Redirect: r.RequestURI,
})
})
})
}

secured.Handle("/query", graphQLServer)

secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) {
routerConfig.HandleSearchBar(rw, r, buildInfo)
})

routerConfig.SetupRoutes(secured, buildInfo)
})

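With gorilla/mux the secured pages lived on a PathPrefix("/") subrouter; with chi they are registered inside an inline Group, so the auth middleware attached there applies only to the routes declared in that closure and not to /login, /imprint, or the static assets. A minimal sketch of the pattern, with placeholder handlers and a hypothetical requireSession middleware standing in for authHandle.Auth:

package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

// requireSession is a stand-in for the real auth middleware: it wraps
// the next handler and rejects requests without a session cookie.
func requireSession(next http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		if _, err := r.Cookie("session"); err != nil {
			http.Error(rw, "unauthorized", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(rw, r)
	})
}

func main() {
	r := chi.NewRouter()

	// Public route: registered directly on the router, no auth middleware.
	r.Get("/login", func(rw http.ResponseWriter, _ *http.Request) {
		rw.Write([]byte("login page"))
	})

	// Inline group: same mount point as the parent, but its own
	// middleware stack, scoped to the routes inside the closure.
	r.Group(func(secured chi.Router) {
		secured.Use(requireSession)
		secured.Get("/monitoring/jobs/", func(rw http.ResponseWriter, _ *http.Request) {
			rw.Write([]byte("job list"))
		})
	})

	http.ListenAndServe(":8080", r)
}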
// Mount all /monitoring/... and /api/... routes.
routerConfig.SetupRoutes(secured, buildInfo)
s.restAPIHandle.MountAPIRoutes(securedapi)
s.restAPIHandle.MountUserAPIRoutes(userapi)
s.restAPIHandle.MountConfigAPIRoutes(configapi)
s.restAPIHandle.MountFrontendAPIRoutes(frontendapi)
// API routes (JWT token auth)
s.router.Route("/api", func(apiRouter chi.Router) {
// Main API routes with API auth
apiRouter.Group(func(securedapi chi.Router) {
if !config.Keys.DisableAuthentication {
securedapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthAPI(next, onFailureResponse)
})
}
s.restAPIHandle.MountAPIRoutes(securedapi)
})

// Metric store API routes with separate auth
apiRouter.Group(func(metricstoreapi chi.Router) {
if !config.Keys.DisableAuthentication {
metricstoreapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthMetricStoreAPI(next, onFailureResponse)
})
}
s.restAPIHandle.MountMetricStoreAPIRoutes(metricstoreapi)
})
})

// User API routes
s.router.Route("/userapi", func(userapi chi.Router) {
if !config.Keys.DisableAuthentication {
userapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthUserAPI(next, onFailureResponse)
})
}
s.restAPIHandle.MountUserAPIRoutes(userapi)
})

// Config API routes (uses Group with full paths to avoid shadowing
// the /config page route that is registered in the secured group)
s.router.Group(func(configapi chi.Router) {
if !config.Keys.DisableAuthentication {
configapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthConfigAPI(next, onFailureResponse)
})
}
s.restAPIHandle.MountConfigAPIRoutes(configapi)
})

// Frontend API routes
s.router.Route("/frontend", func(frontendapi chi.Router) {
if !config.Keys.DisableAuthentication {
frontendapi.Use(func(next http.Handler) http.Handler {
return authHandle.AuthFrontendAPI(next, onFailureResponse)
})
}
s.restAPIHandle.MountFrontendAPIRoutes(frontendapi)
})

if config.Keys.APISubjects != nil {
s.natsAPIHandle = api.NewNatsAPI()
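The Route/Group distinction used above is worth spelling out: chi's Route mounts a fresh sub-router under a path prefix (so the mounted handlers register relative patterns such as /jobs/), while Group stays in the parent's pattern space and only scopes middleware, which is why the config API keeps full /config/... paths instead of claiming the whole /config subtree. A minimal sketch of the two, with placeholder handlers:

package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()
	ok := func(rw http.ResponseWriter, _ *http.Request) { rw.WriteHeader(http.StatusOK) }

	// Route: mounts a sub-router at /userapi; the inner pattern is
	// relative, so this serves GET /userapi/jobs/.
	r.Route("/userapi", func(userapi chi.Router) {
		userapi.Get("/jobs/", ok)
	})

	// Group: no prefix of its own, patterns stay absolute. This serves
	// POST /config/users/ without shadowing a separate page handler
	// registered for GET /config.
	r.Group(func(configapi chi.Router) {
		configapi.Post("/config/users/", ok)
	})

	r.Get("/config", ok) // page route, untouched by the group above

	http.ListenAndServe(":8080", r)
}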
@@ -254,28 +279,55 @@
}
}

s.restAPIHandle.MountMetricStoreAPIRoutes(metricstoreapi)
// 404 handler for pages and API routes
notFoundHandler := func(rw http.ResponseWriter, r *http.Request) {
if strings.HasPrefix(r.URL.Path, "/api/") || strings.HasPrefix(r.URL.Path, "/userapi/") ||
strings.HasPrefix(r.URL.Path, "/frontend/") || strings.HasPrefix(r.URL.Path, "/config/") {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(http.StatusNotFound)
json.NewEncoder(rw).Encode(map[string]string{
"status": "Resource not found",
"error": "the requested endpoint does not exist",
})
return
}
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusNotFound)
web.RenderTemplate(rw, "404.tmpl", &web.Page{
Title: "Page Not Found",
Build: buildInfo,
})
}

if config.Keys.EmbedStaticFiles {
if i, err := os.Stat("./var/img"); err == nil {
if i.IsDir() {
cclog.Info("Use local directory for static images")
s.router.PathPrefix("/img/").Handler(http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img"))))
s.router.Handle("/img/*", http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img"))))
}
}
s.router.PathPrefix("/").Handler(http.StripPrefix("/", web.ServeFiles()))
fileServer := http.StripPrefix("/", web.ServeFiles())
s.router.Handle("/*", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if web.StaticFileExists(r.URL.Path) {
fileServer.ServeHTTP(rw, r)
return
}
notFoundHandler(rw, r)
}))
} else {
s.router.PathPrefix("/").Handler(http.FileServer(http.Dir(config.Keys.StaticFiles)))
staticDir := http.Dir(config.Keys.StaticFiles)
fileServer := http.FileServer(staticDir)
s.router.Handle("/*", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
f, err := staticDir.Open(r.URL.Path)
if err == nil {
f.Close()
fileServer.ServeHTTP(rw, r)
return
}
notFoundHandler(rw, r)
}))
}

s.router.Use(handlers.CompressHandler)
s.router.Use(handlers.RecoveryHandler(handlers.PrintRecoveryStack(true)))
s.router.Use(handlers.CORS(
handlers.AllowCredentials(),
handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization", "Origin"}),
handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}),
handlers.AllowedOrigins([]string{"*"})))

return nil
}

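Since chi has no PathPrefix catch-all, the new code registers an explicit "/*" handler that first checks whether a static file exists and otherwise falls through to the shared notFoundHandler, which returns JSON for API prefixes and the 404 template for pages. A quick httptest sketch of how the JSON branch could be exercised in isolation (the handler body is condensed from the diff; the harness itself is an assumption, not part of the PR):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	notFound := func(rw http.ResponseWriter, r *http.Request) {
		if strings.HasPrefix(r.URL.Path, "/api/") {
			rw.Header().Set("Content-Type", "application/json")
			rw.WriteHeader(http.StatusNotFound)
			json.NewEncoder(rw).Encode(map[string]string{
				"status": "Resource not found",
				"error":  "the requested endpoint does not exist",
			})
			return
		}
		http.NotFound(rw, r)
	}

	req := httptest.NewRequest(http.MethodGet, "/api/does-not-exist", nil)
	rec := httptest.NewRecorder()
	notFound(rec, req)

	fmt.Println(rec.Code)                         // 404
	fmt.Println(rec.Header().Get("Content-Type")) // application/json
	fmt.Println(strings.TrimSpace(rec.Body.String()))
}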
@@ -286,20 +338,6 @@ const (
)

func (s *Server) Start(ctx context.Context) error {
handler := handlers.CustomLoggingHandler(io.Discard, s.router, func(_ io.Writer, params handlers.LogFormatterParams) {
if strings.HasPrefix(params.Request.RequestURI, "/api/") {
cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
params.Request.Method, params.URL.RequestURI(),
params.StatusCode, float32(params.Size)/1024,
time.Since(params.TimeStamp).Milliseconds())
} else {
cclog.Debugf("%s %s (%d, %.02fkb, %dms)",
params.Request.Method, params.URL.RequestURI(),
params.StatusCode, float32(params.Size)/1024,
time.Since(params.TimeStamp).Milliseconds())
}
})

// Use configurable timeouts with defaults
readTimeout := time.Duration(defaultReadTimeout) * time.Second
writeTimeout := time.Duration(defaultWriteTimeout) * time.Second
@@ -307,7 +345,7 @@ func (s *Server) Start(ctx context.Context) error {
s.server = &http.Server{
ReadTimeout: readTimeout,
WriteTimeout: writeTimeout,
Handler: handler,
Handler: s.router,
Addr: config.Keys.Addr,
}

17
go.mod
@@ -1,8 +1,6 @@
|
||||
module github.com/ClusterCockpit/cc-backend
|
||||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.1
|
||||
go 1.24.9
|
||||
|
||||
tool (
|
||||
github.com/99designs/gqlgen
|
||||
@@ -19,19 +17,20 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0
|
||||
github.com/coreos/go-oidc/v3 v3.17.0
|
||||
github.com/expr-lang/expr v1.17.7
|
||||
github.com/go-chi/chi/v5 v5.2.5
|
||||
github.com/go-chi/cors v1.2.2
|
||||
github.com/go-co-op/gocron/v2 v2.19.0
|
||||
github.com/go-ldap/ldap/v3 v3.4.12
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0
|
||||
github.com/golang-migrate/migrate/v4 v4.19.1
|
||||
github.com/google/gops v0.3.28
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gorilla/sessions v1.4.0
|
||||
github.com/influxdata/line-protocol/v2 v2.2.1
|
||||
github.com/jmoiron/sqlx v1.4.0
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/linkedin/goavro/v2 v2.14.1
|
||||
github.com/mattn/go-sqlite3 v1.14.33
|
||||
github.com/parquet-go/parquet-go v0.27.0
|
||||
github.com/qustavo/sqlhooks/v2 v2.1.0
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
@@ -47,6 +46,7 @@ require (
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
@@ -65,7 +65,6 @@ require (
|
||||
github.com/aws/smithy-go v1.24.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
@@ -82,7 +81,6 @@ require (
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/goccy/go-yaml v1.19.0 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
@@ -98,6 +96,9 @@ require (
|
||||
github.com/nats-io/nkeys v0.4.12 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/oapi-codegen/runtime v1.1.1 // indirect
|
||||
github.com/parquet-go/bitpack v1.0.0 // indirect
|
||||
github.com/parquet-go/jsonlite v1.0.0 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/common v0.67.4 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||
@@ -106,6 +107,7 @@ require (
|
||||
github.com/stmcginnis/gofish v0.20.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/swaggo/files v1.0.1 // indirect
|
||||
github.com/twpayne/go-geom v1.6.1 // indirect
|
||||
github.com/urfave/cli/v2 v2.27.7 // indirect
|
||||
github.com/urfave/cli/v3 v3.6.1 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
|
||||
@@ -118,6 +120,7 @@ require (
|
||||
golang.org/x/sys v0.39.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/tools v0.40.0 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
30
go.sum
@@ -6,6 +6,8 @@ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/ClusterCockpit/cc-lib/v2 v2.2.1 h1:iCVas+Jc61zFH5S2VG3H1sc7tsn+U4lOJwUYjYZEims=
|
||||
github.com/ClusterCockpit/cc-lib/v2 v2.2.1/go.mod h1:JuxMAuEOaLLNEnnL9U3ejha8kMvsSatLdKPZEgJw6iw=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
|
||||
@@ -17,10 +19,16 @@ github.com/PuerkitoBio/goquery v1.11.0/go.mod h1:wQHgxUOU3JGuj3oD/QFfxUdlzW6xPHf
|
||||
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
|
||||
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
|
||||
github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
|
||||
github.com/alecthomas/assert/v2 v2.10.0 h1:jjRCHsj6hBJhkmhznrCzoNpbA3zqy0fYiUcYZP/GkPY=
|
||||
github.com/alecthomas/assert/v2 v2.10.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
||||
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
|
||||
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
|
||||
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
|
||||
github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM=
|
||||
@@ -85,8 +93,6 @@ github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7c
|
||||
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||
github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
|
||||
github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
||||
github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
||||
github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
|
||||
@@ -95,6 +101,10 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug=
|
||||
github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
|
||||
github.com/go-chi/cors v1.2.2 h1:Jmey33TE+b+rB7fT8MUy1u0I4L+NARQlK6LhzKPSyQE=
|
||||
github.com/go-chi/cors v1.2.2/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
|
||||
github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU=
|
||||
github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
|
||||
@@ -154,8 +164,6 @@ github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
|
||||
github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
|
||||
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
|
||||
@@ -168,6 +176,8 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
|
||||
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
|
||||
@@ -238,6 +248,14 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
|
||||
github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro=
|
||||
github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/parquet-go/bitpack v1.0.0 h1:AUqzlKzPPXf2bCdjfj4sTeacrUwsT7NlcYDMUQxPcQA=
|
||||
github.com/parquet-go/bitpack v1.0.0/go.mod h1:XnVk9TH+O40eOOmvpAVZ7K2ocQFrQwysLMnc6M/8lgs=
|
||||
github.com/parquet-go/jsonlite v1.0.0 h1:87QNdi56wOfsE5bdgas0vRzHPxfJgzrXGml1zZdd7VU=
|
||||
github.com/parquet-go/jsonlite v1.0.0/go.mod h1:nDjpkpL4EOtqs6NQugUsi0Rleq9sW/OtC1NnZEnxzF0=
|
||||
github.com/parquet-go/parquet-go v0.27.0 h1:vHWK2xaHbj+v1DYps03yDRpEsdtOeKbhiXUaixoPb3g=
|
||||
github.com/parquet-go/parquet-go v0.27.0/go.mod h1:navtkAYr2LGoJVp141oXPlO/sxLvaOe3la2JEoD8+rg=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
@@ -285,6 +303,8 @@ github.com/swaggo/http-swagger v1.3.4 h1:q7t/XLx0n15H1Q9/tk3Y9L4n210XzJF5WtnDX64
|
||||
github.com/swaggo/http-swagger v1.3.4/go.mod h1:9dAh0unqMBAlbp1uE2Uc2mQTxNMU/ha4UbucIg1MFkQ=
|
||||
github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
|
||||
github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
|
||||
github.com/twpayne/go-geom v1.6.1 h1:iLE+Opv0Ihm/ABIcvQFGIiFBXd76oBIar9drAwHFhR4=
|
||||
github.com/twpayne/go-geom v1.6.1/go.mod h1:Kr+Nly6BswFsKM5sd31YaoWS5PeDDH2NftJTK7Gd028=
|
||||
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
|
||||
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
|
||||
github.com/urfave/cli/v3 v3.6.1 h1:j8Qq8NyUawj/7rTYdBGrxcH7A/j7/G8Q5LhWEW4G3Mo=
|
||||
@@ -293,6 +313,8 @@ github.com/vektah/gqlparser/v2 v2.5.31 h1:YhWGA1mfTjID7qJhd1+Vxhpk5HTgydrGU9IgkW
|
||||
github.com/vektah/gqlparser/v2 v2.5.31/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts=
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg=
|
||||
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
ccconf "github.com/ClusterCockpit/cc-lib/v2/ccConfig"
|
||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
@@ -216,9 +216,7 @@ func TestRestApi(t *testing.T) {
|
||||
return testData, nil
|
||||
}
|
||||
|
||||
r := mux.NewRouter()
|
||||
r.PathPrefix("/api").Subrouter()
|
||||
r.StrictSlash(true)
|
||||
r := chi.NewRouter()
|
||||
restapi.MountAPIRoutes(r)
|
||||
|
||||
var TestJobID int64 = 123
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
"github.com/ClusterCockpit/cc-backend/pkg/archive"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema"
"github.com/gorilla/mux"
"github.com/go-chi/chi/v5"
)

const (
@@ -243,10 +243,10 @@ func (api *RestAPI) getJobs(rw http.ResponseWriter, r *http.Request) {
// @router /api/jobs/{id} [get]
func (api *RestAPI) getCompleteJobByID(rw http.ResponseWriter, r *http.Request) {
// Fetch job from db
id, ok := mux.Vars(r)["id"]
id := chi.URLParam(r, "id")
var job *schema.Job
var err error
if ok {
if id != "" {
id, e := strconv.ParseInt(id, 10, 64)
if e != nil {
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
@@ -336,10 +336,10 @@ func (api *RestAPI) getCompleteJobByID(rw http.ResponseWriter, r *http.Request)
// @router /api/jobs/{id} [post]
func (api *RestAPI) getJobByID(rw http.ResponseWriter, r *http.Request) {
// Fetch job from db
id, ok := mux.Vars(r)["id"]
id := chi.URLParam(r, "id")
var job *schema.Job
var err error
if ok {
if id != "" {
id, e := strconv.ParseInt(id, 10, 64)
if e != nil {
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
@@ -439,7 +439,7 @@ func (api *RestAPI) getJobByID(rw http.ResponseWriter, r *http.Request) {
// @security ApiKeyAuth
// @router /api/jobs/edit_meta/{id} [post]
func (api *RestAPI) editMeta(rw http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
if err != nil {
handleError(fmt.Errorf("parsing job ID failed: %w", err), http.StatusBadRequest, rw)
return
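The recurring pattern in these handlers: mux.Vars(r)["id"] returned a value plus a map-lookup ok flag, while chi.URLParam(r, "id") simply returns an empty string when the parameter is absent, so the "if ok" guard becomes an "if id != \"\"" check. A minimal standalone illustration of the chi side (route and messages are placeholders):

package main

import (
	"fmt"
	"net/http"
	"strconv"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()

	r.Get("/jobs/{id}", func(rw http.ResponseWriter, req *http.Request) {
		id := chi.URLParam(req, "id") // "" if the pattern carried no {id}
		if id == "" {
			http.Error(rw, "missing id", http.StatusBadRequest)
			return
		}
		n, err := strconv.ParseInt(id, 10, 64)
		if err != nil {
			http.Error(rw, "integer expected in path for id", http.StatusBadRequest)
			return
		}
		fmt.Fprintf(rw, "job %d\n", n)
	})

	http.ListenAndServe(":8080", r)
}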
@@ -487,7 +487,7 @@ func (api *RestAPI) editMeta(rw http.ResponseWriter, r *http.Request) {
|
||||
// @security ApiKeyAuth
|
||||
// @router /api/jobs/tag_job/{id} [post]
|
||||
func (api *RestAPI) tagJob(rw http.ResponseWriter, r *http.Request) {
|
||||
id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
|
||||
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
|
||||
if err != nil {
|
||||
handleError(fmt.Errorf("parsing job ID failed: %w", err), http.StatusBadRequest, rw)
|
||||
return
|
||||
@@ -551,7 +551,7 @@ func (api *RestAPI) tagJob(rw http.ResponseWriter, r *http.Request) {
|
||||
// @security ApiKeyAuth
|
||||
// @router /jobs/tag_job/{id} [delete]
|
||||
func (api *RestAPI) removeTagJob(rw http.ResponseWriter, r *http.Request) {
|
||||
id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
|
||||
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
|
||||
if err != nil {
|
||||
handleError(fmt.Errorf("parsing job ID failed: %w", err), http.StatusBadRequest, rw)
|
||||
return
|
||||
@@ -786,9 +786,9 @@ func (api *RestAPI) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
|
||||
// @router /api/jobs/delete_job/{id} [delete]
|
||||
func (api *RestAPI) deleteJobByID(rw http.ResponseWriter, r *http.Request) {
|
||||
// Fetch job (that will be stopped) from db
|
||||
id, ok := mux.Vars(r)["id"]
|
||||
id := chi.URLParam(r, "id")
|
||||
var err error
|
||||
if ok {
|
||||
if id != "" {
|
||||
id, e := strconv.ParseInt(id, 10, 64)
|
||||
if e != nil {
|
||||
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
|
||||
@@ -885,9 +885,9 @@ func (api *RestAPI) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
|
||||
func (api *RestAPI) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
|
||||
var cnt int
|
||||
// Fetch job (that will be stopped) from db
|
||||
id, ok := mux.Vars(r)["ts"]
|
||||
id := chi.URLParam(r, "ts")
|
||||
var err error
|
||||
if ok {
|
||||
if id != "" {
|
||||
ts, e := strconv.ParseInt(id, 10, 64)
|
||||
if e != nil {
|
||||
handleError(fmt.Errorf("integer expected in path for ts: %w", e), http.StatusBadRequest, rw)
|
||||
@@ -976,7 +976,7 @@ func (api *RestAPI) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
|
||||
}
|
||||
|
||||
func (api *RestAPI) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
|
||||
id := mux.Vars(r)["id"]
|
||||
id := chi.URLParam(r, "id")
|
||||
metrics := r.URL.Query()["metric"]
|
||||
var scopes []schema.MetricScope
|
||||
for _, scope := range r.URL.Query()["scope"] {
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||
"github.com/ClusterCockpit/cc-lib/v2/util"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
// @title ClusterCockpit REST API
|
||||
@@ -73,91 +73,95 @@ func New() *RestAPI {
|
||||
|
||||
// MountAPIRoutes registers REST API endpoints for job and cluster management.
|
||||
// These routes use JWT token authentication via the X-Auth-Token header.
|
||||
func (api *RestAPI) MountAPIRoutes(r *mux.Router) {
|
||||
r.StrictSlash(true)
|
||||
func (api *RestAPI) MountAPIRoutes(r chi.Router) {
|
||||
// REST API Uses TokenAuth
|
||||
// User List
|
||||
r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet)
|
||||
r.Get("/users/", api.getUsers)
|
||||
// Cluster List
|
||||
r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet)
|
||||
r.Get("/clusters/", api.getClusters)
|
||||
// Slurm node state
|
||||
r.HandleFunc("/nodestate/", api.updateNodeStates).Methods(http.MethodPost, http.MethodPut)
|
||||
r.Post("/nodestate/", api.updateNodeStates)
|
||||
r.Put("/nodestate/", api.updateNodeStates)
|
||||
// Job Handler
|
||||
if config.Keys.APISubjects == nil {
|
||||
cclog.Info("Enabling REST start/stop job API")
|
||||
r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut)
|
||||
r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut)
|
||||
r.Post("/jobs/start_job/", api.startJob)
|
||||
r.Put("/jobs/start_job/", api.startJob)
|
||||
r.Post("/jobs/stop_job/", api.stopJobByRequest)
|
||||
r.Put("/jobs/stop_job/", api.stopJobByRequest)
|
||||
}
|
||||
r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet)
|
||||
r.HandleFunc("/jobs/used_nodes", api.getUsedNodes).Methods(http.MethodGet)
|
||||
r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch)
|
||||
r.HandleFunc("/jobs/tag_job/{id}", api.removeTagJob).Methods(http.MethodDelete)
|
||||
r.HandleFunc("/jobs/edit_meta/{id}", api.editMeta).Methods(http.MethodPost, http.MethodPatch)
|
||||
r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet)
|
||||
r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete)
|
||||
r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobByID).Methods(http.MethodDelete)
|
||||
r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete)
|
||||
r.HandleFunc("/jobs/{id}", api.getJobByID).Methods(http.MethodPost)
|
||||
r.HandleFunc("/jobs/{id}", api.getCompleteJobByID).Methods(http.MethodGet)
|
||||
r.Get("/jobs/", api.getJobs)
|
||||
r.Get("/jobs/used_nodes", api.getUsedNodes)
|
||||
r.Post("/jobs/tag_job/{id}", api.tagJob)
|
||||
r.Patch("/jobs/tag_job/{id}", api.tagJob)
|
||||
r.Delete("/jobs/tag_job/{id}", api.removeTagJob)
|
||||
r.Post("/jobs/edit_meta/{id}", api.editMeta)
|
||||
r.Patch("/jobs/edit_meta/{id}", api.editMeta)
|
||||
r.Get("/jobs/metrics/{id}", api.getJobMetrics)
|
||||
r.Delete("/jobs/delete_job/", api.deleteJobByRequest)
|
||||
r.Delete("/jobs/delete_job/{id}", api.deleteJobByID)
|
||||
r.Delete("/jobs/delete_job_before/{ts}", api.deleteJobBefore)
|
||||
r.Post("/jobs/{id}", api.getJobByID)
|
||||
r.Get("/jobs/{id}", api.getCompleteJobByID)
|
||||
|
||||
r.HandleFunc("/tags/", api.removeTags).Methods(http.MethodDelete)
|
||||
r.Delete("/tags/", api.removeTags)
|
||||
|
||||
if api.MachineStateDir != "" {
|
||||
r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet)
|
||||
r.HandleFunc("/machine_state/{cluster}/{host}", api.putMachineState).Methods(http.MethodPut, http.MethodPost)
|
||||
r.Get("/machine_state/{cluster}/{host}", api.getMachineState)
|
||||
r.Put("/machine_state/{cluster}/{host}", api.putMachineState)
|
||||
r.Post("/machine_state/{cluster}/{host}", api.putMachineState)
|
||||
}
|
||||
}
|
||||
|
||||
// MountUserAPIRoutes registers user-accessible REST API endpoints.
|
||||
// These are limited endpoints for regular users with JWT token authentication.
|
||||
func (api *RestAPI) MountUserAPIRoutes(r *mux.Router) {
|
||||
r.StrictSlash(true)
|
||||
func (api *RestAPI) MountUserAPIRoutes(r chi.Router) {
|
||||
// REST API Uses TokenAuth
|
||||
r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet)
|
||||
r.HandleFunc("/jobs/{id}", api.getJobByID).Methods(http.MethodPost)
|
||||
r.HandleFunc("/jobs/{id}", api.getCompleteJobByID).Methods(http.MethodGet)
|
||||
r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet)
|
||||
r.Get("/jobs/", api.getJobs)
|
||||
r.Post("/jobs/{id}", api.getJobByID)
|
||||
r.Get("/jobs/{id}", api.getCompleteJobByID)
|
||||
r.Get("/jobs/metrics/{id}", api.getJobMetrics)
|
||||
}
|
||||
|
||||
// MountMetricStoreAPIRoutes registers metric storage API endpoints.
|
||||
// These endpoints handle metric data ingestion and health checks with JWT token authentication.
|
||||
func (api *RestAPI) MountMetricStoreAPIRoutes(r *mux.Router) {
|
||||
func (api *RestAPI) MountMetricStoreAPIRoutes(r chi.Router) {
|
||||
// REST API Uses TokenAuth
|
||||
// Note: StrictSlash handles trailing slash variations automatically
|
||||
r.HandleFunc("/free", freeMetrics).Methods(http.MethodPost)
|
||||
r.HandleFunc("/write", writeMetrics).Methods(http.MethodPost)
|
||||
r.HandleFunc("/debug", debugMetrics).Methods(http.MethodGet)
|
||||
r.HandleFunc("/healthcheck", api.updateNodeStates).Methods(http.MethodPost)
|
||||
r.Post("/free", freeMetrics)
|
||||
r.Post("/write", writeMetrics)
|
||||
r.Get("/debug", debugMetrics)
|
||||
r.Post("/healthcheck", api.updateNodeStates)
|
||||
// Same endpoints but with trailing slash
|
||||
r.HandleFunc("/free/", freeMetrics).Methods(http.MethodPost)
|
||||
r.HandleFunc("/write/", writeMetrics).Methods(http.MethodPost)
|
||||
r.HandleFunc("/debug/", debugMetrics).Methods(http.MethodGet)
|
||||
r.HandleFunc("/healthcheck/", api.updateNodeStates).Methods(http.MethodPost)
|
||||
r.Post("/free/", freeMetrics)
|
||||
r.Post("/write/", writeMetrics)
|
||||
r.Get("/debug/", debugMetrics)
|
||||
r.Post("/healthcheck/", api.updateNodeStates)
|
||||
}
|
||||
|
||||
// MountConfigAPIRoutes registers configuration and user management endpoints.
|
||||
// These routes use session-based authentication and require admin privileges.
|
||||
func (api *RestAPI) MountConfigAPIRoutes(r *mux.Router) {
|
||||
r.StrictSlash(true)
|
||||
// Routes use full paths (including /config prefix) to avoid conflicting with
|
||||
// the /config page route when registered via Group instead of Route.
|
||||
func (api *RestAPI) MountConfigAPIRoutes(r chi.Router) {
|
||||
// Settings Frontend Uses SessionAuth
|
||||
if api.Authentication != nil {
|
||||
r.HandleFunc("/roles/", api.getRoles).Methods(http.MethodGet)
|
||||
r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut)
|
||||
r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet)
|
||||
r.HandleFunc("/users/", api.deleteUser).Methods(http.MethodDelete)
|
||||
r.HandleFunc("/user/{id}", api.updateUser).Methods(http.MethodPost)
|
||||
r.HandleFunc("/notice/", api.editNotice).Methods(http.MethodPost)
|
||||
r.Get("/config/roles/", api.getRoles)
|
||||
r.Post("/config/users/", api.createUser)
|
||||
r.Put("/config/users/", api.createUser)
|
||||
r.Get("/config/users/", api.getUsers)
|
||||
r.Delete("/config/users/", api.deleteUser)
|
||||
r.Post("/config/user/{id}", api.updateUser)
|
||||
r.Post("/config/notice/", api.editNotice)
|
||||
}
|
||||
}
|
||||
|
||||
// MountFrontendAPIRoutes registers frontend-specific API endpoints.
|
||||
// These routes support JWT generation and user configuration updates with session authentication.
|
||||
func (api *RestAPI) MountFrontendAPIRoutes(r *mux.Router) {
|
||||
r.StrictSlash(true)
|
||||
func (api *RestAPI) MountFrontendAPIRoutes(r chi.Router) {
|
||||
// Settings Frontend Uses SessionAuth
|
||||
if api.Authentication != nil {
|
||||
r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet)
|
||||
r.HandleFunc("/configuration/", api.updateConfiguration).Methods(http.MethodPost)
|
||||
r.Get("/jwt/", api.getJWT)
|
||||
r.Post("/configuration/", api.updateConfiguration)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -381,9 +385,8 @@ func (api *RestAPI) putMachineState(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
cluster := vars["cluster"]
|
||||
host := vars["host"]
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
host := chi.URLParam(r, "host")
|
||||
|
||||
if err := validatePathComponent(cluster, "cluster name"); err != nil {
|
||||
handleError(err, http.StatusBadRequest, rw)
|
||||
@@ -434,9 +437,8 @@ func (api *RestAPI) getMachineState(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
cluster := vars["cluster"]
|
||||
host := vars["host"]
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
host := chi.URLParam(r, "host")
|
||||
|
||||
if err := validatePathComponent(cluster, "cluster name"); err != nil {
|
||||
handleError(err, http.StatusBadRequest, rw)
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
type APIReturnedUser struct {
|
||||
@@ -91,7 +91,7 @@ func (api *RestAPI) updateUser(rw http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Handle role updates
|
||||
if newrole != "" {
|
||||
if err := repository.GetUserRepository().AddRole(r.Context(), mux.Vars(r)["id"], newrole); err != nil {
|
||||
if err := repository.GetUserRepository().AddRole(r.Context(), chi.URLParam(r, "id"), newrole); err != nil {
|
||||
handleError(fmt.Errorf("adding role failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||
return
|
||||
}
|
||||
@@ -99,7 +99,7 @@ func (api *RestAPI) updateUser(rw http.ResponseWriter, r *http.Request) {
|
||||
cclog.Errorf("Failed to encode response: %v", err)
|
||||
}
|
||||
} else if delrole != "" {
|
||||
if err := repository.GetUserRepository().RemoveRole(r.Context(), mux.Vars(r)["id"], delrole); err != nil {
|
||||
if err := repository.GetUserRepository().RemoveRole(r.Context(), chi.URLParam(r, "id"), delrole); err != nil {
|
||||
handleError(fmt.Errorf("removing role failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||
return
|
||||
}
|
||||
@@ -107,7 +107,7 @@ func (api *RestAPI) updateUser(rw http.ResponseWriter, r *http.Request) {
|
||||
cclog.Errorf("Failed to encode response: %v", err)
|
||||
}
|
||||
} else if newproj != "" {
|
||||
if err := repository.GetUserRepository().AddProject(r.Context(), mux.Vars(r)["id"], newproj); err != nil {
|
||||
if err := repository.GetUserRepository().AddProject(r.Context(), chi.URLParam(r, "id"), newproj); err != nil {
|
||||
handleError(fmt.Errorf("adding project failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||
return
|
||||
}
|
||||
@@ -115,7 +115,7 @@ func (api *RestAPI) updateUser(rw http.ResponseWriter, r *http.Request) {
|
||||
cclog.Errorf("Failed to encode response: %v", err)
|
||||
}
|
||||
} else if delproj != "" {
|
||||
if err := repository.GetUserRepository().RemoveProject(r.Context(), mux.Vars(r)["id"], delproj); err != nil {
|
||||
if err := repository.GetUserRepository().RemoveProject(r.Context(), chi.URLParam(r, "id"), delproj); err != nil {
|
||||
handleError(fmt.Errorf("removing project failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
|
||||
"github.com/ClusterCockpit/cc-lib/v2/schema"
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
@@ -86,7 +86,7 @@ func NewOIDC(a *Authentication) *OIDC {
|
||||
return oa
|
||||
}
|
||||
|
||||
func (oa *OIDC) RegisterEndpoints(r *mux.Router) {
|
||||
func (oa *OIDC) RegisterEndpoints(r chi.Router) {
|
||||
r.HandleFunc("/oidc-login", oa.OAuth2Login)
|
||||
r.HandleFunc("/oidc-callback", oa.OAuth2Callback)
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/ClusterCockpit/cc-lib/v2/schema"
"github.com/ClusterCockpit/cc-lib/v2/util"
"github.com/gorilla/mux"
"github.com/go-chi/chi/v5"
)

type InfoType map[string]interface{}
@@ -96,7 +96,7 @@ func setupConfigRoute(i InfoType, r *http.Request) InfoType {
}

func setupJobRoute(i InfoType, r *http.Request) InfoType {
i["id"] = mux.Vars(r)["id"]
i["id"] = chi.URLParam(r, "id")
if config.Keys.EmissionConstant != 0 {
i["emission"] = config.Keys.EmissionConstant
}
@@ -104,7 +104,7 @@ func setupJobRoute(i InfoType, r *http.Request) InfoType {
}

func setupUserRoute(i InfoType, r *http.Request) InfoType {
username := mux.Vars(r)["id"]
username := chi.URLParam(r, "id")
i["id"] = username
i["username"] = username
// TODO: If forbidden (== err exists), redirect to error page
@@ -116,33 +116,33 @@ func setupUserRoute(i InfoType, r *http.Request) InfoType {
}

func setupClusterStatusRoute(i InfoType, r *http.Request) InfoType {
|
||||
vars := mux.Vars(r)
|
||||
i["id"] = vars["cluster"]
|
||||
i["cluster"] = vars["cluster"]
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
i["id"] = cluster
|
||||
i["cluster"] = cluster
|
||||
i["displayType"] = "DASHBOARD"
|
||||
return i
|
||||
}
|
||||
|
||||
func setupClusterDetailRoute(i InfoType, r *http.Request) InfoType {
|
||||
vars := mux.Vars(r)
|
||||
i["id"] = vars["cluster"]
|
||||
i["cluster"] = vars["cluster"]
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
i["id"] = cluster
|
||||
i["cluster"] = cluster
|
||||
i["displayType"] = "DETAILS"
|
||||
return i
|
||||
}
|
||||
|
||||
func setupDashboardRoute(i InfoType, r *http.Request) InfoType {
|
||||
vars := mux.Vars(r)
|
||||
i["id"] = vars["cluster"]
|
||||
i["cluster"] = vars["cluster"]
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
i["id"] = cluster
|
||||
i["cluster"] = cluster
|
||||
i["displayType"] = "PUBLIC" // Used in Main Template
|
||||
return i
|
||||
}
|
||||
|
||||
func setupClusterOverviewRoute(i InfoType, r *http.Request) InfoType {
|
||||
vars := mux.Vars(r)
|
||||
i["id"] = vars["cluster"]
|
||||
i["cluster"] = vars["cluster"]
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
i["id"] = cluster
|
||||
i["cluster"] = cluster
|
||||
i["displayType"] = "OVERVIEW"
|
||||
|
||||
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
|
||||
@@ -154,11 +154,12 @@ func setupClusterOverviewRoute(i InfoType, r *http.Request) InfoType {
|
||||
}
|
||||
|
||||
func setupClusterListRoute(i InfoType, r *http.Request) InfoType {
|
||||
vars := mux.Vars(r)
|
||||
i["id"] = vars["cluster"]
|
||||
i["cluster"] = vars["cluster"]
|
||||
i["sid"] = vars["subcluster"]
|
||||
i["subCluster"] = vars["subcluster"]
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
subcluster := chi.URLParam(r, "subcluster")
|
||||
i["id"] = cluster
|
||||
i["cluster"] = cluster
|
||||
i["sid"] = subcluster
|
||||
i["subCluster"] = subcluster
|
||||
i["displayType"] = "LIST"
|
||||
|
||||
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
|
||||
@@ -170,10 +171,11 @@ func setupClusterListRoute(i InfoType, r *http.Request) InfoType {
|
||||
}
|
||||
|
||||
func setupNodeRoute(i InfoType, r *http.Request) InfoType {
|
||||
vars := mux.Vars(r)
|
||||
i["cluster"] = vars["cluster"]
|
||||
i["hostname"] = vars["hostname"]
|
||||
i["id"] = fmt.Sprintf("%s (%s)", vars["cluster"], vars["hostname"])
|
||||
cluster := chi.URLParam(r, "cluster")
|
||||
hostname := chi.URLParam(r, "hostname")
|
||||
i["cluster"] = cluster
|
||||
i["hostname"] = hostname
|
||||
i["id"] = fmt.Sprintf("%s (%s)", cluster, hostname)
|
||||
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
|
||||
if from != "" && to != "" {
|
||||
i["from"] = from
|
||||
@@ -183,7 +185,7 @@ func setupNodeRoute(i InfoType, r *http.Request) InfoType {
|
||||
}
|
||||
|
||||
func setupAnalysisRoute(i InfoType, r *http.Request) InfoType {
|
||||
i["cluster"] = mux.Vars(r)["cluster"]
|
||||
i["cluster"] = chi.URLParam(r, "cluster")
|
||||
return i
|
||||
}
|
||||
|
||||
@@ -395,7 +397,7 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
return filterPresets
}

func SetupRoutes(router *mux.Router, buildInfo web.Build) {
func SetupRoutes(router chi.Router, buildInfo web.Build) {
userCfgRepo := repository.GetUserCfgRepo()
for _, route := range routes {
route := route

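One consequence of the migration that shows up in every signature change like the one above: *mux.Router is a concrete struct pointer, whereas chi.Router is an interface, so SetupRoutes and the Mount*Routes helpers now accept the top-level mux, a Route sub-router, or an inline Group router interchangeably. A small sketch of that property, with an illustrative mount helper:

package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

// mountPing works with anything that satisfies chi.Router: the root
// mux, a Route sub-router, or a Group router.
func mountPing(r chi.Router) {
	r.Get("/ping", func(rw http.ResponseWriter, _ *http.Request) {
		rw.Write([]byte("pong"))
	})
}

func main() {
	root := chi.NewRouter()

	mountPing(root) // GET /ping

	root.Route("/api", func(api chi.Router) {
		mountPing(api) // GET /api/ping
	})

	http.ListenAndServe(":8080", root)
}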
@@ -9,6 +9,7 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/pkg/archive"
pqarchive "github.com/ClusterCockpit/cc-backend/pkg/archive/parquet"
cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
"github.com/go-co-op/gocron/v2"
)
@@ -66,3 +67,96 @@ func RegisterRetentionMoveService(age int, includeDB bool, location string, omit
}
}))
}

func RegisterRetentionParquetService(retention Retention) {
cclog.Info("Register retention parquet service")

maxFileSizeMB := retention.MaxFileSizeMB
if maxFileSizeMB <= 0 {
maxFileSizeMB = 512
}

var target pqarchive.ParquetTarget
var err error

switch retention.TargetKind {
case "s3":
target, err = pqarchive.NewS3Target(pqarchive.S3TargetConfig{
Endpoint: retention.TargetEndpoint,
Bucket: retention.TargetBucket,
AccessKey: retention.TargetAccessKey,
SecretKey: retention.TargetSecretKey,
Region: retention.TargetRegion,
UsePathStyle: retention.TargetUsePathStyle,
})
default:
target, err = pqarchive.NewFileTarget(retention.TargetPath)
}

if err != nil {
cclog.Errorf("Parquet retention: failed to create target: %v", err)
return
}

s.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(gocron.NewAtTime(5, 0, 0))),
gocron.NewTask(
func() {
startTime := time.Now().Unix() - int64(retention.Age*24*3600)
jobs, err := jobRepo.FindJobsBetween(0, startTime, retention.OmitTagged)
if err != nil {
cclog.Warnf("Parquet retention: error finding jobs: %v", err)
return
}
if len(jobs) == 0 {
return
}

cclog.Infof("Parquet retention: processing %d jobs", len(jobs))
ar := archive.GetHandle()
pw := pqarchive.NewParquetWriter(target, maxFileSizeMB)

for _, job := range jobs {
meta, err := ar.LoadJobMeta(job)
if err != nil {
cclog.Warnf("Parquet retention: load meta for job %d: %v", job.JobID, err)
continue
}

data, err := ar.LoadJobData(job)
if err != nil {
cclog.Warnf("Parquet retention: load data for job %d: %v", job.JobID, err)
continue
}

row, err := pqarchive.JobToParquetRow(meta, &data)
if err != nil {
cclog.Warnf("Parquet retention: convert job %d: %v", job.JobID, err)
continue
}

if err := pw.AddJob(*row); err != nil {
cclog.Errorf("Parquet retention: add job %d to writer: %v", job.JobID, err)
continue
}
}

if err := pw.Close(); err != nil {
cclog.Errorf("Parquet retention: close writer: %v", err)
return
}

ar.CleanUp(jobs)

if retention.IncludeDB {
cnt, err := jobRepo.DeleteJobsBefore(startTime, retention.OmitTagged)
if err != nil {
cclog.Errorf("Parquet retention: delete jobs from db: %v", err)
} else {
cclog.Infof("Parquet retention: removed %d jobs from db", cnt)
}
if err = jobRepo.Optimize(); err != nil {
cclog.Errorf("Parquet retention: db optimization error: %v", err)
}
}
}))
}

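RegisterRetentionParquetService archives expired jobs into parquet rows whose metric data is stored as gzip-compressed JSON (see compressJobData in the new convert.go further below). The PR only shows the write path; a read-back sketch is given here under the assumption that MetricDataGz is exactly gzip-wrapped JSON of schema.JobData, which is what the compression side implies:

package parquet

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"io"

	"github.com/ClusterCockpit/cc-lib/v2/schema"
)

// decompressJobData is the assumed inverse of compressJobData:
// gunzip the blob, then unmarshal the JSON into schema.JobData.
// It is not part of this PR.
func decompressJobData(blob []byte) (*schema.JobData, error) {
	gz, err := gzip.NewReader(bytes.NewReader(blob))
	if err != nil {
		return nil, err
	}
	defer gz.Close()

	raw, err := io.ReadAll(gz)
	if err != nil {
		return nil, err
	}

	var data schema.JobData
	if err := json.Unmarshal(raw, &data); err != nil {
		return nil, err
	}
	return &data, nil
}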
@@ -23,11 +23,20 @@ const (

// Retention defines the configuration for job retention policies.
type Retention struct {
Policy string `json:"policy"`
Location string `json:"location"`
Age int `json:"age"`
IncludeDB bool `json:"includeDB"`
OmitTagged bool `json:"omitTagged"`
Policy string `json:"policy"`
Location string `json:"location"`
Age int `json:"age"`
IncludeDB bool `json:"includeDB"`
OmitTagged bool `json:"omitTagged"`
TargetKind string `json:"target-kind"`
TargetPath string `json:"target-path"`
TargetEndpoint string `json:"target-endpoint"`
TargetBucket string `json:"target-bucket"`
TargetAccessKey string `json:"target-access-key"`
TargetSecretKey string `json:"target-secret-key"`
TargetRegion string `json:"target-region"`
TargetUsePathStyle bool `json:"target-use-path-style"`
MaxFileSizeMB int `json:"max-file-size-mb"`
}

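Based on the JSON tags above and the 512 MB default applied in RegisterRetentionParquetService, a retention block for the new "parquet" policy with an S3 target could look like the following. The endpoint, bucket, and credentials are placeholders; the sketch only demonstrates that the keys line up with the struct tags:

package main

import (
	"encoding/json"
	"fmt"
)

// Retention mirrors the struct from the diff, trimmed to the fields
// exercised in this example.
type Retention struct {
	Policy          string `json:"policy"`
	Age             int    `json:"age"`
	IncludeDB       bool   `json:"includeDB"`
	TargetKind      string `json:"target-kind"`
	TargetEndpoint  string `json:"target-endpoint"`
	TargetBucket    string `json:"target-bucket"`
	TargetAccessKey string `json:"target-access-key"`
	TargetSecretKey string `json:"target-secret-key"`
	MaxFileSizeMB   int    `json:"max-file-size-mb"`
}

func main() {
	// Hypothetical configuration snippet for the parquet policy.
	cfg := []byte(`{
		"policy": "parquet",
		"age": 180,
		"includeDB": true,
		"target-kind": "s3",
		"target-endpoint": "https://s3.example.org",
		"target-bucket": "cc-archive",
		"target-access-key": "PLACEHOLDER",
		"target-secret-key": "PLACEHOLDER",
		"max-file-size-mb": 512
	}`)

	var r Retention
	if err := json.Unmarshal(cfg, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r)
}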
// CronFrequency defines the execution intervals for various background workers.
@@ -87,6 +96,8 @@ func initArchiveServices(config json.RawMessage) {
cfg.Retention.IncludeDB,
cfg.Retention.Location,
cfg.Retention.OmitTagged)
case "parquet":
RegisterRetentionParquetService(cfg.Retention)
}

if cfg.Compression > 0 {

@@ -57,7 +57,7 @@ var configSchema = `
	"policy": {
		"description": "Retention policy",
		"type": "string",
		"enum": ["none", "delete", "move"]
		"enum": ["none", "delete", "move", "parquet"]
	},
	"include-db": {
		"description": "Also remove jobs from database",
@@ -70,6 +70,43 @@ var configSchema = `
	"location": {
		"description": "The target directory for retention. Only applicable for retention move.",
		"type": "string"
	},
	"target-kind": {
		"description": "Target storage kind for parquet retention: file or s3",
		"type": "string",
		"enum": ["file", "s3"]
	},
	"target-path": {
		"description": "Target directory path for parquet file storage",
		"type": "string"
	},
	"target-endpoint": {
		"description": "S3 endpoint URL for parquet target",
		"type": "string"
	},
	"target-bucket": {
		"description": "S3 bucket name for parquet target",
		"type": "string"
	},
	"target-access-key": {
		"description": "S3 access key for parquet target",
		"type": "string"
	},
	"target-secret-key": {
		"description": "S3 secret key for parquet target",
		"type": "string"
	},
	"target-region": {
		"description": "S3 region for parquet target",
		"type": "string"
	},
	"target-use-path-style": {
		"description": "Use path-style S3 URLs for parquet target",
		"type": "boolean"
	},
	"max-file-size-mb": {
		"description": "Maximum parquet file size in MB before splitting",
		"type": "integer"
	}
},
"required": ["policy"]

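For orientation, the sketch below shows how a retention block matching the schema fragment above could look in the configuration. It is illustrative only and not part of this commit: the surrounding nesting and all concrete values are assumptions, and only keys that appear in the schema fragment are used.

// Illustrative only (not part of this commit): a parquet retention block
// using the keys from the schema fragment above. All values are invented.
const exampleParquetRetention = `{
	"retention": {
		"policy": "parquet",
		"include-db": true,
		"target-kind": "s3",
		"target-endpoint": "https://s3.example.internal",
		"target-bucket": "cc-job-archive",
		"target-access-key": "EXAMPLEACCESSKEY",
		"target-secret-key": "EXAMPLESECRETKEY",
		"target-region": "us-east-1",
		"target-use-path-style": true,
		"max-file-size-mb": 512
	}
}`
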
116  pkg/archive/parquet/convert.go  Normal file
@@ -0,0 +1,116 @@

// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package parquet

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"

	"github.com/ClusterCockpit/cc-lib/v2/schema"
)

// JobToParquetRow converts job metadata and metric data into a flat ParquetJobRow.
// Nested fields are marshaled to JSON; metric data is gzip-compressed JSON.
func JobToParquetRow(meta *schema.Job, data *schema.JobData) (*ParquetJobRow, error) {
	resourcesJSON, err := json.Marshal(meta.Resources)
	if err != nil {
		return nil, fmt.Errorf("marshal resources: %w", err)
	}

	var statisticsJSON []byte
	if meta.Statistics != nil {
		statisticsJSON, err = json.Marshal(meta.Statistics)
		if err != nil {
			return nil, fmt.Errorf("marshal statistics: %w", err)
		}
	}

	var tagsJSON []byte
	if len(meta.Tags) > 0 {
		tagsJSON, err = json.Marshal(meta.Tags)
		if err != nil {
			return nil, fmt.Errorf("marshal tags: %w", err)
		}
	}

	var metaDataJSON []byte
	if meta.MetaData != nil {
		metaDataJSON, err = json.Marshal(meta.MetaData)
		if err != nil {
			return nil, fmt.Errorf("marshal metadata: %w", err)
		}
	}

	var footprintJSON []byte
	if meta.Footprint != nil {
		footprintJSON, err = json.Marshal(meta.Footprint)
		if err != nil {
			return nil, fmt.Errorf("marshal footprint: %w", err)
		}
	}

	var energyFootJSON []byte
	if meta.EnergyFootprint != nil {
		energyFootJSON, err = json.Marshal(meta.EnergyFootprint)
		if err != nil {
			return nil, fmt.Errorf("marshal energy footprint: %w", err)
		}
	}

	metricDataGz, err := compressJobData(data)
	if err != nil {
		return nil, fmt.Errorf("compress metric data: %w", err)
	}

	return &ParquetJobRow{
		JobID:          meta.JobID,
		Cluster:        meta.Cluster,
		SubCluster:     meta.SubCluster,
		Partition:      meta.Partition,
		Project:        meta.Project,
		User:           meta.User,
		State:          string(meta.State),
		StartTime:      meta.StartTime,
		Duration:       meta.Duration,
		Walltime:       meta.Walltime,
		NumNodes:       meta.NumNodes,
		NumHWThreads:   meta.NumHWThreads,
		NumAcc:         meta.NumAcc,
		Exclusive:      meta.Exclusive,
		Energy:         meta.Energy,
		SMT:            meta.SMT,
		ResourcesJSON:  resourcesJSON,
		StatisticsJSON: statisticsJSON,
		TagsJSON:       tagsJSON,
		MetaDataJSON:   metaDataJSON,
		FootprintJSON:  footprintJSON,
		EnergyFootJSON: energyFootJSON,
		MetricDataGz:   metricDataGz,
	}, nil
}

// compressJobData marshals the job's metric data to JSON and gzip-compresses
// it at the best compression level.
func compressJobData(data *schema.JobData) ([]byte, error) {
	jsonBytes, err := json.Marshal(data)
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer
	gz, err := gzip.NewWriterLevel(&buf, gzip.BestCompression)
	if err != nil {
		return nil, err
	}
	if _, err := gz.Write(jsonBytes); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

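The compress path above is exercised end-to-end in writer_test.go further down. For completeness, a hypothetical inverse helper could look like the sketch below; it is not part of this commit and simply mirrors what the test does when validating MetricDataGz (gzip reader, then JSON unmarshal). An additional "io" import would be needed.

// Hypothetical sketch (not part of this commit): recovering JobData from a
// row's MetricDataGz field, as the test below does inline.
func decompressJobData(metricDataGz []byte) (*schema.JobData, error) {
	gz, err := gzip.NewReader(bytes.NewReader(metricDataGz))
	if err != nil {
		return nil, err
	}
	defer gz.Close()

	raw, err := io.ReadAll(gz)
	if err != nil {
		return nil, err
	}

	var data schema.JobData
	if err := json.Unmarshal(raw, &data); err != nil {
		return nil, err
	}
	return &data, nil
}
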
32  pkg/archive/parquet/schema.go  Normal file
@@ -0,0 +1,32 @@

// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package parquet

// ParquetJobRow is the flat row layout written to the parquet files.
// Nested job fields are stored as JSON blobs; metric data is stored as
// gzip-compressed JSON.
type ParquetJobRow struct {
	JobID          int64   `parquet:"job_id"`
	Cluster        string  `parquet:"cluster"`
	SubCluster     string  `parquet:"sub_cluster"`
	Partition      string  `parquet:"partition,optional"`
	Project        string  `parquet:"project"`
	User           string  `parquet:"user"`
	State          string  `parquet:"job_state"`
	StartTime      int64   `parquet:"start_time"`
	Duration       int32   `parquet:"duration"`
	Walltime       int64   `parquet:"walltime"`
	NumNodes       int32   `parquet:"num_nodes"`
	NumHWThreads   int32   `parquet:"num_hwthreads"`
	NumAcc         int32   `parquet:"num_acc"`
	Exclusive      int32   `parquet:"exclusive"`
	Energy         float64 `parquet:"energy"`
	SMT            int32   `parquet:"smt"`
	ResourcesJSON  []byte  `parquet:"resources_json"`
	StatisticsJSON []byte  `parquet:"statistics_json,optional"`
	TagsJSON       []byte  `parquet:"tags_json,optional"`
	MetaDataJSON   []byte  `parquet:"meta_data_json,optional"`
	FootprintJSON  []byte  `parquet:"footprint_json,optional"`
	EnergyFootJSON []byte  `parquet:"energy_footprint_json,optional"`
	MetricDataGz   []byte  `parquet:"metric_data_gz"`
}

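Since the rows are written with parquet-go (see writer.go below), archived files can be read back with the same module's generics API. A minimal sketch, not part of this commit: the file name is invented, pq refers to the parquet-go import alias used in writer.go, and the "fmt" and "log" imports are assumed.

// Sketch (not part of this commit): reading archived rows back with
// parquet-go's ReadFile convenience helper. The file name is illustrative.
rows, err := pq.ReadFile[ParquetJobRow]("cc-archive-2025-01-01-001.parquet")
if err != nil {
	log.Fatalf("read parquet archive: %v", err)
}
for _, r := range rows {
	fmt.Printf("%s job %d: %d nodes, %d s\n", r.Cluster, r.JobID, r.NumNodes, r.Duration)
}
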
100  pkg/archive/parquet/target.go  Normal file
@@ -0,0 +1,100 @@

// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package parquet

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// ParquetTarget abstracts the destination for parquet file writes.
type ParquetTarget interface {
	WriteFile(name string, data []byte) error
}

// FileTarget writes parquet files to a local filesystem directory.
type FileTarget struct {
	path string
}

// NewFileTarget creates the target directory if necessary and returns a
// FileTarget writing into it.
func NewFileTarget(path string) (*FileTarget, error) {
	if err := os.MkdirAll(path, 0o750); err != nil {
		return nil, fmt.Errorf("create target directory: %w", err)
	}
	return &FileTarget{path: path}, nil
}

// WriteFile stores data under name inside the target directory.
func (ft *FileTarget) WriteFile(name string, data []byte) error {
	return os.WriteFile(filepath.Join(ft.path, name), data, 0o640)
}

// S3TargetConfig holds the configuration for an S3 parquet target.
type S3TargetConfig struct {
	Endpoint     string
	Bucket       string
	AccessKey    string
	SecretKey    string
	Region       string
	UsePathStyle bool
}

// S3Target writes parquet files to an S3-compatible object store.
type S3Target struct {
	client *s3.Client
	bucket string
}

// NewS3Target builds an S3 client for the given configuration. The region
// defaults to us-east-1; a custom endpoint and path-style addressing allow
// S3-compatible stores.
func NewS3Target(cfg S3TargetConfig) (*S3Target, error) {
	if cfg.Bucket == "" {
		return nil, fmt.Errorf("S3 target: empty bucket name")
	}

	region := cfg.Region
	if region == "" {
		region = "us-east-1"
	}

	awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(),
		awsconfig.WithRegion(region),
		awsconfig.WithCredentialsProvider(
			credentials.NewStaticCredentialsProvider(cfg.AccessKey, cfg.SecretKey, ""),
		),
	)
	if err != nil {
		return nil, fmt.Errorf("S3 target: load AWS config: %w", err)
	}

	opts := func(o *s3.Options) {
		if cfg.Endpoint != "" {
			o.BaseEndpoint = aws.String(cfg.Endpoint)
		}
		o.UsePathStyle = cfg.UsePathStyle
	}

	client := s3.NewFromConfig(awsCfg, opts)
	return &S3Target{client: client, bucket: cfg.Bucket}, nil
}

// WriteFile uploads data as an object named name into the configured bucket.
func (st *S3Target) WriteFile(name string, data []byte) error {
	_, err := st.client.PutObject(context.Background(), &s3.PutObjectInput{
		Bucket:      aws.String(st.bucket),
		Key:         aws.String(name),
		Body:        bytes.NewReader(data),
		ContentType: aws.String("application/vnd.apache.parquet"),
	})
	if err != nil {
		return fmt.Errorf("S3 target: put object %q: %w", name, err)
	}
	return nil
}

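Both implementations satisfy ParquetTarget, so the retention service can pick one based on the configured target-kind. How RegisterRetentionParquetService actually does this is not visible in this diff; the following is only a plausible sketch using the Retention fields defined above. The helper name is hypothetical, and since Retention lives in the config package it would sit next to the retention service rather than in package parquet.

// Hypothetical sketch (not part of this commit): selecting a ParquetTarget
// from the Retention config shown earlier. The real wiring inside
// RegisterRetentionParquetService may differ.
func newTargetFromConfig(cfg Retention) (parquet.ParquetTarget, error) {
	switch cfg.TargetKind {
	case "s3":
		return parquet.NewS3Target(parquet.S3TargetConfig{
			Endpoint:     cfg.TargetEndpoint,
			Bucket:       cfg.TargetBucket,
			AccessKey:    cfg.TargetAccessKey,
			SecretKey:    cfg.TargetSecretKey,
			Region:       cfg.TargetRegion,
			UsePathStyle: cfg.TargetUsePathStyle,
		})
	case "file", "":
		return parquet.NewFileTarget(cfg.TargetPath)
	default:
		return nil, fmt.Errorf("unknown target-kind %q", cfg.TargetKind)
	}
}
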
113  pkg/archive/parquet/writer.go  Normal file
@@ -0,0 +1,113 @@

// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package parquet

import (
	"bytes"
	"fmt"
	"time"

	cclog "github.com/ClusterCockpit/cc-lib/v2/ccLogger"
	pq "github.com/parquet-go/parquet-go"
)

// ParquetWriter batches ParquetJobRows and flushes them to a target
// when the estimated size exceeds maxSizeBytes.
type ParquetWriter struct {
	target       ParquetTarget
	maxSizeBytes int64
	rows         []ParquetJobRow
	currentSize  int64
	fileCounter  int
	datePrefix   string
}

// NewParquetWriter creates a new writer that flushes batches to the given target.
// maxSizeMB sets the approximate maximum size per parquet file in megabytes.
func NewParquetWriter(target ParquetTarget, maxSizeMB int) *ParquetWriter {
	return &ParquetWriter{
		target:       target,
		maxSizeBytes: int64(maxSizeMB) * 1024 * 1024,
		datePrefix:   time.Now().Format("2006-01-02"),
	}
}

// AddJob adds a row to the current batch. If the estimated batch size
// exceeds the configured maximum, the batch is flushed to the target first.
func (pw *ParquetWriter) AddJob(row ParquetJobRow) error {
	rowSize := estimateRowSize(&row)

	if pw.currentSize+rowSize > pw.maxSizeBytes && len(pw.rows) > 0 {
		if err := pw.Flush(); err != nil {
			return err
		}
	}

	pw.rows = append(pw.rows, row)
	pw.currentSize += rowSize
	return nil
}

// Flush writes the current batch to a parquet file on the target.
func (pw *ParquetWriter) Flush() error {
	if len(pw.rows) == 0 {
		return nil
	}

	pw.fileCounter++
	fileName := fmt.Sprintf("cc-archive-%s-%03d.parquet", pw.datePrefix, pw.fileCounter)

	data, err := writeParquetBytes(pw.rows)
	if err != nil {
		return fmt.Errorf("write parquet buffer: %w", err)
	}

	if err := pw.target.WriteFile(fileName, data); err != nil {
		return fmt.Errorf("write parquet file %q: %w", fileName, err)
	}

	cclog.Infof("Parquet retention: wrote %s (%d jobs, %d bytes)", fileName, len(pw.rows), len(data))
	pw.rows = pw.rows[:0]
	pw.currentSize = 0
	return nil
}

// Close flushes any remaining rows and finalizes the writer.
func (pw *ParquetWriter) Close() error {
	return pw.Flush()
}

// writeParquetBytes serializes rows into an in-memory parquet file using
// Snappy compression.
func writeParquetBytes(rows []ParquetJobRow) ([]byte, error) {
	var buf bytes.Buffer

	writer := pq.NewGenericWriter[ParquetJobRow](&buf,
		pq.Compression(&pq.Snappy),
	)

	if _, err := writer.Write(rows); err != nil {
		return nil, err
	}
	if err := writer.Close(); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

// estimateRowSize gives a rough per-row byte estimate used for batching.
func estimateRowSize(row *ParquetJobRow) int64 {
	// Fixed fields: ~100 bytes for numeric fields + strings estimate
	size := int64(200)
	size += int64(len(row.Cluster) + len(row.SubCluster) + len(row.Partition) +
		len(row.Project) + len(row.User) + len(row.State))
	size += int64(len(row.ResourcesJSON))
	size += int64(len(row.StatisticsJSON))
	size += int64(len(row.TagsJSON))
	size += int64(len(row.MetaDataJSON))
	size += int64(len(row.FootprintJSON))
	size += int64(len(row.EnergyFootJSON))
	size += int64(len(row.MetricDataGz))
	return size
}

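Taken together, a retention pass presumably converts each expired job, feeds it to the writer, and lets Close flush the final partial batch. The sketch below is illustrative only and not the code of RegisterRetentionParquetService; it is written as if it sat in package parquet, and loadJobArchiveData is a placeholder for however the service actually reads a job's archived metric data.

// Hypothetical sketch (not part of this commit): draining a set of jobs into
// parquet files on a target. loadJobArchiveData is a placeholder.
func writeJobsToParquet(target ParquetTarget, jobs []*schema.Job, maxFileSizeMB int) error {
	pw := NewParquetWriter(target, maxFileSizeMB)
	for _, job := range jobs {
		data, err := loadJobArchiveData(job) // placeholder for the archive read
		if err != nil {
			return err
		}
		row, err := JobToParquetRow(job, data)
		if err != nil {
			return err
		}
		if err := pw.AddJob(*row); err != nil {
			return err
		}
	}
	// Close flushes whatever remains in the last, possibly undersized batch.
	return pw.Close()
}
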
225  pkg/archive/parquet/writer_test.go  Normal file
@@ -0,0 +1,225 @@

// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package parquet

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"io"
	"sync"
	"testing"

	"github.com/ClusterCockpit/cc-lib/v2/schema"
	pq "github.com/parquet-go/parquet-go"
)

// memTarget collects written files in memory for testing.
type memTarget struct {
	mu    sync.Mutex
	files map[string][]byte
}

func newMemTarget() *memTarget {
	return &memTarget{files: make(map[string][]byte)}
}

func (m *memTarget) WriteFile(name string, data []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.files[name] = append([]byte(nil), data...)
	return nil
}

// makeTestJob builds a minimal job with one metric series for conversion tests.
func makeTestJob(jobID int64) (*schema.Job, *schema.JobData) {
	meta := &schema.Job{
		JobID:        jobID,
		Cluster:      "testcluster",
		SubCluster:   "sc0",
		Project:      "testproject",
		User:         "testuser",
		State:        schema.JobStateCompleted,
		StartTime:    1700000000,
		Duration:     3600,
		Walltime:     7200,
		NumNodes:     2,
		NumHWThreads: 16,
		Exclusive:    1,
		SMT:          1,
		Resources: []*schema.Resource{
			{Hostname: "node001"},
			{Hostname: "node002"},
		},
	}

	data := schema.JobData{
		"cpu_load": {
			schema.MetricScopeNode: &schema.JobMetric{
				Unit:     schema.Unit{Base: ""},
				Timestep: 60,
				Series: []schema.Series{
					{
						Hostname: "node001",
						Data:     []schema.Float{1.0, 2.0, 3.0},
					},
				},
			},
		},
	}

	return meta, &data
}

func TestJobToParquetRowConversion(t *testing.T) {
	meta, data := makeTestJob(1001)
	meta.Tags = []*schema.Tag{{Type: "test", Name: "tag1"}}
	meta.MetaData = map[string]string{"key": "value"}

	row, err := JobToParquetRow(meta, data)
	if err != nil {
		t.Fatalf("JobToParquetRow: %v", err)
	}

	if row.JobID != 1001 {
		t.Errorf("JobID = %d, want 1001", row.JobID)
	}
	if row.Cluster != "testcluster" {
		t.Errorf("Cluster = %q, want %q", row.Cluster, "testcluster")
	}
	if row.User != "testuser" {
		t.Errorf("User = %q, want %q", row.User, "testuser")
	}
	if row.State != "completed" {
		t.Errorf("State = %q, want %q", row.State, "completed")
	}
	if row.NumNodes != 2 {
		t.Errorf("NumNodes = %d, want 2", row.NumNodes)
	}

	// Verify resources JSON
	var resources []*schema.Resource
	if err := json.Unmarshal(row.ResourcesJSON, &resources); err != nil {
		t.Fatalf("unmarshal resources: %v", err)
	}
	if len(resources) != 2 {
		t.Errorf("resources len = %d, want 2", len(resources))
	}

	// Verify tags JSON
	var tags []*schema.Tag
	if err := json.Unmarshal(row.TagsJSON, &tags); err != nil {
		t.Fatalf("unmarshal tags: %v", err)
	}
	if len(tags) != 1 || tags[0].Name != "tag1" {
		t.Errorf("tags = %v, want [{test tag1}]", tags)
	}

	// Verify metric data is gzip-compressed valid JSON
	gz, err := gzip.NewReader(bytes.NewReader(row.MetricDataGz))
	if err != nil {
		t.Fatalf("gzip reader: %v", err)
	}
	decompressed, err := io.ReadAll(gz)
	if err != nil {
		t.Fatalf("gzip read: %v", err)
	}
	var jobData schema.JobData
	if err := json.Unmarshal(decompressed, &jobData); err != nil {
		t.Fatalf("unmarshal metric data: %v", err)
	}
	if _, ok := jobData["cpu_load"]; !ok {
		t.Error("metric data missing cpu_load key")
	}
}

func TestParquetWriterSingleBatch(t *testing.T) {
	target := newMemTarget()
	pw := NewParquetWriter(target, 512)

	for i := int64(0); i < 5; i++ {
		meta, data := makeTestJob(i)
		row, err := JobToParquetRow(meta, data)
		if err != nil {
			t.Fatalf("convert job %d: %v", i, err)
		}
		if err := pw.AddJob(*row); err != nil {
			t.Fatalf("add job %d: %v", i, err)
		}
	}

	if err := pw.Close(); err != nil {
		t.Fatalf("close: %v", err)
	}

	if len(target.files) != 1 {
		t.Fatalf("expected 1 file, got %d", len(target.files))
	}

	// Verify the parquet file is readable
	for name, data := range target.files {
		file := bytes.NewReader(data)
		pf, err := pq.OpenFile(file, int64(len(data)))
		if err != nil {
			t.Fatalf("open parquet %s: %v", name, err)
		}
		if pf.NumRows() != 5 {
			t.Errorf("parquet rows = %d, want 5", pf.NumRows())
		}
	}
}

func TestParquetWriterBatching(t *testing.T) {
	target := newMemTarget()
	// Use a very small max size to force multiple files
	pw := NewParquetWriter(target, 0) // 0 MB means every job triggers a flush
	pw.maxSizeBytes = 1               // Force flush after every row

	for i := int64(0); i < 3; i++ {
		meta, data := makeTestJob(i)
		row, err := JobToParquetRow(meta, data)
		if err != nil {
			t.Fatalf("convert job %d: %v", i, err)
		}
		if err := pw.AddJob(*row); err != nil {
			t.Fatalf("add job %d: %v", i, err)
		}
	}

	if err := pw.Close(); err != nil {
		t.Fatalf("close: %v", err)
	}

	// With maxSizeBytes=1, each AddJob should flush the previous batch,
	// resulting in multiple files
	if len(target.files) < 2 {
		t.Errorf("expected multiple files due to batching, got %d", len(target.files))
	}

	// Verify all files are valid parquet
	for name, data := range target.files {
		file := bytes.NewReader(data)
		_, err := pq.OpenFile(file, int64(len(data)))
		if err != nil {
			t.Errorf("invalid parquet file %s: %v", name, err)
		}
	}
}

func TestFileTarget(t *testing.T) {
	dir := t.TempDir()
	ft, err := NewFileTarget(dir)
	if err != nil {
		t.Fatalf("NewFileTarget: %v", err)
	}

	testData := []byte("test parquet data")
	if err := ft.WriteFile("test.parquet", testData); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	// The successful write is the assertion here; the content is not read
	// back, WriteFile returning no error is sufficient for this test.
}

@@ -1,10 +1,39 @@

{{template "base.tmpl" .}}
{{define "content"}}
<div class="row">
	<div class="col">
		<div class="alert alert-error" role="alert">
			404: Not found
		</div>
	</div>
</div>
{{end}}
<!DOCTYPE html>
<html lang="en">
<head>
	<meta http-equiv="content-type" content="text/html; charset=utf-8" />
	<meta name='viewport' content='width=device-width,initial-scale=1'>
	<title>{{.Title}}</title>
	<link rel='icon' type='image/png' href='/favicon.png'>
	<link rel="stylesheet" href="/bootstrap.min.css">
</head>
<body>
	<header>
		<nav class="navbar navbar-expand-lg navbar-light fixed-top bg-light">
			<div class="container-fluid">
				<a class="navbar-brand" href="/">
					<img style="height: 30px;" alt="ClusterCockpit Logo" src="/img/logo.png" class="d-inline-block align-top">
				</a>
			</div>
		</nav>
	</header>
	<main>
		<section class="content-section">
			<div class="container">
				<div class="row justify-content-center">
					<div class="col-6 text-center">
						<div class="card mt-5">
							<div class="card-body p-5">
								<h1 class="display-1 text-muted">404</h1>
								<h2 class="mb-3">Page Not Found</h2>
								<p class="text-muted mb-4">The page you are looking for does not exist or has been moved.</p>
								<a href="/" class="btn btn-primary">Back to Home</a>
							</div>
						</div>
					</div>
				</div>
			</div>
		</section>
	</main>
</body>
</html>

14  web/web.go
@@ -186,6 +186,16 @@ func ServeFiles() http.Handler {
	return http.FileServer(http.FS(publicFiles))
}

// StaticFileExists checks whether a static file exists in the embedded frontend FS.
func StaticFileExists(path string) bool {
	path = strings.TrimPrefix(path, "/")
	if path == "" {
		return false
	}
	_, err := fs.Stat(frontendFiles, "frontend/public/"+path)
	return err == nil
}

//go:embed templates/*
var templateFiles embed.FS

@@ -201,6 +211,10 @@ func init() {
		return nil
	}

	if path == "templates/404.tmpl" {
		templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.ParseFS(templateFiles, path))
		return nil
	}
	if path == "templates/login.tmpl" {
		if util.CheckFileExists("./var/login.tmpl") {
			cclog.Info("overwrite login.tmpl with local file")
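StaticFileExists and the new 404.tmpl are presumably combined in a not-found handler on the router side, which is outside the hunks shown here. A sketch under that assumption, written as if it lived in package web where the templates map and ServeFiles are accessible; the handler name and the data passed to the template are invented.

// Hypothetical sketch (not part of this commit): serve embedded static files
// when they exist, otherwise render the new 404 template.
func NotFoundHandler() http.HandlerFunc {
	static := ServeFiles()
	return func(rw http.ResponseWriter, r *http.Request) {
		if StaticFileExists(r.URL.Path) {
			static.ServeHTTP(rw, r)
			return
		}
		rw.Header().Set("Content-Type", "text/html; charset=utf-8")
		rw.WriteHeader(http.StatusNotFound)
		// The template data is a guess; the real handler may pass a Page struct.
		_ = templates["404.tmpl"].Execute(rw, map[string]any{"Title": "Page Not Found"})
	}
}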