From c1f0d2ed4087e19d8f6156afecb731bfff21fee2 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 10:03:00 +0100 Subject: [PATCH 01/25] authentication via database and/or ldap --- auth/auth.go | 329 +++++++++++++++++++++++++++++++++++++++++++++++++++ auth/ldap.go | 183 ++++++++++++++++++++++++++++ 2 files changed, 512 insertions(+) create mode 100644 auth/auth.go create mode 100644 auth/ldap.go diff --git a/auth/auth.go b/auth/auth.go new file mode 100644 index 0000000..66fd1e9 --- /dev/null +++ b/auth/auth.go @@ -0,0 +1,329 @@ +package auth + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "database/sql" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "os" + "strings" + + "github.com/ClusterCockpit/cc-jobarchive/templates" + sq "github.com/Masterminds/squirrel" + "github.com/golang-jwt/jwt/v4" + "github.com/gorilla/sessions" + "github.com/jmoiron/sqlx" + "golang.org/x/crypto/bcrypt" +) + +type User struct { + Username string + Password string + Name string + IsAdmin bool + ViaLdap bool + Email string +} + +type ContextKey string + +const ContextUserKey ContextKey = "user" + +var JwtPublicKey ed25519.PublicKey +var JwtPrivateKey ed25519.PrivateKey + +var sessionStore *sessions.CookieStore + +func Init(db *sqlx.DB, ldapConfig *LdapConfig) error { + _, err := db.Exec(` + CREATE TABLE IF NOT EXISTS user ( + username varchar(255) PRIMARY KEY, + password varchar(255) DEFAULT NULL, + ldap tinyint DEFAULT 0, + name varchar(255) DEFAULT NULL, + roles varchar(255) DEFAULT NULL, + email varchar(255) DEFAULT NULL);`) + if err != nil { + return err + } + + sessKey := os.Getenv("SESSION_KEY") + if sessKey == "" { + log.Println("warning: environment variable 'SESSION_KEY' not set (will use non-persistent random key)") + bytes := make([]byte, 32) + if _, err := rand.Read(bytes); err != nil { + return err + } + sessionStore = sessions.NewCookieStore(bytes) + } else { + bytes, err := base64.StdEncoding.DecodeString(sessKey) + 
if err != nil { + return err + } + sessionStore = sessions.NewCookieStore(bytes) + } + + pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY") + if pubKey == "" || privKey == "" { + log.Println("warning: environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") + } else { + bytes, err := base64.StdEncoding.DecodeString(pubKey) + if err != nil { + return err + } + JwtPublicKey = ed25519.PublicKey(bytes) + bytes, err = base64.StdEncoding.DecodeString(privKey) + if err != nil { + return err + } + JwtPrivateKey = ed25519.PrivateKey(bytes) + } + + if ldapConfig != nil { + if err := initLdap(ldapConfig); err != nil { + return err + } + } + + return nil +} + +// arg must be formated like this: ":[admin]:" +func AddUserToDB(db *sqlx.DB, arg string) error { + parts := strings.SplitN(arg, ":", 3) + if len(parts) != 3 || len(parts[0]) == 0 || len(parts[2]) == 0 || !(len(parts[1]) == 0 || parts[1] == "admin") { + return errors.New("invalid argument format") + } + + password, err := bcrypt.GenerateFromPassword([]byte(parts[2]), bcrypt.DefaultCost) + if err != nil { + return err + } + + roles := "[]" + if parts[1] == "admin" { + roles = "[\"ROLE_ADMIN\"]" + } + + _, err = sq.Insert("user").Columns("username", "password", "roles").Values(parts[0], string(password), roles).RunWith(db).Exec() + if err != nil { + return err + } + log.Printf("new user '%s' added (roles: %s)\n", parts[0], roles) + return nil +} + +func DelUserFromDB(db *sqlx.DB, username string) error { + _, err := db.Exec(`DELETE FROM user WHERE user.username = ?`, username) + return err +} + +func fetchUserFromDB(db *sqlx.DB, username string) (*User, error) { + user := &User{Username: username} + var hashedPassword, name, rawRoles, email sql.NullString + if err := sq.Select("password", "ldap", "name", "roles", "email").From("user"). + Where("user.username = ?", username).RunWith(db). 
+ QueryRow().Scan(&hashedPassword, &user.ViaLdap, &name, &rawRoles, &email); err != nil { + return nil, fmt.Errorf("user '%s' not found (%s)", username, err.Error()) + } + + user.Password = hashedPassword.String + user.Name = name.String + user.Email = email.String + var roles []string + if rawRoles.Valid { + json.Unmarshal([]byte(rawRoles.String), &roles) + } + for _, role := range roles { + if role == "ROLE_ADMIN" { + user.IsAdmin = true + } + } + + return user, nil +} + +// Handle a POST request that should log the user in, +// starting a new session. +func Login(db *sqlx.DB) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + username, password := r.FormValue("username"), r.FormValue("password") + user, err := fetchUserFromDB(db, username) + if err == nil && user.ViaLdap && ldapAuthEnabled { + err = loginViaLdap(user, password) + } else if err == nil && !user.ViaLdap && user.Password != "" { + if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)); e != nil { + err = fmt.Errorf("user '%s' provided the wrong password (%s)", username, e.Error()) + } + } else { + err = errors.New("could not authenticate user") + } + + if err != nil { + log.Printf("login failed: %s\n", err.Error()) + rw.WriteHeader(http.StatusUnauthorized) + templates.Render(rw, r, "login.html", &templates.Page{ + Title: "Login failed", + Login: &templates.LoginPage{ + Error: "Username or password incorrect", + }, + }) + return + } + + session, err := sessionStore.New(r, "session") + if err != nil { + log.Printf("session creation failed: %s\n", err.Error()) + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + session.Values["username"] = user.Username + session.Values["is_admin"] = user.IsAdmin + if err := sessionStore.Save(r, rw, session); err != nil { + log.Printf("session save failed: %s\n", err.Error()) + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + log.Printf("login 
successfull: user: %#v\n", user) + http.Redirect(rw, r, "/", http.StatusTemporaryRedirect) + }) +} + +var ErrTokenInvalid error = errors.New("invalid token") + +func authViaToken(r *http.Request) (*User, error) { + if JwtPublicKey == nil { + return nil, nil + } + + rawtoken := r.Header.Get("X-Auth-Token") + if rawtoken == "" { + rawtoken = r.Header.Get("Authorization") + prefix := "Bearer " + if !strings.HasPrefix(rawtoken, prefix) { + return nil, nil + } + rawtoken = rawtoken[len(prefix):] + } + + token, err := jwt.Parse(rawtoken, func(t *jwt.Token) (interface{}, error) { + if t.Method != jwt.SigningMethodEdDSA { + return nil, errors.New("only Ed25519/EdDSA supported") + } + return JwtPublicKey, nil + }) + if err != nil { + return nil, ErrTokenInvalid + } + + if err := token.Claims.Valid(); err != nil { + return nil, ErrTokenInvalid + } + + claims := token.Claims.(jwt.MapClaims) + sub, _ := claims["sub"].(string) + isAdmin, _ := claims["is_admin"].(bool) + return &User{ + Username: sub, + IsAdmin: isAdmin, + }, nil +} + +// Authenticate the user and put a User object in the +// context of the request. If authentication fails, +// do not continue but send client to the login screen. 
+func Auth(next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + user, err := authViaToken(r) + if err == ErrTokenInvalid { + log.Printf("authentication failed: invalid token\n") + http.Error(rw, err.Error(), http.StatusUnauthorized) + return + } + if user != nil { + ctx := context.WithValue(r.Context(), ContextUserKey, user) + next.ServeHTTP(rw, r.WithContext(ctx)) + return + } + + session, err := sessionStore.Get(r, "session") + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + if session.IsNew { + log.Printf("authentication failed: no session or jwt found\n") + + rw.WriteHeader(http.StatusUnauthorized) + templates.Render(rw, r, "login.html", &templates.Page{ + Title: "Authentication failed", + Login: &templates.LoginPage{ + Error: "No valid session or JWT provided", + }, + }) + return + } + + ctx := context.WithValue(r.Context(), ContextUserKey, &User{ + Username: session.Values["username"].(string), + IsAdmin: session.Values["is_admin"].(bool), + }) + next.ServeHTTP(rw, r.WithContext(ctx)) + }) +} + +// Generate a new JWT that can be used for authentication +func ProvideJWT(user *User) (string, error) { + if JwtPrivateKey == nil { + return "", errors.New("environment variable 'JWT_PUBLIC_KEY' not set") + } + + tok := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{ + "sub": user.Username, + "is_admin": user.IsAdmin, + }) + + return tok.SignedString(JwtPrivateKey) +} + +func GetUser(ctx context.Context) *User { + x := ctx.Value(ContextUserKey) + if x == nil { + return nil + } + + return x.(*User) +} + +// Clears the session cookie +func Logout(rw http.ResponseWriter, r *http.Request) { + session, err := sessionStore.Get(r, "session") + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + if !session.IsNew { + session.Options.MaxAge = -1 + if err := sessionStore.Save(r, rw, session); err != nil { + http.Error(rw, 
err.Error(), http.StatusInternalServerError) + return + } + } + + templates.Render(rw, r, "login.html", &templates.Page{ + Title: "Logout successful", + Login: &templates.LoginPage{ + Info: "Logout successful", + }, + }) +} diff --git a/auth/ldap.go b/auth/ldap.go new file mode 100644 index 0000000..bb401bc --- /dev/null +++ b/auth/ldap.go @@ -0,0 +1,183 @@ +package auth + +import ( + "crypto/tls" + "errors" + "fmt" + "log" + "os" + "strings" + "sync" + + "github.com/go-ldap/ldap/v3" + "github.com/jmoiron/sqlx" +) + +type LdapConfig struct { + Url string `json:"url"` + UserBase string `json:"user_base"` + SearchDN string `json:"search_dn"` + UserBind string `json:"user_bind"` + UserFilter string `json:"user_filter"` + TLS bool `json:"tls"` +} + +var ldapAuthEnabled bool = false +var ldapConfig *LdapConfig +var ldapAdminPassword string + +func initLdap(config *LdapConfig) error { + ldapAdminPassword = os.Getenv("LDAP_ADMIN_PASSWORD") + if ldapAdminPassword == "" { + log.Println("warning: environment variable 'LDAP_ADMIN_PASSWORD' not set") + } + + ldapConfig = config + ldapAuthEnabled = true + return nil +} + +var ldapConnectionsLock sync.Mutex +var ldapConnections []*ldap.Conn = []*ldap.Conn{} + +// TODO: Add a connection pool or something like +// that so that connections can be reused/cached. 
+func getLdapConnection() (*ldap.Conn, error) { + ldapConnectionsLock.Lock() + n := len(ldapConnections) + if n > 0 { + conn := ldapConnections[n-1] + ldapConnections = ldapConnections[:n-1] + ldapConnectionsLock.Unlock() + return conn, nil + } + ldapConnectionsLock.Unlock() + + conn, err := ldap.DialURL(ldapConfig.Url) + if err != nil { + return nil, err + } + + if ldapConfig.TLS { + if err := conn.StartTLS(&tls.Config{InsecureSkipVerify: true}); err != nil { + conn.Close() + return nil, err + } + } + + if err := conn.Bind(ldapConfig.SearchDN, ldapAdminPassword); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func releaseConnection(conn *ldap.Conn) { + // Re-bind to the user we can run queries with + if err := conn.Bind(ldapConfig.SearchDN, ldapAdminPassword); err != nil { + conn.Close() + log.Printf("ldap error: %s", err.Error()) + } + + ldapConnectionsLock.Lock() + defer ldapConnectionsLock.Unlock() + + n := len(ldapConnections) + if n > 2 { + conn.Close() + return + } + + ldapConnections = append(ldapConnections, conn) +} + +func loginViaLdap(user *User, password string) error { + l, err := getLdapConnection() + if err != nil { + return err + } + defer releaseConnection(l) + + userDn := strings.Replace(ldapConfig.UserBind, "{username}", user.Username, -1) + if err := l.Bind(userDn, password); err != nil { + return err + } + + user.ViaLdap = true + return nil +} + +// Delete users where user.ldap is 1 and that do not show up in the ldap search results. +// Add users to the users table that are new in the ldap search results. 
+func SyncWithLDAP(db *sqlx.DB) error { + if !ldapAuthEnabled { + return errors.New("ldap not enabled") + } + + const IN_DB int = 1 + const IN_LDAP int = 2 + const IN_BOTH int = 3 + + users := map[string]int{} + rows, err := db.Query(`SELECT username FROM user WHERE user.ldap = 1`) + if err != nil { + return err + } + + for rows.Next() { + var username string + if err := rows.Scan(&username); err != nil { + return err + } + + users[username] = IN_DB + } + + l, err := getLdapConnection() + if err != nil { + return err + } + defer releaseConnection(l) + + ldapResults, err := l.Search(ldap.NewSearchRequest( + ldapConfig.UserBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + ldapConfig.UserFilter, []string{"dn", "uid", "gecos"}, nil)) + if err != nil { + return err + } + + newnames := map[string]string{} + for _, entry := range ldapResults.Entries { + username := entry.GetAttributeValue("uid") + if username == "" { + return errors.New("no attribute 'uid'") + } + + _, ok := users[username] + if !ok { + users[username] = IN_LDAP + newnames[username] = entry.GetAttributeValue("gecos") + } else { + users[username] = IN_BOTH + } + } + + for username, where := range users { + if where == IN_DB { + fmt.Printf("ldap-sync: remove '%s' (does not show up in LDAP anymore)\n", username) + if _, err := db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil { + return err + } + } else if where == IN_LDAP { + name := newnames[username] + fmt.Printf("ldap-sync: add '%s' (name: '%s', roles: [], ldap: true)\n", username, name) + if _, err := db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`, + username, 1, name, "[]"); err != nil { + return err + } + } + } + + return nil +} From 84c5cd47f64da7e387272db29fd17bf8e860ac2c Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 10:08:41 +0100 Subject: [PATCH 02/25] support the new job archive directory structure --- init-db.go | 96 
++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 60 insertions(+), 36 deletions(-) diff --git a/init-db.go b/init-db.go index 2e7d1ff..ef49145 100644 --- a/init-db.go +++ b/init-db.go @@ -5,6 +5,7 @@ import ( "database/sql" "encoding/json" "fmt" + "log" "os" "path/filepath" "strings" @@ -58,7 +59,7 @@ func initDB(db *sqlx.DB, archive string) error { return err } - entries0, err := os.ReadDir(archive) + clustersDir, err := os.ReadDir(archive) if err != nil { return err } @@ -70,50 +71,73 @@ func initDB(db *sqlx.DB, archive string) error { return err } - var tx *sql.Tx = nil - var i int = 0 + tx, err := db.Begin() + if err != nil { + return err + } + i := 0 tags := make(map[string]int64) - for _, entry0 := range entries0 { - entries1, err := os.ReadDir(filepath.Join(archive, entry0.Name())) - if err != nil { - return err - } - - for _, entry1 := range entries1 { - if !entry1.IsDir() { - continue + handleDirectory := func(filename string) error { + // Bundle 100 inserts into one transaction for better performance: + if i%100 == 0 { + if tx != nil { + if err := tx.Commit(); err != nil { + return err + } } - entries2, err := os.ReadDir(filepath.Join(archive, entry0.Name(), entry1.Name())) + tx, err = db.Begin() if err != nil { return err } - for _, entry2 := range entries2 { - // Bundle 200 inserts into one transaction for better performance: - if i%200 == 0 { - if tx != nil { - if err := tx.Commit(); err != nil { - return err + insertstmt = tx.Stmt(insertstmt) + fmt.Printf("%d jobs inserted...\r", i) + } + + err := loadJob(tx, insertstmt, tags, filename) + if err == nil { + i += 1 + } + + return err + } + + for _, clusterDir := range clustersDir { + lvl1Dirs, err := os.ReadDir(filepath.Join(archive, clusterDir.Name())) + if err != nil { + return err + } + + for _, lvl1Dir := range lvl1Dirs { + if !lvl1Dir.IsDir() { + // Could be the cluster.json file + continue + } + + lvl2Dirs, err := os.ReadDir(filepath.Join(archive, clusterDir.Name(), 
lvl1Dir.Name())) + if err != nil { + return err + } + + for _, lvl2Dir := range lvl2Dirs { + dirpath := filepath.Join(archive, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name()) + startTimeDirs, err := os.ReadDir(dirpath) + if err != nil { + return err + } + + for _, startTiemDir := range startTimeDirs { + if startTiemDir.Type().IsRegular() && startTiemDir.Name() == "meta.json" { + if err := handleDirectory(dirpath); err != nil { + log.Printf("in %s: %s\n", dirpath, err.Error()) + } + } else if startTiemDir.IsDir() { + if err := handleDirectory(filepath.Join(dirpath, startTiemDir.Name())); err != nil { + log.Printf("in %s: %s\n", filepath.Join(dirpath, startTiemDir.Name()), err.Error()) } } - - tx, err = db.Begin() - if err != nil { - return err - } - - insertstmt = tx.Stmt(insertstmt) - fmt.Printf("%d jobs inserted...\r", i) } - - filename := filepath.Join(archive, entry0.Name(), entry1.Name(), entry2.Name()) - if err = loadJob(tx, insertstmt, tags, filename); err != nil { - fmt.Printf("failed to load '%s': %s", filename, err.Error()) - continue - } - - i += 1 } } } @@ -130,7 +154,7 @@ func initDB(db *sqlx.DB, archive string) error { return err } - fmt.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds()) + log.Printf("A total of %d jobs have been registered in %.3f seconds.\n", i, time.Since(starttime).Seconds()) return nil } From 960b0245b2c93df8464b7a883ef5286012c2b668 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 10:09:47 +0100 Subject: [PATCH 03/25] templates for the login page --- templates/404.html | 10 +++++++++ templates/base.html | 24 +++++++++++++++++++++ templates/login.html | 48 ++++++++++++++++++++++++++++++++++++++++++ templates/templates.go | 29 +++++++++++++++++++++++++ 4 files changed, 111 insertions(+) create mode 100644 templates/404.html create mode 100644 templates/base.html create mode 100644 templates/login.html create mode 100644 templates/templates.go diff --git 
a/templates/404.html b/templates/404.html new file mode 100644 index 0000000..c937561 --- /dev/null +++ b/templates/404.html @@ -0,0 +1,10 @@ +{{template "base.html" .}} +{{define "content"}} +
+
+ +
+
+{{end}} diff --git a/templates/base.html b/templates/base.html new file mode 100644 index 0000000..32d6f6b --- /dev/null +++ b/templates/base.html @@ -0,0 +1,24 @@ + + + + + + {{.Title}} + + + + + + + +
+
+
+ {{block "content" .}} + Whoops, you should not see this... + {{end}} +
+
+
+ + \ No newline at end of file diff --git a/templates/login.html b/templates/login.html new file mode 100644 index 0000000..35776a4 --- /dev/null +++ b/templates/login.html @@ -0,0 +1,48 @@ +{{template "base.html" .}} +{{define "content"}} +
+
+

+ ClusterCockpit Login +

+
+
+
+
+ {{if .Login.Error}} + + {{end}} + + {{if .Login.Info}} + + {{end}} +
+
+
+
+
+
+ + +
+
+ + +
+ +
+
+
+
+
+
+
+ +
+
+
+{{end}} diff --git a/templates/templates.go b/templates/templates.go new file mode 100644 index 0000000..c1c7c07 --- /dev/null +++ b/templates/templates.go @@ -0,0 +1,29 @@ +package templates + +import ( + "html/template" + "log" + "net/http" +) + +var templates *template.Template + +type Page struct { + Title string + Login *LoginPage +} + +type LoginPage struct { + Error string + Info string +} + +func init() { + templates = template.Must(template.ParseGlob("./templates/*.html")) +} + +func Render(rw http.ResponseWriter, r *http.Request, name string, page *Page) { + if err := templates.ExecuteTemplate(rw, name, page); err != nil { + log.Printf("template error: %s\n", err.Error()) + } +} From 4ca0cba7cd2ceb6d557299f2cb389b0aaeac715f Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 10:12:19 +0100 Subject: [PATCH 04/25] individual configurations per user --- config/config.go | 134 ++++++++++++++++++++++++++++++-------- graph/model/models.go | 17 +++++ graph/model/models_gen.go | 13 ---- graph/resolver.go | 24 ++++++- graph/schema.resolvers.go | 6 +- graph/stats.go | 14 ++-- var/ui.config.json | 1 - 7 files changed, 160 insertions(+), 49 deletions(-) delete mode 100644 var/ui.config.json diff --git a/config/config.go b/config/config.go index c49a401..502ac67 100644 --- a/config/config.go +++ b/config/config.go @@ -3,63 +3,143 @@ package config import ( "context" "encoding/json" + "fmt" "log" "net/http" "os" + "path/filepath" "sync" + "time" + "github.com/ClusterCockpit/cc-jobarchive/auth" "github.com/ClusterCockpit/cc-jobarchive/graph/model" + "github.com/jmoiron/sqlx" ) +var db *sqlx.DB var lock sync.RWMutex -var config map[string]interface{} +var uiDefaults map[string]interface{} var Clusters []*model.Cluster -const configFilePath string = "./var/ui.config.json" - -func init() { - lock.Lock() - defer lock.Unlock() - - bytes, err := os.ReadFile(configFilePath) +func Init(usersdb *sqlx.DB, authEnabled bool, uiConfig map[string]interface{}, jobArchive 
string) error { + db = usersdb + uiDefaults = uiConfig + entries, err := os.ReadDir(jobArchive) if err != nil { - log.Fatal(err) + return err } - if err := json.Unmarshal(bytes, &config); err != nil { - log.Fatal(err) + Clusters = []*model.Cluster{} + for _, de := range entries { + bytes, err := os.ReadFile(filepath.Join(jobArchive, de.Name(), "cluster.json")) + if err != nil { + return err + } + + var cluster model.Cluster + if err := json.Unmarshal(bytes, &cluster); err != nil { + return err + } + + if cluster.FilterRanges.StartTime.To.IsZero() { + cluster.FilterRanges.StartTime.To = time.Unix(0, 0) + } + + if cluster.ClusterID != de.Name() { + return fmt.Errorf("the file '%s/cluster.json' contains the clusterId '%s'", de.Name(), cluster.ClusterID) + } + + Clusters = append(Clusters, &cluster) } + + if authEnabled { + _, err := db.Exec(` + CREATE TABLE IF NOT EXISTS configuration ( + username varchar(255), + key varchar(255), + value varchar(255), + PRIMARY KEY (username, key), + FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`) + if err != nil { + return err + } + } + + return nil } -// Call this function to change the current configuration. -// `value` must be valid JSON. This This function is thread-safe. -func UpdateConfig(key, value string, ctx context.Context) error { - var v interface{} - if err := json.Unmarshal([]byte(value), &v); err != nil { - return err +// Return the personalised UI config for the currently authenticated +// user or return the plain default config. 
+func GetUIConfig(r *http.Request) (map[string]interface{}, error) { + lock.RLock() + config := make(map[string]interface{}, len(uiDefaults)) + for k, v := range uiDefaults { + config[k] = v + } + lock.RUnlock() + + user := auth.GetUser(r.Context()) + if user == nil { + return config, nil } - lock.Lock() - defer lock.Unlock() - - config[key] = v - bytes, err := json.Marshal(config) + rows, err := db.Query(`SELECT key, value FROM configuration WHERE configuration.username = ?`, user.Username) if err != nil { - return err + return nil, err } - if err := os.WriteFile(configFilePath, bytes, 0644); err != nil { + for rows.Next() { + var key, rawval string + if err := rows.Scan(&key, &rawval); err != nil { + return nil, err + } + + var val interface{} + if err := json.Unmarshal([]byte(rawval), &val); err != nil { + return nil, err + } + + config[key] = val + } + + return config, nil +} + +// If the context does not have a user, update the global ui configuration without persisting it! +// If there is a (authenticated) user, update only his configuration. +func UpdateConfig(key, value string, ctx context.Context) error { + user := auth.GetUser(ctx) + if user == nil { + lock.RLock() + defer lock.RUnlock() + + var val interface{} + if err := json.Unmarshal([]byte(value), &val); err != nil { + return err + } + + uiDefaults[key] = val + return nil + } + + if _, err := db.Exec(`REPLACE INTO configuration (username, key, value) VALUES (?, ?, ?)`, + user.Username, key, value); err != nil { + log.Printf("db.Exec: %s\n", err.Error()) return err } return nil } -// http.HandlerFunc compatible function that serves the current configuration as JSON +// http.HandlerFunc compatible function that serves the current configuration as JSON. +// TODO: Use templates and stuff instead of this... 
func ServeConfig(rw http.ResponseWriter, r *http.Request) { - lock.RLock() - defer lock.RUnlock() + config, err := GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } rw.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(rw).Encode(config); err != nil { diff --git a/graph/model/models.go b/graph/model/models.go index 31930a2..0096801 100644 --- a/graph/model/models.go +++ b/graph/model/models.go @@ -7,3 +7,20 @@ type JobTag struct { TagType string `json:"tagType" db:"tag_type"` TagName string `json:"tagName" db:"tag_name"` } + +type Cluster struct { + ClusterID string `json:"clusterID"` + ProcessorType string `json:"processorType"` + SocketsPerNode int `json:"socketsPerNode"` + CoresPerSocket int `json:"coresPerSocket"` + ThreadsPerCore int `json:"threadsPerCore"` + FlopRateScalar int `json:"flopRateScalar"` + FlopRateSimd int `json:"flopRateSimd"` + MemoryBandwidth int `json:"memoryBandwidth"` + MetricConfig []*MetricConfig `json:"metricConfig"` + FilterRanges *FilterRanges `json:"filterRanges"` + MetricDataRepository *struct { + Kind string `json:"kind"` + Url string `json:"url"` + } `json:"metricDataRepository"` +} diff --git a/graph/model/models_gen.go b/graph/model/models_gen.go index 68562b5..71e9104 100644 --- a/graph/model/models_gen.go +++ b/graph/model/models_gen.go @@ -11,19 +11,6 @@ import ( "github.com/ClusterCockpit/cc-jobarchive/schema" ) -type Cluster struct { - ClusterID string `json:"clusterID"` - ProcessorType string `json:"processorType"` - SocketsPerNode int `json:"socketsPerNode"` - CoresPerSocket int `json:"coresPerSocket"` - ThreadsPerCore int `json:"threadsPerCore"` - FlopRateScalar int `json:"flopRateScalar"` - FlopRateSimd int `json:"flopRateSimd"` - MemoryBandwidth int `json:"memoryBandwidth"` - MetricConfig []*MetricConfig `json:"metricConfig"` - FilterRanges *FilterRanges `json:"filterRanges"` -} - type FilterRanges struct { Duration *IntRangeOutput 
`json:"duration"` NumNodes *IntRangeOutput `json:"numNodes"` diff --git a/graph/resolver.go b/graph/resolver.go index 4f44d39..badf516 100644 --- a/graph/resolver.go +++ b/graph/resolver.go @@ -1,11 +1,14 @@ package graph import ( + "context" "errors" "fmt" "regexp" "strings" + "time" + "github.com/ClusterCockpit/cc-jobarchive/auth" "github.com/ClusterCockpit/cc-jobarchive/graph/model" sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" @@ -37,13 +40,18 @@ func ScanJob(row Scannable) (*model.Job, error) { return nil, err } + if job.Duration == 0 && job.State == model.JobStateRunning { + job.Duration = int(time.Since(job.StartTime).Seconds()) + } + job.Nodes = strings.Split(nodeList, ",") return job, nil } // Helper function for the `jobs` GraphQL-Query. Is also used elsewhere when a list of jobs is needed. -func (r *Resolver) queryJobs(filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*model.Job, int, error) { +func (r *Resolver) queryJobs(ctx context.Context, filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*model.Job, int, error) { query := sq.Select(JobTableCols...).From("job") + query = securityCheck(ctx, query) if order != nil { field := toSnakeCase(order.Field) @@ -100,6 +108,20 @@ func (r *Resolver) queryJobs(filters []*model.JobFilter, page *model.PageRequest return jobs, count, nil } +func securityCheck(ctx context.Context, query sq.SelectBuilder) sq.SelectBuilder { + val := ctx.Value(auth.ContextUserKey) + if val == nil { + return query + } + + user := val.(*auth.User) + if user.IsAdmin { + return query + } + + return query.Where("job.user_id = ?", user.Username) +} + // Build a sq.SelectBuilder out of a model.JobFilter. 
func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.SelectBuilder { if filter.Tags != nil { diff --git a/graph/schema.resolvers.go b/graph/schema.resolvers.go index 63d757b..c7e4699 100644 --- a/graph/schema.resolvers.go +++ b/graph/schema.resolvers.go @@ -151,7 +151,9 @@ func (r *queryResolver) Tags(ctx context.Context) ([]*model.JobTag, error) { } func (r *queryResolver) Job(ctx context.Context, id string) (*model.Job, error) { - return ScanJob(sq.Select(JobTableCols...).From("job").Where("job.id = ?", id).RunWith(r.DB).QueryRow()) + query := sq.Select(JobTableCols...).From("job").Where("job.id = ?", id) + query = securityCheck(ctx, query) + return ScanJob(query.RunWith(r.DB).QueryRow()) } func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) { @@ -181,7 +183,7 @@ func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobF } func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) { - jobs, count, err := r.queryJobs(filter, page, order) + jobs, count, err := r.queryJobs(ctx, filter, page, order) if err != nil { return nil, err } diff --git a/graph/stats.go b/graph/stats.go index e885b0d..fa0d66e 100644 --- a/graph/stats.go +++ b/graph/stats.go @@ -47,6 +47,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF ).From("job").Where("job.cluster_id = ?", cluster.ClusterID).GroupBy(col) } + query = securityCheck(ctx, query) for _, f := range filter { query = buildWhereClause(f, query) } @@ -82,6 +83,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF if groupBy == nil { query := sq.Select("COUNT(job.id)").From("job").Where("job.duration < 120") + query = securityCheck(ctx, query) for _, f := range filter { query = buildWhereClause(f, query) } @@ -91,6 +93,7 @@ func (r *queryResolver) 
jobsStatistics(ctx context.Context, filter []*model.JobF } else { col := groupBy2column[*groupBy] query := sq.Select(col, "COUNT(job.id)").From("job").Where("job.duration < 120") + query = securityCheck(ctx, query) for _, f := range filter { query = buildWhereClause(f, query) } @@ -133,12 +136,12 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF if histogramsNeeded { var err error - stat.HistWalltime, err = r.jobsStatisticsHistogram("ROUND(job.duration / 3600) as value", filter, id, col) + stat.HistWalltime, err = r.jobsStatisticsHistogram(ctx, "ROUND(job.duration / 3600) as value", filter, id, col) if err != nil { return nil, err } - stat.HistNumNodes, err = r.jobsStatisticsHistogram("job.num_nodes as value", filter, id, col) + stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter, id, col) if err != nil { return nil, err } @@ -150,8 +153,9 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF // `value` must be the column grouped by, but renamed to "value". `id` and `col` can optionally be used // to add a condition to the query of the kind " = ". -func (r *queryResolver) jobsStatisticsHistogram(value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) { +func (r *queryResolver) jobsStatisticsHistogram(ctx context.Context, value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) { query := sq.Select(value, "COUNT(job.id) AS count").From("job") + query = securityCheck(ctx, query) for _, f := range filters { query = buildWhereClause(f, query) } @@ -179,7 +183,7 @@ func (r *queryResolver) jobsStatisticsHistogram(value string, filters []*model.J // Helper function for the rooflineHeatmap GraphQL query placed here so that schema.resolvers.go is not too full. 
func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) { - jobs, count, err := r.queryJobs(filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil) + jobs, count, err := r.queryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil) if err != nil { return nil, err } @@ -232,7 +236,7 @@ func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilte // Helper function for the jobsFootprints GraphQL query placed here so that schema.resolvers.go is not too full. func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.MetricFootprints, error) { - jobs, count, err := r.queryJobs(filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil) + jobs, count, err := r.queryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil) if err != nil { return nil, err } diff --git a/var/ui.config.json b/var/ui.config.json deleted file mode 100644 index f06828d..0000000 --- a/var/ui.config.json +++ /dev/null @@ -1 +0,0 @@ -{"analysis_view_histogramMetrics":["flops_any","mem_bw","mem_used"],"analysis_view_scatterPlotMetrics":[["flops_any","mem_bw"],["flops_any","cpu_load"],["cpu_load","mem_bw"]],"job_view_nodestats_selectedMetrics":["flops_any","mem_bw","mem_used"],"job_view_polarPlotMetrics":["flops_any","mem_bw","mem_used","net_bw","file_bw"],"job_view_selectedMetrics":["flops_any","mem_bw","mem_used"],"plot_general_colorBackground":true,"plot_general_colorscheme":["#00bfff","#0000ff","#ff00ff","#ff0000","#ff8000","#ffff00","#80ff00"],"plot_general_lineWidth":1,"plot_list_jobsPerPage":10,"plot_list_selectedMetrics":["cpu_load","mem_used","flops_any","mem_bw","clock"],"plot_view_plotsPerRow":4,"plot_view_showPolarplot":true,"plot_view_showRoofline":true,"plot_view_showStatTable":true} \ No newline at end of file From 
34317e0e644ce64e1a07e595d236f8d356b3f3d2 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 10:14:45 +0100 Subject: [PATCH 05/25] Add InfluxDBv2 as metric data repo --- .env | 11 +++ metricdata/archive.go | 2 - metricdata/cc-metric-store.go | 8 +- metricdata/influxdb-v2.go | 143 ++++++++++++++++++++++++++++++++++ metricdata/metricdata.go | 44 +++++++++-- 5 files changed, 195 insertions(+), 13 deletions(-) create mode 100644 .env create mode 100644 metricdata/influxdb-v2.go diff --git a/.env b/.env new file mode 100644 index 0000000..48076cd --- /dev/null +++ b/.env @@ -0,0 +1,11 @@ + +export CCMETRICSTORE_URL="http://localhost:8081" +export CCMETRICSTORE_JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw" + +export INFLUXDB_V2_TOKEN="egLfcf7fx0FESqFYU3RpAAbj" + +export JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0=" +export JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q==" +export SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629" + +export LDAP_ADMIN_PASSWORD="mashup" diff --git a/metricdata/archive.go b/metricdata/archive.go index d4c9ab5..1a46290 100644 --- a/metricdata/archive.go +++ b/metricdata/archive.go @@ -18,8 +18,6 @@ import ( "github.com/ClusterCockpit/cc-jobarchive/schema" ) -var JobArchivePath string = "./var/job-archive" - // For a given job, return the path of the `data.json`/`meta.json` file. 
// TODO: Implement Issue ClusterCockpit/ClusterCockpit#97 func getPath(job *model.Job, file string) (string, error) { diff --git a/metricdata/cc-metric-store.go b/metricdata/cc-metric-store.go index 0c94bc7..b65d284 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -46,11 +46,11 @@ type ApiStatsData struct { Max schema.Float `json:"max"` } -func (ccms *CCMetricStore) Init() error { - ccms.url = os.Getenv("CCMETRICSTORE_URL") +func (ccms *CCMetricStore) Init(url string) error { + ccms.url = url // os.Getenv("CCMETRICSTORE_URL") ccms.jwt = os.Getenv("CCMETRICSTORE_JWT") - if ccms.url == "" || ccms.jwt == "" { - return errors.New("environment variables 'CCMETRICSTORE_URL' or 'CCMETRICSTORE_JWT' not set") + if ccms.jwt == "" { + return errors.New("environment variable 'CCMETRICSTORE_JWT' not set") } return nil diff --git a/metricdata/influxdb-v2.go b/metricdata/influxdb-v2.go new file mode 100644 index 0000000..0089d9b --- /dev/null +++ b/metricdata/influxdb-v2.go @@ -0,0 +1,143 @@ +package metricdata + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/ClusterCockpit/cc-jobarchive/config" + "github.com/ClusterCockpit/cc-jobarchive/graph/model" + "github.com/ClusterCockpit/cc-jobarchive/schema" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api" +) + +type InfluxDBv2DataRepository struct { + client influxdb2.Client + queryClient influxdb2Api.QueryAPI + bucket, measurement string +} + +func (idb *InfluxDBv2DataRepository) Init(url string) error { + token := os.Getenv("INFLUXDB_V2_TOKEN") + if token == "" { + return errors.New("warning: environment variable 'INFLUXDB_V2_TOKEN' not set") + } + + idb.client = influxdb2.NewClient(url, token) + idb.queryClient = idb.client.QueryAPI("ClusterCockpit") + idb.bucket = "ClusterCockpit/data" + idb.measurement = "data" + return nil +} + +func (idb *InfluxDBv2DataRepository) formatTime(t time.Time) 
string { + return fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02dZ", + t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) +} + +func (idb *InfluxDBv2DataRepository) LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { + fieldsConds := make([]string, 0, len(metrics)) + for _, m := range metrics { + fieldsConds = append(fieldsConds, fmt.Sprintf(`r._field == "%s"`, m)) + } + fieldsCond := strings.Join(fieldsConds, " or ") + + hostsConds := make([]string, 0, len(job.Nodes)) + for _, h := range job.Nodes { + hostsConds = append(hostsConds, fmt.Sprintf(`r.host == "%s"`, h)) + } + hostsCond := strings.Join(hostsConds, " or ") + + query := fmt.Sprintf(`from(bucket: "%s") + |> range(start: %s, stop: %s) + |> filter(fn: (r) => r._measurement == "%s" and (%s) and (%s)) + |> drop(columns: ["_start", "_stop", "_measurement"])`, idb.bucket, + idb.formatTime(job.StartTime), idb.formatTime(job.StartTime.Add(time.Duration(job.Duration)).Add(1*time.Second)), + idb.measurement, hostsCond, fieldsCond) + rows, err := idb.queryClient.Query(ctx, query) + if err != nil { + return nil, err + } + + jobData := make(schema.JobData) + + var currentSeries *schema.MetricSeries = nil + for rows.Next() { + row := rows.Record() + if currentSeries == nil || rows.TableChanged() { + field, host := row.Field(), row.ValueByKey("host").(string) + jobMetric, ok := jobData[field] + if !ok { + mc := config.GetMetricConfig(job.ClusterID, field) + jobMetric = &schema.JobMetric{ + Scope: "node", // TODO: FIXME: Whatever... 
+ Unit: mc.Unit, + Timestep: mc.Sampletime, + Series: make([]*schema.MetricSeries, 0, len(job.Nodes)), + } + jobData[field] = jobMetric + } + + currentSeries = &schema.MetricSeries{ + NodeID: host, + Statistics: nil, + Data: make([]schema.Float, 0), + } + jobMetric.Series = append(jobMetric.Series, currentSeries) + } + + val := row.Value().(float64) + currentSeries.Data = append(currentSeries.Data, schema.Float(val)) + } + + return jobData, idb.addStats(job, jobData, metrics, hostsCond, ctx) +} + +func (idb *InfluxDBv2DataRepository) addStats(job *model.Job, jobData schema.JobData, metrics []string, hostsCond string, ctx context.Context) error { + for _, metric := range metrics { + query := fmt.Sprintf(` + data = from(bucket: "%s") + |> range(start: %s, stop: %s) + |> filter(fn: (r) => r._measurement == "%s" and r._field == "%s" and (%s)) + + union(tables: [ + data |> mean(column: "_value") |> set(key: "_field", value: "avg") + data |> min(column: "_value") |> set(key: "_field", value: "min") + data |> max(column: "_value") |> set(key: "_field", value: "max") + ]) + |> pivot(rowKey: ["host"], columnKey: ["_field"], valueColumn: "_value") + |> group()`, idb.bucket, + idb.formatTime(job.StartTime), idb.formatTime(job.StartTime.Add(time.Duration(job.Duration)).Add(1*time.Second)), + idb.measurement, metric, hostsCond) + rows, err := idb.queryClient.Query(ctx, query) + if err != nil { + return err + } + + jobMetric := jobData[metric] + for rows.Next() { + row := rows.Record() + host := row.ValueByKey("host").(string) + avg, min, max := row.ValueByKey("avg").(float64), + row.ValueByKey("min").(float64), + row.ValueByKey("max").(float64) + + for _, s := range jobMetric.Series { + if s.NodeID == host { + s.Statistics = &schema.MetricStatistics{ + Avg: avg, + Min: min, + Max: max, + } + break + } + } + } + } + + return nil +} diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index 727fd86..9118f7e 100644 --- a/metricdata/metricdata.go +++ 
b/metricdata/metricdata.go @@ -4,25 +4,55 @@ import ( "context" "errors" "fmt" - "log" + "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/schema" ) -var runningJobs *CCMetricStore +type MetricDataRepository interface { + Init(url string) error + LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) +} -func init() { - runningJobs = &CCMetricStore{} - if err := runningJobs.Init(); err != nil { - log.Fatalln(err) +var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{} + +var JobArchivePath string + +func Init(jobArchivePath string) error { + JobArchivePath = jobArchivePath + for _, cluster := range config.Clusters { + if cluster.MetricDataRepository != nil { + switch cluster.MetricDataRepository.Kind { + case "cc-metric-store": + ccms := &CCMetricStore{} + if err := ccms.Init(cluster.MetricDataRepository.Url); err != nil { + return err + } + metricDataRepos[cluster.ClusterID] = ccms + case "influxdb-v2": + idb := &InfluxDBv2DataRepository{} + if err := idb.Init(cluster.MetricDataRepository.Url); err != nil { + return err + } + metricDataRepos[cluster.ClusterID] = idb + default: + return fmt.Errorf("unkown metric data repository '%s' for cluster '%s'", cluster.MetricDataRepository.Kind, cluster.ClusterID) + } + } } + return nil } // Fetches the metric data for a job. 
func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { if job.State == model.JobStateRunning { - return runningJobs.LoadData(job, metrics, ctx) + repo, ok := metricDataRepos[job.ClusterID] + if !ok { + return nil, fmt.Errorf("no metric data repository configured for '%s'", job.ClusterID) + } + + return repo.LoadData(job, metrics, ctx) } if job.State != model.JobStateCompleted { From bc8ad593fd867cb337c251f2436ff9c8e0760340 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 10:15:25 +0100 Subject: [PATCH 06/25] update main; make REST API compatible to ClusterCockpit --- go.mod | 7 +- go.sum | 77 ++++++++++++++++++ rest-api.go | 81 ++++++++++++------- server.go | 220 ++++++++++++++++++++++++++++++++++++---------------- 4 files changed, 287 insertions(+), 98 deletions(-) diff --git a/go.mod b/go.mod index c78cf88..e2740b7 100644 --- a/go.mod +++ b/go.mod @@ -5,9 +5,14 @@ go 1.15 require ( github.com/99designs/gqlgen v0.13.0 github.com/Masterminds/squirrel v1.5.1 + github.com/go-ldap/ldap/v3 v3.4.1 + github.com/golang-jwt/jwt/v4 v4.1.0 github.com/gorilla/handlers v1.5.1 - github.com/gorilla/mux v1.6.1 + github.com/gorilla/mux v1.8.0 + github.com/gorilla/sessions v1.2.1 + github.com/influxdata/influxdb-client-go/v2 v2.6.0 github.com/jmoiron/sqlx v1.3.1 github.com/mattn/go-sqlite3 v1.14.6 github.com/vektah/gqlparser/v2 v2.1.0 + golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 ) diff --git a/go.sum b/go.sum index 113a108..41d6210 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ github.com/99designs/gqlgen v0.13.0 h1:haLTcUp3Vwp80xMVEg5KRNwzfUrgFdRmtBY8fuB8scA= github.com/99designs/gqlgen v0.13.0/go.mod h1:NV130r6f4tpRWuAI+zsrSdooO/eWUv+Gyyoi3rEfXIk= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml 
v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Masterminds/squirrel v1.5.1 h1:kWAKlLLJFxZG7N2E0mBMNWVp5AuUX+JUrnhFN74Eg+w= github.com/Masterminds/squirrel v1.5.1/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= @@ -12,27 +14,53 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/trifles v0.0.0-20190318185328-a8d75aae118c h1:TUuUh0Xgj97tLMNtWtNvI9mIV6isjEb9lBMNv+77IGM= github.com/dgryski/trifles v0.0.0-20190318185328-a8d75aae118c/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.5.1 
h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-chi/chi v3.3.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= +github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f h1:9oNbS1z4rVpbnkHBdPZU4jo9bSmrLpII768arSyMFgk= github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.1 h1:KOwqsTYZdeuMacU7CxjMNYEKeBvLbxW+psodrbcEa3A= github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= 
+github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/influxdata/influxdb-client-go/v2 v2.6.0 h1:bIOaGTgvvv1Na2hG+nIvqyv7PK2UiU2WrJN1ck1ykyM= +github.com/influxdata/influxdb-client-go/v2 v2.6.0/go.mod h1:Y/0W1+TZir7ypoQZYd2IrnVOKB3Tq6oegAQeSVN/+EU= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE= github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -40,6 +68,8 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 
h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= @@ -47,9 +77,16 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= @@ -59,6 +96,8 @@ github.com/opentracing/basictracer-go v1.0.0/go.mod 
h1:QfBfYuafItcjQuMwinw9GhYKw github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ -73,35 +112,73 @@ github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJV github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= 
github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= github.com/vektah/gqlparser/v2 v2.1.0 h1:uiKJ+T5HMGGQM2kRKQ8Pxw8+Zq9qhhZhz/lieYvCMns= github.com/vektah/gqlparser/v2 v2.1.0/go.mod h1:SyUiHgLATUR8BiYURfTirrTcGpcE+4XkV2se04Px1Ms= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589 h1:rjUrONFu4kLchcZTfp3/96bR8bW8dIa8uz3cR5n0cgM= golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= sourcegraph.com/sourcegraph/appdash v0.0.0-20180110180208-2cc67fd64755/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/rest-api.go b/rest-api.go index b9caff6..1d02aad 100644 --- a/rest-api.go +++ b/rest-api.go @@ -12,33 +12,40 @@ import ( "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/metricdata" sq "github.com/Masterminds/squirrel" + "github.com/gorilla/mux" ) -type StartJobRequestBody struct { - JobId string `json:"job_id"` - UserId string `json:"user_id"` - ProjectId string `json:"project_id"` - ClusterId string `json:"cluster_id"` - StartTime int64 `json:"start_time"` +type StartJobApiRequest struct { + JobId int64 `json:"jobId"` + UserId string `json:"userId"` + ClusterId string `json:"clusterId"` + StartTime int64 `json:"startTime"` + MetaData string `json:"metaData"` + ProjectId string `json:"projectId"` Nodes []string `json:"nodes"` - Metadata string `json:"metadata"` + NodeList string `json:"nodeList"` } -type StartJobResponeBody struct { - DBID int64 `json:"db_id"` +type StartJobApiRespone struct { + DBID int64 `json:"id"` } -type StopJobRequestBody struct { - DBID *int64 `json:"db_id"` - JobId string `json:"job_id"` - ClusterId string `json:"cluster_id"` - StartTime int64 `json:"start_time"` +type StopJobApiRequest struct { + // JobId, ClusterId and StartTime are optional. + // They are only used if no database id was provided. 
+ JobId *string `json:"jobId"` + ClusterId *string `json:"clusterId"` + StartTime *int64 `json:"startTime"` - StopTime int64 `json:"stop_time"` + StopTime int64 `json:"stopTime"` +} + +type StopJobApiRespone struct { + DBID string `json:"id"` } func startJob(rw http.ResponseWriter, r *http.Request) { - req := StartJobRequestBody{} + req := StartJobApiRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(rw, err.Error(), http.StatusBadRequest) return @@ -49,9 +56,20 @@ func startJob(rw http.ResponseWriter, r *http.Request) { return } + if req.Nodes == nil { + req.Nodes = strings.Split(req.NodeList, "|") + if len(req.Nodes) == 1 { + req.Nodes = strings.Split(req.NodeList, ",") + } + } + if len(req.Nodes) == 0 || len(req.Nodes[0]) == 0 || len(req.UserId) == 0 { + http.Error(rw, "required fields are missing", http.StatusBadRequest) + return + } + res, err := db.Exec( - `INSERT INTO job (job_id, user_id, cluster_id, start_time, duration, job_state, num_nodes, node_list, metadata) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);`, - req.JobId, req.UserId, req.ClusterId, req.StartTime, 0, model.JobStateRunning, len(req.Nodes), strings.Join(req.Nodes, ","), req.Metadata) + `INSERT INTO job (job_id, user_id, project_id, cluster_id, start_time, duration, job_state, num_nodes, node_list, metadata) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`, + req.JobId, req.UserId, req.ProjectId, req.ClusterId, req.StartTime, 0, model.JobStateRunning, len(req.Nodes), strings.Join(req.Nodes, ","), req.MetaData) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return @@ -63,16 +81,16 @@ func startJob(rw http.ResponseWriter, r *http.Request) { return } - log.Printf("New job started (db-id=%d)\n", id) + log.Printf("new job (id: %d): clusterId=%s, jobId=%d, userId=%s, startTime=%d, nodes=%v\n", id, req.ClusterId, req.JobId, req.UserId, req.StartTime, req.Nodes) rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusOK) - 
json.NewEncoder(rw).Encode(StartJobResponeBody{ + rw.WriteHeader(http.StatusCreated) + json.NewEncoder(rw).Encode(StartJobApiRespone{ DBID: id, }) } func stopJob(rw http.ResponseWriter, r *http.Request) { - req := StopJobRequestBody{} + req := StopJobApiRequest{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(rw, err.Error(), http.StatusBadRequest) return @@ -80,8 +98,9 @@ func stopJob(rw http.ResponseWriter, r *http.Request) { var err error var job *model.Job - if req.DBID != nil { - job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job").Where("job.id = ?", req.DBID).RunWith(db).QueryRow()) + id, ok := mux.Vars(r)["id"] + if ok { + job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job").Where("job.id = ?", id).RunWith(db).QueryRow()) } else { job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job"). Where("job.job_id = ?", req.JobId). @@ -99,17 +118,23 @@ func stopJob(rw http.ResponseWriter, r *http.Request) { return } - job.Duration = int(job.StartTime.Unix() - req.StopTime) + job.Duration = int(req.StopTime - job.StartTime.Unix()) + if err := metricdata.ArchiveJob(job, r.Context()); err != nil { + log.Printf("archiving job (id: %s) failed: %s\n", job.ID, err.Error()) http.Error(rw, err.Error(), http.StatusInternalServerError) return } - if _, err := db.Exec(`UPDATE job SET job.duration = ?, job.job_state = ? WHERE job.id = ?;`, - job.Duration, model.JobStateCompleted, job.ID); err != nil { + if _, err := db.Exec(`UPDATE job SET job_state = ? 
WHERE job.id = ?`, model.JobStateCompleted, job.ID); err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } - rw.WriteHeader(http.StatusOK) + log.Printf("job stoped and archived (id: %s): clusterId=%s, jobId=%s, userId=%s, startTime=%s, nodes=%v\n", job.ID, job.ClusterID, job.JobID, job.UserID, job.StartTime, job.Nodes) + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusCreated) + json.NewEncoder(rw).Encode(StopJobApiRespone{ + DBID: job.ID, + }) } diff --git a/server.go b/server.go index 1859918..58e3d02 100644 --- a/server.go +++ b/server.go @@ -6,16 +6,15 @@ import ( "log" "net/http" "os" - "path/filepath" - "time" "github.com/99designs/gqlgen/graphql/handler" "github.com/99designs/gqlgen/graphql/playground" + "github.com/ClusterCockpit/cc-jobarchive/auth" "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph" "github.com/ClusterCockpit/cc-jobarchive/graph/generated" - "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/metricdata" + "github.com/ClusterCockpit/cc-jobarchive/templates" "github.com/gorilla/handlers" "github.com/gorilla/mux" "github.com/jmoiron/sqlx" @@ -24,86 +23,169 @@ import ( var db *sqlx.DB -func main() { - var reinitDB bool - var port, staticFiles, jobDBFile string +type ProgramConfig struct { + Addr string `json:"addr"` + DisableAuthentication bool `json:"disable-authentication"` + StaticFiles string `json:"static-files"` + DB string `json:"db"` + JobArchive string `json:"job-archive"` + LdapConfig *auth.LdapConfig `json:"ldap"` + HttpsCertFile string `json:"https-cert-file"` + HttpsKeyFile string `json:"https-key-file"` + UiDefaults map[string]interface{} `json:"ui-defaults"` +} - flag.StringVar(&port, "port", "8080", "Port on which to listen") - flag.StringVar(&staticFiles, "static-files", "./frontend/public", "Directory who's contents shall be served as static files") - flag.StringVar(&jobDBFile, 
"job-db", "./var/job.db", "SQLite 3 Jobs Database File") - flag.BoolVar(&reinitDB, "init-db", false, "Initialize new SQLite Database") +var programConfig ProgramConfig = ProgramConfig{ + Addr: "0.0.0.0:8080", + DisableAuthentication: false, + StaticFiles: "./frontend/public", + DB: "./var/job.db", + JobArchive: "./var/job-archive", + LdapConfig: &auth.LdapConfig{ + Url: "ldap://localhost", + UserBase: "ou=hpc,dc=rrze,dc=uni-erlangen,dc=de", + SearchDN: "cn=admin,dc=rrze,dc=uni-erlangen,dc=de", + UserBind: "uid={username},ou=hpc,dc=rrze,dc=uni-erlangen,dc=de", + UserFilter: "(&(objectclass=posixAccount)(uid=*))", + }, + HttpsCertFile: "", + HttpsKeyFile: "", + UiDefaults: map[string]interface{}{ + "analysis_view_histogramMetrics": []string{"flops_any", "mem_bw", "mem_used"}, + "analysis_view_scatterPlotMetrics": [][]string{{"flops_any", "mem_bw"}, {"flops_any", "cpu_load"}, {"cpu_load", "mem_bw"}}, + "job_view_nodestats_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"}, + "job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used", "net_bw", "file_bw"}, + "job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"}, + "plot_general_colorBackground": true, + "plot_general_colorscheme": []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"}, + "plot_general_lineWidth": 1, + "plot_list_jobsPerPage": 10, + "plot_list_selectedMetrics": []string{"cpu_load", "mem_used", "flops_any", "mem_bw", "clock"}, + "plot_view_plotsPerRow": 4, + "plot_view_showPolarplot": true, + "plot_view_showRoofline": true, + "plot_view_showStatTable": true, + }, +} + +func main() { + var flagReinitDB, flagStopImmediately, flagSyncLDAP bool + var flagConfigFile string + var flagNewUser, flagDelUser string + flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize `job`, `tag`, and `jobtag` tables") + flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the `user` table with ldap") + 
flag.BoolVar(&flagStopImmediately, "no-server", false, "Do not start a server, stop right after initialization and argument handling") + flag.StringVar(&flagConfigFile, "config", "", "Location of the config file for this server (overwrites the defaults)") + flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `:[admin]:`") + flag.StringVar(&flagDelUser, "del-user", "", "Remove user by username") flag.Parse() - var err error - db, err = sqlx.Open("sqlite3", jobDBFile) - if err != nil { - log.Fatal(err) - } - - // See https://github.com/mattn/go-sqlite3/issues/274 - db.SetMaxOpenConns(1) - defer db.Close() - - if reinitDB { - if err = initDB(db, metricdata.JobArchivePath); err != nil { + if flagConfigFile != "" { + data, err := os.ReadFile(flagConfigFile) + if err != nil { + log.Fatal(err) + } + if err := json.Unmarshal(data, &programConfig); err != nil { log.Fatal(err) } } - config.Clusters, err = loadClusters() + var err error + db, err = sqlx.Open("sqlite3", programConfig.DB) if err != nil { log.Fatal(err) } + // Initialize sub-modules... 
+ + if !programConfig.DisableAuthentication { + if err := auth.Init(db, programConfig.LdapConfig); err != nil { + log.Fatal(err) + } + + if flagNewUser != "" { + if err := auth.AddUserToDB(db, flagNewUser); err != nil { + log.Fatal(err) + } + } + if flagDelUser != "" { + if err := auth.DelUserFromDB(db, flagDelUser); err != nil { + log.Fatal(err) + } + } + + if flagSyncLDAP { + auth.SyncWithLDAP(db) + } + } else if flagNewUser != "" || flagDelUser != "" { + log.Fatalln("arguments --add-user and --del-user can only be used if authentication is enabled") + } + + if err := config.Init(db, !programConfig.DisableAuthentication, programConfig.UiDefaults, programConfig.JobArchive); err != nil { + log.Fatal(err) + } + + if err := metricdata.Init(programConfig.JobArchive); err != nil { + log.Fatal(err) + } + + if flagReinitDB { + if err := initDB(db, programConfig.JobArchive); err != nil { + log.Fatal(err) + } + } + + if flagStopImmediately { + return + } + + // Build routes... + + graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{DB: db}})) + graphQLPlayground := playground.Handler("GraphQL playground", "/query") + + handleGetLogin := func(rw http.ResponseWriter, r *http.Request) { + templates.Render(rw, r, "login.html", &templates.Page{ + Title: "Login", + Login: &templates.LoginPage{}, + }) + } + r := mux.NewRouter() - loggedRouter := handlers.LoggingHandler(os.Stdout, r) + r.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + templates.Render(rw, r, "404.html", &templates.Page{ + Title: "Not found", + }) + }) - srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{ - Resolvers: &graph.Resolver{DB: db}})) - r.HandleFunc("/graphql-playground", playground.Handler("GraphQL playground", "/query")) - r.Handle("/query", srv) + r.Handle("/playground", graphQLPlayground) + r.Handle("/login", auth.Login(db)).Methods(http.MethodPost) + 
r.HandleFunc("/login", handleGetLogin).Methods(http.MethodGet) + r.HandleFunc("/logout", auth.Logout).Methods(http.MethodPost) - r.HandleFunc("/config.json", config.ServeConfig).Methods("GET") - - r.HandleFunc("/api/start-job", startJob).Methods("POST") - r.HandleFunc("/api/stop-job", stopJob).Methods("POST") - - if len(staticFiles) != 0 { - r.PathPrefix("/").Handler(http.FileServer(http.Dir(staticFiles))) + secured := r.PathPrefix("/").Subrouter() + if !programConfig.DisableAuthentication { + secured.Use(auth.Auth) } + secured.Handle("/query", graphQLEndpoint) + secured.HandleFunc("/api/jobs/start_job/", startJob).Methods(http.MethodPost) + secured.HandleFunc("/api/jobs/stop_job/", stopJob).Methods(http.MethodPost, http.MethodPut) + secured.HandleFunc("/api/jobs/stop_job/{id}", stopJob).Methods(http.MethodPost, http.MethodPut) + secured.HandleFunc("/config.json", config.ServeConfig).Methods(http.MethodGet) - log.Printf("GraphQL playground: http://localhost:%s/graphql-playground", port) - log.Printf("Home: http://localhost:%s/index.html", port) - log.Fatal(http.ListenAndServe("127.0.0.1:"+port, - handlers.CORS(handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}), - handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}), - handlers.AllowedOrigins([]string{"*"}))(loggedRouter))) -} - -func loadClusters() ([]*model.Cluster, error) { - entries, err := os.ReadDir(metricdata.JobArchivePath) - if err != nil { - return nil, err - } - - clusters := []*model.Cluster{} - for _, de := range entries { - bytes, err := os.ReadFile(filepath.Join(metricdata.JobArchivePath, de.Name(), "cluster.json")) - if err != nil { - return nil, err - } - - var cluster model.Cluster - if err := json.Unmarshal(bytes, &cluster); err != nil { - return nil, err - } - - if cluster.FilterRanges.StartTime.To.IsZero() { - cluster.FilterRanges.StartTime.To = time.Unix(0, 0) - } - - clusters = append(clusters, &cluster) - } - - return clusters, nil + 
r.PathPrefix("/").Handler(http.FileServer(http.Dir(programConfig.StaticFiles))) + handler := handlers.CORS( + handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}), + handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}), + handlers.AllowedOrigins([]string{"*"}))(handlers.LoggingHandler(os.Stdout, r)) + + // Start http or https server + if programConfig.HttpsCertFile != "" && programConfig.HttpsKeyFile != "" { + log.Printf("HTTPS server running at %s...", programConfig.Addr) + err = http.ListenAndServeTLS(programConfig.Addr, programConfig.HttpsCertFile, programConfig.HttpsKeyFile, handler) + } else { + log.Printf("HTTP server running at %s...", programConfig.Addr) + err = http.ListenAndServe(programConfig.Addr, handler) + } + log.Fatal(err) } From eb2df5aa1ceaae90b3ec6bb06f880cc070babfea Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 11:50:16 +0100 Subject: [PATCH 07/25] Add queries to metric data repositories --- metricdata/cc-metric-store.go | 122 ++++++++++++++++++++++++++++++++-- metricdata/influxdb-v2.go | 51 ++++++++++---- metricdata/metricdata.go | 55 ++++++++++++--- 3 files changed, 199 insertions(+), 29 deletions(-) diff --git a/metricdata/cc-metric-store.go b/metricdata/cc-metric-store.go index b65d284..2d18a79 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -56,7 +56,7 @@ func (ccms *CCMetricStore) Init(url string) error { return nil } -func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { +func (ccms *CCMetricStore) doRequest(job *model.Job, suffix string, metrics []string, ctx context.Context) (*http.Response, error) { from, to := job.StartTime.Unix(), job.StartTime.Add(time.Duration(job.Duration)*time.Second).Unix() reqBody := ApiRequestBody{} reqBody.Metrics = metrics @@ -69,18 +69,21 @@ func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx contex return nil, err } - 
authHeader := fmt.Sprintf("Bearer %s", ccms.jwt) - req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/api/%d/%d/timeseries?with-stats=true", ccms.url, from, to), bytes.NewReader(reqBodyBytes)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/api/%d/%d/%s", ccms.url, from, to, suffix), bytes.NewReader(reqBodyBytes)) if err != nil { return nil, err } - req.Header.Add("Authorization", authHeader) - res, err := ccms.client.Do(req) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) + return ccms.client.Do(req) +} + +func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { + res, err := ccms.doRequest(job, "timeseries?with-stats=true", metrics, ctx) if err != nil { return nil, err } - resdata := make([]map[string]ApiMetricData, 0, len(reqBody.Selectors)) + resdata := make([]map[string]ApiMetricData, 0, len(job.Nodes)) if err := json.NewDecoder(res.Body).Decode(&resdata); err != nil { return nil, err } @@ -101,7 +104,7 @@ func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx contex } if data.Avg == nil || data.Min == nil || data.Max == nil { - return nil, errors.New("no data") + return nil, fmt.Errorf("no data for node '%s' and metric '%s'", node, metric) } metricData.Series = append(metricData.Series, &schema.MetricSeries{ @@ -119,3 +122,108 @@ func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx contex return jobData, nil } + +func (ccms *CCMetricStore) LoadStats(job *model.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + res, err := ccms.doRequest(job, "stats", metrics, ctx) + if err != nil { + return nil, err + } + + resdata := make([]map[string]ApiStatsData, 0, len(job.Nodes)) + if err := json.NewDecoder(res.Body).Decode(&resdata); err != nil { + return nil, err + } + + stats := map[string]map[string]schema.MetricStatistics{} + for _, metric 
:= range metrics { + nodestats := map[string]schema.MetricStatistics{} + for i, node := range job.Nodes { + data := resdata[i][metric] + if data.Error != nil { + return nil, errors.New(*data.Error) + } + + if data.Samples == 0 { + return nil, fmt.Errorf("no data for node '%s' and metric '%s'", node, metric) + } + + nodestats[node] = schema.MetricStatistics{ + Avg: float64(data.Avg), + Min: float64(data.Min), + Max: float64(data.Max), + } + } + + stats[metric] = nodestats + } + + return stats, nil +} + +func (ccms *CCMetricStore) LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx context.Context) (map[string]map[string][]schema.Float, error) { + reqBody := ApiRequestBody{} + reqBody.Metrics = metrics + for _, node := range nodes { + reqBody.Selectors = append(reqBody.Selectors, []string{clusterId, node}) + } + + reqBodyBytes, err := json.Marshal(reqBody) + if err != nil { + return nil, err + } + + var req *http.Request + if nodes == nil { + req, err = http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/api/%d/%d/all-nodes", ccms.url, from, to), bytes.NewReader(reqBodyBytes)) + } else { + req, err = http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/api/%d/%d/timeseries", ccms.url, from, to), bytes.NewReader(reqBodyBytes)) + } + if err != nil { + return nil, err + } + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) + res, err := ccms.client.Do(req) + if err != nil { + return nil, err + } + + data := map[string]map[string][]schema.Float{} + if nodes == nil { + resdata := map[string]map[string]ApiMetricData{} + if err := json.NewDecoder(res.Body).Decode(&resdata); err != nil { + return nil, err + } + + for node, metrics := range resdata { + nodedata := map[string][]schema.Float{} + for metric, data := range metrics { + if data.Error != nil { + return nil, errors.New(*data.Error) + } + + nodedata[metric] = data.Data + } + data[node] = nodedata + } + } else { + resdata := 
make([]map[string]ApiMetricData, 0, len(nodes)) + if err := json.NewDecoder(res.Body).Decode(&resdata); err != nil { + return nil, err + } + + for i, node := range nodes { + metricsData := map[string][]schema.Float{} + for metric, data := range resdata[i] { + if data.Error != nil { + return nil, errors.New(*data.Error) + } + + metricsData[metric] = data.Data + } + + data[node] = metricsData + } + } + + return data, nil +} diff --git a/metricdata/influxdb-v2.go b/metricdata/influxdb-v2.go index 0089d9b..1719ab3 100644 --- a/metricdata/influxdb-v2.go +++ b/metricdata/influxdb-v2.go @@ -94,10 +94,33 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *model.Job, metrics []string, currentSeries.Data = append(currentSeries.Data, schema.Float(val)) } - return jobData, idb.addStats(job, jobData, metrics, hostsCond, ctx) + stats, err := idb.LoadStats(job, metrics, ctx) + if err != nil { + return nil, err + } + for metric, nodes := range stats { + jobMetric := jobData[metric] + for node, stats := range nodes { + for _, series := range jobMetric.Series { + if series.NodeID == node { + series.Statistics = &stats + } + } + } + } + + return jobData, nil } -func (idb *InfluxDBv2DataRepository) addStats(job *model.Job, jobData schema.JobData, metrics []string, hostsCond string, ctx context.Context) error { +func (idb *InfluxDBv2DataRepository) LoadStats(job *model.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { + stats := map[string]map[string]schema.MetricStatistics{} + + hostsConds := make([]string, 0, len(job.Nodes)) + for _, h := range job.Nodes { + hostsConds = append(hostsConds, fmt.Sprintf(`r.host == "%s"`, h)) + } + hostsCond := strings.Join(hostsConds, " or ") + for _, metric := range metrics { query := fmt.Sprintf(` data = from(bucket: "%s") @@ -115,10 +138,10 @@ func (idb *InfluxDBv2DataRepository) addStats(job *model.Job, jobData schema.Job idb.measurement, metric, hostsCond) rows, err := idb.queryClient.Query(ctx, 
query) if err != nil { - return err + return nil, err } - jobMetric := jobData[metric] + nodes := map[string]schema.MetricStatistics{} for rows.Next() { row := rows.Record() host := row.ValueByKey("host").(string) @@ -126,18 +149,18 @@ func (idb *InfluxDBv2DataRepository) addStats(job *model.Job, jobData schema.Job row.ValueByKey("min").(float64), row.ValueByKey("max").(float64) - for _, s := range jobMetric.Series { - if s.NodeID == host { - s.Statistics = &schema.MetricStatistics{ - Avg: avg, - Min: min, - Max: max, - } - break - } + nodes[host] = schema.MetricStatistics{ + Avg: avg, + Min: min, + Max: max, } } + stats[metric] = nodes } - return nil + return stats, nil +} + +func (idb *InfluxDBv2DataRepository) LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx context.Context) (map[string]map[string][]schema.Float, error) { + return nil, nil } diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index 9118f7e..cdb464b 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -2,7 +2,6 @@ package metricdata import ( "context" - "errors" "fmt" "github.com/ClusterCockpit/cc-jobarchive/config" @@ -13,6 +12,8 @@ import ( type MetricDataRepository interface { Init(url string) error LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) + LoadStats(job *model.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) + LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx context.Context) (map[string]map[string][]schema.Float, error) } var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepository{} @@ -55,10 +56,6 @@ func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.Job return repo.LoadData(job, metrics, ctx) } - if job.State != model.JobStateCompleted { - return nil, fmt.Errorf("job of state '%s' is not supported", job.State) - } - data, err := loadFromArchive(job) if err != 
nil { return nil, err @@ -78,9 +75,51 @@ func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.Job // Used for the jobsFootprint GraphQL-Query. TODO: Rename/Generalize. func LoadAverages(job *model.Job, metrics []string, data [][]schema.Float, ctx context.Context) error { - if job.State != model.JobStateCompleted { - return errors.New("only completed jobs are supported") + if job.State != model.JobStateRunning { + return loadAveragesFromArchive(job, metrics, data) } - return loadAveragesFromArchive(job, metrics, data) + repo, ok := metricDataRepos[job.ClusterID] + if !ok { + return fmt.Errorf("no metric data repository configured for '%s'", job.ClusterID) + } + + stats, err := repo.LoadStats(job, metrics, ctx) + if err != nil { + return err + } + + for i, m := range metrics { + nodes, ok := stats[m] + if !ok { + data[i] = append(data[i], schema.NaN) + continue + } + + sum := 0.0 + for _, node := range nodes { + sum += node.Avg + } + data[i] = append(data[i], schema.Float(sum)) + } + + return nil +} + +func LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx context.Context) (map[string]map[string][]schema.Float, error) { + repo, ok := metricDataRepos[clusterId] + if !ok { + return nil, fmt.Errorf("no metric data repository configured for '%s'", clusterId) + } + + data, err := repo.LoadNodeData(clusterId, metrics, nodes, from, to, ctx) + if err != nil { + return nil, err + } + + if data == nil { + return nil, fmt.Errorf("the metric data repository for '%s' does not support this query", clusterId) + } + + return data, nil } From 45dc12cc0c5c408c2b1d195b9029c5608608f1a7 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 11:50:47 +0100 Subject: [PATCH 08/25] Fix bug in archiving --- metricdata/archive.go | 16 ++++++++++------ rest-api.go | 4 +++- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/metricdata/archive.go b/metricdata/archive.go index 1a46290..a4d92fe 100644 --- a/metricdata/archive.go 
+++ b/metricdata/archive.go @@ -20,13 +20,17 @@ import ( // For a given job, return the path of the `data.json`/`meta.json` file. // TODO: Implement Issue ClusterCockpit/ClusterCockpit#97 -func getPath(job *model.Job, file string) (string, error) { +func getPath(job *model.Job, file string, checkLegacy bool) (string, error) { id, err := strconv.Atoi(strings.Split(job.JobID, ".")[0]) if err != nil { return "", err } lvl1, lvl2 := fmt.Sprintf("%d", id/1000), fmt.Sprintf("%03d", id%1000) + if !checkLegacy { + return filepath.Join(JobArchivePath, job.ClusterID, lvl1, lvl2, strconv.FormatInt(job.StartTime.Unix(), 10), file), nil + } + legacyPath := filepath.Join(JobArchivePath, job.ClusterID, lvl1, lvl2, file) if _, err := os.Stat(legacyPath); errors.Is(err, os.ErrNotExist) { return filepath.Join(JobArchivePath, job.ClusterID, lvl1, lvl2, strconv.FormatInt(job.StartTime.Unix(), 10), file), nil @@ -37,7 +41,7 @@ func getPath(job *model.Job, file string) (string, error) { // Assuming job is completed/archived, return the jobs metric data. func loadFromArchive(job *model.Job) (schema.JobData, error) { - filename, err := getPath(job, "data.json") + filename, err := getPath(job, "data.json", true) if err != nil { return nil, err } @@ -63,7 +67,7 @@ func UpdateTags(job *model.Job, tags []*model.JobTag) error { return nil } - filename, err := getPath(job, "meta.json") + filename, err := getPath(job, "meta.json", true) if err != nil { return err } @@ -106,7 +110,7 @@ func UpdateTags(job *model.Job, tags []*model.JobTag) error { // Helper to metricdata.LoadAverages(). 
func loadAveragesFromArchive(job *model.Job, metrics []string, data [][]schema.Float) error { - filename, err := getPath(job, "meta.json") + filename, err := getPath(job, "meta.json", true) if err != nil { return err } @@ -191,7 +195,7 @@ func ArchiveJob(job *model.Job, ctx context.Context) error { } } - dirPath, err := getPath(job, "") + dirPath, err := getPath(job, "", false) if err != nil { return err } @@ -218,7 +222,7 @@ func ArchiveJob(job *model.Job, ctx context.Context) error { return err } writer = bufio.NewWriter(f) - if err := json.NewEncoder(writer).Encode(metaData); err != nil { + if err := json.NewEncoder(writer).Encode(jobData); err != nil { return err } if err := writer.Flush(); err != nil { diff --git a/rest-api.go b/rest-api.go index 1d02aad..4cb38d9 100644 --- a/rest-api.go +++ b/rest-api.go @@ -126,7 +126,9 @@ func stopJob(rw http.ResponseWriter, r *http.Request) { return } - if _, err := db.Exec(`UPDATE job SET job_state = ? WHERE job.id = ?`, model.JobStateCompleted, job.ID); err != nil { + if _, err := db.Exec( + `UPDATE job SET job_state = ?, duration = ? 
WHERE job.id = ?`, + model.JobStateCompleted, job.Duration, job.ID); err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return } From c79fcec3ba4a747b7bf286854240df6d937ac3c0 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 12:09:45 +0100 Subject: [PATCH 09/25] Update submodule --- frontend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend b/frontend index b487af3..41b8953 160000 --- a/frontend +++ b/frontend @@ -1 +1 @@ -Subproject commit b487af3496b46942d9848337bc2821575a1390b2 +Subproject commit 41b8953eb14e52fe9f3c4fe69167202f8002de45 From a26d652332579a259b3201f353cda2a9cd3c4d8d Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Wed, 8 Dec 2021 15:50:03 +0100 Subject: [PATCH 10/25] Add templates and custom urls for monitoring views --- README.md | 11 +++- auth/auth.go | 6 +-- server.go | 91 +++++++++++++++++++++++++++++++-- templates/base.html | 6 ++- templates/home.html | 10 ++++ templates/login.html | 1 - templates/monitoring/job.html | 29 +++++++++++ templates/monitoring/jobs.html | 20 ++++++++ templates/monitoring/user.html | 22 ++++++++ templates/monitoring/users.html | 14 +++++ templates/templates.go | 21 ++++++-- 11 files changed, 216 insertions(+), 15 deletions(-) create mode 100644 templates/home.html create mode 100644 templates/monitoring/job.html create mode 100644 templates/monitoring/jobs.html create mode 100644 templates/monitoring/user.html create mode 100644 templates/monitoring/users.html diff --git a/README.md b/README.md index f57a0f2..2c941af 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ git clone --recursive git@github.com:ClusterCockpit/cc-jobarchive.git # Prepare frontend cd ./cc-jobarchive/frontend yarn install -yarn build +CCFRONTEND_ROLLUP_INTRO="" yarn build cd .. go get @@ -33,7 +33,16 @@ touch ./var/job.db ./cc-jobarchive --help ``` +### Configuration + +A config file in the JSON format can be provided using `--config` to override the defaults. 
Loop at the beginning of `server.go` for the defaults and consequently the format of the configuration file. + ### Update GraphQL schema This project uses [gqlgen](https://github.com/99designs/gqlgen) for the GraphQL API. The schema can be found in `./graph/schema.graphqls`. After changing it, you need to run `go run github.com/99designs/gqlgen` which will update `graph/model`. In case new resolvers are needed, they will be inserted into `graph/schema.resolvers.go`, where you will need to implement them. +### TODO + +- [ ] Documentation +- [ ] Write more TODOs + diff --git a/auth/auth.go b/auth/auth.go index 66fd1e9..d10ac3b 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -168,7 +168,7 @@ func Login(db *sqlx.DB) http.Handler { if err != nil { log.Printf("login failed: %s\n", err.Error()) rw.WriteHeader(http.StatusUnauthorized) - templates.Render(rw, r, "login.html", &templates.Page{ + templates.Render(rw, r, "login", &templates.Page{ Title: "Login failed", Login: &templates.LoginPage{ Error: "Username or password incorrect", @@ -264,7 +264,7 @@ func Auth(next http.Handler) http.Handler { log.Printf("authentication failed: no session or jwt found\n") rw.WriteHeader(http.StatusUnauthorized) - templates.Render(rw, r, "login.html", &templates.Page{ + templates.Render(rw, r, "login", &templates.Page{ Title: "Authentication failed", Login: &templates.LoginPage{ Error: "No valid session or JWT provided", @@ -320,7 +320,7 @@ func Logout(rw http.ResponseWriter, r *http.Request) { } } - templates.Render(rw, r, "login.html", &templates.Page{ + templates.Render(rw, r, "login", &templates.Page{ Title: "Logout successful", Login: &templates.LoginPage{ Info: "Logout successful", diff --git a/server.go b/server.go index 58e3d02..5cd4790 100644 --- a/server.go +++ b/server.go @@ -3,6 +3,7 @@ package main import ( "encoding/json" "flag" + "fmt" "log" "net/http" "os" @@ -141,11 +142,12 @@ func main() { // Build routes... 
- graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{DB: db}})) + resolver := &graph.Resolver{DB: db} + graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) graphQLPlayground := playground.Handler("GraphQL playground", "/query") handleGetLogin := func(rw http.ResponseWriter, r *http.Request) { - templates.Render(rw, r, "login.html", &templates.Page{ + templates.Render(rw, r, "login", &templates.Page{ Title: "Login", Login: &templates.LoginPage{}, }) @@ -153,7 +155,7 @@ func main() { r := mux.NewRouter() r.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - templates.Render(rw, r, "404.html", &templates.Page{ + templates.Render(rw, r, "404", &templates.Page{ Title: "Not found", }) }) @@ -170,9 +172,17 @@ func main() { secured.Handle("/query", graphQLEndpoint) secured.HandleFunc("/api/jobs/start_job/", startJob).Methods(http.MethodPost) secured.HandleFunc("/api/jobs/stop_job/", stopJob).Methods(http.MethodPost, http.MethodPut) - secured.HandleFunc("/api/jobs/stop_job/{id}", stopJob).Methods(http.MethodPost, http.MethodPut) + secured.HandleFunc("/api/jobs/stop_job/{id:[0-9]+}", stopJob).Methods(http.MethodPost, http.MethodPut) secured.HandleFunc("/config.json", config.ServeConfig).Methods(http.MethodGet) + secured.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + templates.Render(rw, r, "home", &templates.Page{ + Title: "ClusterCockpit", + }) + }) + + monitoringRoutes(secured, resolver) + r.PathPrefix("/").Handler(http.FileServer(http.Dir(programConfig.StaticFiles))) handler := handlers.CORS( handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}), @@ -189,3 +199,76 @@ func main() { } log.Fatal(err) } + +func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { + router.HandleFunc("/monitoring/jobs/", func(rw http.ResponseWriter, r *http.Request) { + 
conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + templates.Render(rw, r, "monitoring/jobs/", &templates.Page{ + Title: "Jobs - ClusterCockpit", + Config: conf, + }) + }) + + router.HandleFunc("/monitoring/job/{id:[0-9]+}", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + id := mux.Vars(r)["id"] + job, err := resolver.Query().Job(r.Context(), id) + if err != nil { + http.Error(rw, err.Error(), http.StatusNotFound) + return + } + + templates.Render(rw, r, "monitoring/job/", &templates.Page{ + Title: fmt.Sprintf("Job %s - ClusterCockpit", job.JobID), + Config: conf, + Infos: map[string]interface{}{ + "id": id, + "jobId": job.JobID, + "clusterId": job.ClusterID, + }, + }) + }) + + router.HandleFunc("/monitoring/users/", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + templates.Render(rw, r, "monitoring/users/", &templates.Page{ + Title: "Users - ClusterCockpit", + Config: conf, + }) + }) + + router.HandleFunc("/monitoring/user/{id}", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + id := mux.Vars(r)["id"] + // TODO: One could check if the user exists, but that would be unhelpfull if authentication + // is disabled or the user does not exist but has started jobs. 
+ + templates.Render(rw, r, "monitoring/user/", &templates.Page{ + Title: fmt.Sprintf("User %s - ClusterCockpit", id), + Config: conf, + Infos: map[string]interface{}{ + "userId": id, + }, + }) + }) +} diff --git a/templates/base.html b/templates/base.html index 32d6f6b..4691f30 100644 --- a/templates/base.html +++ b/templates/base.html @@ -9,6 +9,9 @@ + + + {{block "stylesheets" .}}{{end}}
@@ -20,5 +23,6 @@
+ {{block "javascript" .}}{{end}} - \ No newline at end of file + diff --git a/templates/home.html b/templates/home.html new file mode 100644 index 0000000..7bbee5a --- /dev/null +++ b/templates/home.html @@ -0,0 +1,10 @@ +{{define "content"}} +
+
+ +
+
+{{end}} diff --git a/templates/login.html b/templates/login.html index 35776a4..7f6507b 100644 --- a/templates/login.html +++ b/templates/login.html @@ -1,4 +1,3 @@ -{{template "base.html" .}} {{define "content"}}
diff --git a/templates/monitoring/job.html b/templates/monitoring/job.html new file mode 100644 index 0000000..5f8b7f6 --- /dev/null +++ b/templates/monitoring/job.html @@ -0,0 +1,29 @@ +{{define "content"}} +
+{{end}} + +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + +{{end}} diff --git a/templates/monitoring/jobs.html b/templates/monitoring/jobs.html new file mode 100644 index 0000000..e6382c2 --- /dev/null +++ b/templates/monitoring/jobs.html @@ -0,0 +1,20 @@ +{{define "content"}} +
+{{end}} + +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + +{{end}} diff --git a/templates/monitoring/user.html b/templates/monitoring/user.html new file mode 100644 index 0000000..ee16cdc --- /dev/null +++ b/templates/monitoring/user.html @@ -0,0 +1,22 @@ +{{define "content"}} +
+{{end}} + +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + +{{end}} diff --git a/templates/monitoring/users.html b/templates/monitoring/users.html new file mode 100644 index 0000000..ff7c9d6 --- /dev/null +++ b/templates/monitoring/users.html @@ -0,0 +1,14 @@ +{{define "content"}} +
+{{end}} + +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + +{{end}} diff --git a/templates/templates.go b/templates/templates.go index c1c7c07..6b0a267 100644 --- a/templates/templates.go +++ b/templates/templates.go @@ -6,11 +6,13 @@ import ( "net/http" ) -var templates *template.Template +var templates map[string]*template.Template type Page struct { - Title string - Login *LoginPage + Title string + Login *LoginPage + Infos map[string]interface{} + Config map[string]interface{} } type LoginPage struct { @@ -19,11 +21,20 @@ type LoginPage struct { } func init() { - templates = template.Must(template.ParseGlob("./templates/*.html")) + base := template.Must(template.ParseFiles("./templates/base.html")) + templates = map[string]*template.Template{ + "home": template.Must(template.Must(base.Clone()).ParseFiles("./templates/home.html")), + "404": template.Must(template.Must(base.Clone()).ParseFiles("./templates/404.html")), + "login": template.Must(template.Must(base.Clone()).ParseFiles("./templates/login.html")), + "monitoring/jobs/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/jobs.html")), + "monitoring/job/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/job.html")), + "monitoring/users/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/users.html")), + "monitoring/user/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/user.html")), + } } func Render(rw http.ResponseWriter, r *http.Request, name string, page *Page) { - if err := templates.ExecuteTemplate(rw, name, page); err != nil { + if err := templates[name].Execute(rw, page); err != nil { log.Printf("template error: %s\n", err.Error()) } } From 8178b6e8547563f3ba7ed1b18148040cb9e5bfe8 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Thu, 9 Dec 2021 16:25:48 +0100 Subject: [PATCH 11/25] Implement nodeMetrics query --- graph/generated/generated.go | 469 
+++++++++++++++++++++++++++++++++++ graph/model/models_gen.go | 10 + graph/schema.graphqls | 12 + graph/schema.resolvers.go | 33 +++ metricdata/metricdata.go | 8 + 5 files changed, 532 insertions(+) diff --git a/graph/generated/generated.go b/graph/generated/generated.go index 274483c..920feff 100644 --- a/graph/generated/generated.go +++ b/graph/generated/generated.go @@ -166,6 +166,16 @@ type ComplexityRoot struct { UpdateConfiguration func(childComplexity int, name string, value string) int } + NodeMetric struct { + Data func(childComplexity int) int + Name func(childComplexity int) int + } + + NodeMetrics struct { + ID func(childComplexity int) int + Metrics func(childComplexity int) int + } + Query struct { Clusters func(childComplexity int) int Job func(childComplexity int, id string) int @@ -173,6 +183,7 @@ type ComplexityRoot struct { Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, groupBy *model.Aggregate) int + NodeMetrics func(childComplexity int, cluster string, nodes []string, metrics []string, from time.Time, to time.Time) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int Tags func(childComplexity int) int } @@ -202,6 +213,7 @@ type QueryResolver interface { Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) + NodeMetrics(ctx context.Context, cluster string, nodes 
[]string, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) } type executableSchema struct { @@ -769,6 +781,34 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Mutation.UpdateConfiguration(childComplexity, args["name"].(string), args["value"].(string)), true + case "NodeMetric.data": + if e.complexity.NodeMetric.Data == nil { + break + } + + return e.complexity.NodeMetric.Data(childComplexity), true + + case "NodeMetric.name": + if e.complexity.NodeMetric.Name == nil { + break + } + + return e.complexity.NodeMetric.Name(childComplexity), true + + case "NodeMetrics.id": + if e.complexity.NodeMetrics.ID == nil { + break + } + + return e.complexity.NodeMetrics.ID(childComplexity), true + + case "NodeMetrics.metrics": + if e.complexity.NodeMetrics.Metrics == nil { + break + } + + return e.complexity.NodeMetrics.Metrics(childComplexity), true + case "Query.clusters": if e.complexity.Query.Clusters == nil { break @@ -836,6 +876,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["groupBy"].(*model.Aggregate)), true + case "Query.nodeMetrics": + if e.complexity.Query.NodeMetrics == nil { + break + } + + args, err := ec.field_Query_nodeMetrics_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.NodeMetrics(childComplexity, args["cluster"].(string), args["nodes"].([]string), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time)), true + case "Query.rooflineHeatmap": if e.complexity.Query.RooflineHeatmap == nil { break @@ -1022,6 +1074,16 @@ type MetricFootprints { enum Aggregate { USER, PROJECT, CLUSTER } +type NodeMetric { + name: String! + data: [NullableFloat!]! +} + +type NodeMetrics { + id: String! + metrics: [NodeMetric!]! +} + type Query { clusters: [Cluster!]! 
# List of all clusters tags: [JobTag!]! # List of all tags @@ -1034,6 +1096,8 @@ type Query { jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! + + nodeMetrics(cluster: ID!, nodes: [String!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]! } type Mutation { @@ -1393,6 +1457,57 @@ func (ec *executionContext) field_Query_jobs_args(ctx context.Context, rawArgs m return args, nil } +func (ec *executionContext) field_Query_nodeMetrics_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 string + if tmp, ok := rawArgs["cluster"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) + arg0, err = ec.unmarshalNID2string(ctx, tmp) + if err != nil { + return nil, err + } + } + args["cluster"] = arg0 + var arg1 []string + if tmp, ok := rawArgs["nodes"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("nodes")) + arg1, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + if err != nil { + return nil, err + } + } + args["nodes"] = arg1 + var arg2 []string + if tmp, ok := rawArgs["metrics"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + arg2, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + if err != nil { + return nil, err + } + } + args["metrics"] = arg2 + var arg3 time.Time + if tmp, ok := rawArgs["from"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) + arg3, err = ec.unmarshalNTime2timeᚐTime(ctx, tmp) + if err != nil { + return nil, err + } + } + args["from"] = arg3 + var arg4 time.Time + if tmp, ok := rawArgs["to"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) + arg4, err = ec.unmarshalNTime2timeᚐTime(ctx, tmp) + if err != nil { + return nil, err + } + } + args["to"] = 
arg4 + return args, nil +} + func (ec *executionContext) field_Query_rooflineHeatmap_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} @@ -4127,6 +4242,146 @@ func (ec *executionContext) _Mutation_updateConfiguration(ctx context.Context, f return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } +func (ec *executionContext) _NodeMetric_name(ctx context.Context, field graphql.CollectedField, obj *model.NodeMetric) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "NodeMetric", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _NodeMetric_data(ctx context.Context, field graphql.CollectedField, obj *model.NodeMetric) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "NodeMetric", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if 
resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]schema.Float) + fc.Result = res + return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _NodeMetrics_id(ctx context.Context, field graphql.CollectedField, obj *model.NodeMetrics) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "NodeMetrics", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _NodeMetrics_metrics(ctx context.Context, field graphql.CollectedField, obj *model.NodeMetrics) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "NodeMetrics", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Metrics, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be 
null") + } + return graphql.Null + } + res := resTmp.([]*model.NodeMetric) + fc.Result = res + return ec.marshalNNodeMetric2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetricᚄ(ctx, field.Selections, res) +} + func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -4446,6 +4701,48 @@ func (ec *executionContext) _Query_rooflineHeatmap(ctx context.Context, field gr return ec.marshalNFloat2ᚕᚕfloat64ᚄ(ctx, field.Selections, res) } +func (ec *executionContext) _Query_nodeMetrics(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Query", + Field: field, + Args: nil, + IsMethod: true, + IsResolver: true, + } + + ctx = graphql.WithFieldContext(ctx, fc) + rawArgs := field.ArgumentMap(ec.Variables) + args, err := ec.field_Query_nodeMetrics_args(ctx, rawArgs) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + fc.Args = args + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().NodeMetrics(rctx, args["cluster"].(string), args["nodes"].([]string), args["metrics"].([]string), args["from"].(time.Time), args["to"].(time.Time)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.NodeMetrics) + fc.Result = res + return ec.marshalNNodeMetrics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetricsᚄ(ctx, field.Selections, res) +} + func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { defer 
func() { if r := recover(); r != nil { @@ -6671,6 +6968,70 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) return out } +var nodeMetricImplementors = []string{"NodeMetric"} + +func (ec *executionContext) _NodeMetric(ctx context.Context, sel ast.SelectionSet, obj *model.NodeMetric) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, nodeMetricImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("NodeMetric") + case "name": + out.Values[i] = ec._NodeMetric_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "data": + out.Values[i] = ec._NodeMetric_data(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + +var nodeMetricsImplementors = []string{"NodeMetrics"} + +func (ec *executionContext) _NodeMetrics(ctx context.Context, sel ast.SelectionSet, obj *model.NodeMetrics) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, nodeMetricsImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("NodeMetrics") + case "id": + out.Values[i] = ec._NodeMetrics_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "metrics": + out.Values[i] = ec._NodeMetrics_metrics(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var queryImplementors = []string{"Query"} func (ec *executionContext) _Query(ctx context.Context, sel 
ast.SelectionSet) graphql.Marshaler { @@ -6795,6 +7156,20 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr } return res }) + case "nodeMetrics": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_nodeMetrics(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + return res + }) case "__type": out.Values[i] = ec._Query___type(ctx, field) case "__schema": @@ -7744,6 +8119,100 @@ func (ec *executionContext) marshalNMetricFootprints2ᚕᚖgithubᚗcomᚋCluste return ret } +func (ec *executionContext) marshalNNodeMetric2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetricᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeMetric) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNNodeMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetric(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNNodeMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetric(ctx context.Context, sel ast.SelectionSet, v *model.NodeMetric) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._NodeMetric(ctx, sel, v) +} + +func (ec *executionContext) marshalNNodeMetrics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetricsᚄ(ctx 
context.Context, sel ast.SelectionSet, v []*model.NodeMetrics) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNNodeMetrics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetrics(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNNodeMetrics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetrics(ctx context.Context, sel ast.SelectionSet, v *model.NodeMetrics) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._NodeMetrics(ctx, sel, v) +} + func (ec *executionContext) unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloat(ctx context.Context, v interface{}) (schema.Float, error) { var res schema.Float err := res.UnmarshalGQL(v) diff --git a/graph/model/models_gen.go b/graph/model/models_gen.go index 71e9104..7a5042d 100644 --- a/graph/model/models_gen.go +++ b/graph/model/models_gen.go @@ -111,6 +111,16 @@ type MetricFootprints struct { Footprints []schema.Float `json:"footprints"` } +type NodeMetric struct { + Name string `json:"name"` + Data []schema.Float `json:"data"` +} + +type NodeMetrics struct { + ID string `json:"id"` + Metrics []*NodeMetric `json:"metrics"` +} + type OrderByInput struct { Field string `json:"field"` Order SortDirectionEnum `json:"order"` diff --git a/graph/schema.graphqls b/graph/schema.graphqls index 9a7460b..4c9b3b7 100644 --- a/graph/schema.graphqls +++ 
b/graph/schema.graphqls @@ -87,6 +87,16 @@ type MetricFootprints { enum Aggregate { USER, PROJECT, CLUSTER } +type NodeMetric { + name: String! + data: [NullableFloat!]! +} + +type NodeMetrics { + id: String! + metrics: [NodeMetric!]! +} + type Query { clusters: [Cluster!]! # List of all clusters tags: [JobTag!]! # List of all tags @@ -99,6 +109,8 @@ type Query { jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! + + nodeMetrics(cluster: ID!, nodes: [String!], metrics: [String!], from: Time!, to: Time!): [NodeMetrics!]! } type Mutation { diff --git a/graph/schema.resolvers.go b/graph/schema.resolvers.go index c7e4699..cb5c749 100644 --- a/graph/schema.resolvers.go +++ b/graph/schema.resolvers.go @@ -5,9 +5,12 @@ package graph import ( "context" + "errors" "fmt" "strconv" + "time" + "github.com/ClusterCockpit/cc-jobarchive/auth" "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph/generated" "github.com/ClusterCockpit/cc-jobarchive/graph/model" @@ -199,6 +202,36 @@ func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.Job return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY) } +func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) { + user := auth.GetUser(ctx) + if user != nil && !user.IsAdmin { + return nil, errors.New("you need to be an administrator for this query") + } + + data, err := metricdata.LoadNodeData(cluster, metrics, nodes, from.Unix(), to.Unix(), ctx) + if err != nil { + return nil, err + } + + res := make([]*model.NodeMetrics, 0, len(data)) + for node, metrics := range data { + nodeMetrics := make([]*model.NodeMetric, 0, len(metrics)) + for metric, data := range metrics { + nodeMetrics = 
append(nodeMetrics, &model.NodeMetric{ + Name: metric, + Data: data, + }) + } + + res = append(res, &model.NodeMetrics{ + ID: node, + Metrics: nodeMetrics, + }) + } + + return res, nil +} + // Job returns generated.JobResolver implementation. func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} } diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index cdb464b..dd2ba1b 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -10,9 +10,17 @@ import ( ) type MetricDataRepository interface { + // Initialize this MetricDataRepository. One instance of + // this interface will only ever be responsible for one cluster. Init(url string) error + + // Return the JobData for the given job, only with the requested metrics. LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) + + // Return a map of metrics to a map of nodes to the metric statistics of the job. LoadStats(job *model.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) + + // Return a map of nodes to a map of metrics to the data for the requested time. 
LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx context.Context) (map[string]map[string][]schema.Float, error) } From b1d2403839d9c51db2307b534dd0da1ca74b6fc1 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Thu, 9 Dec 2021 16:26:59 +0100 Subject: [PATCH 12/25] Bugfixes --- auth/ldap.go | 2 +- metricdata/cc-metric-store.go | 13 +++++++++---- metricdata/influxdb-v2.go | 4 ++-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/auth/ldap.go b/auth/ldap.go index bb401bc..1410050 100644 --- a/auth/ldap.go +++ b/auth/ldap.go @@ -29,7 +29,7 @@ var ldapAdminPassword string func initLdap(config *LdapConfig) error { ldapAdminPassword = os.Getenv("LDAP_ADMIN_PASSWORD") if ldapAdminPassword == "" { - log.Println("warning: environment variable 'LDAP_ADMIN_PASSWORD' not set") + log.Println("warning: environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync or authentication will not work)") } ldapConfig = config diff --git a/metricdata/cc-metric-store.go b/metricdata/cc-metric-store.go index 2d18a79..1d3c193 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "log" "net/http" "os" "time" @@ -50,7 +51,7 @@ func (ccms *CCMetricStore) Init(url string) error { ccms.url = url // os.Getenv("CCMETRICSTORE_URL") ccms.jwt = os.Getenv("CCMETRICSTORE_JWT") if ccms.jwt == "" { - return errors.New("environment variable 'CCMETRICSTORE_JWT' not set") + log.Println("warning: environment variable 'CCMETRICSTORE_JWT' not set") } return nil @@ -73,7 +74,9 @@ func (ccms *CCMetricStore) doRequest(job *model.Job, suffix string, metrics []st if err != nil { return nil, err } - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) + if ccms.jwt != "" { + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) + } return ccms.client.Do(req) } @@ -174,14 +177,16 @@ func (ccms *CCMetricStore) LoadNodeData(clusterId string, metrics, nodes []strin var req 
*http.Request if nodes == nil { - req, err = http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/api/%d/%d/all-nodes", ccms.url, from, to), bytes.NewReader(reqBodyBytes)) + req, err = http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/api/%s/%d/%d/all-nodes", ccms.url, clusterId, from, to), bytes.NewReader(reqBodyBytes)) } else { req, err = http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/api/%d/%d/timeseries", ccms.url, from, to), bytes.NewReader(reqBodyBytes)) } if err != nil { return nil, err } - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) + if ccms.jwt != "" { + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) + } res, err := ccms.client.Do(req) if err != nil { return nil, err diff --git a/metricdata/influxdb-v2.go b/metricdata/influxdb-v2.go index 1719ab3..5c1ade0 100644 --- a/metricdata/influxdb-v2.go +++ b/metricdata/influxdb-v2.go @@ -2,8 +2,8 @@ package metricdata import ( "context" - "errors" "fmt" + "log" "os" "strings" "time" @@ -24,7 +24,7 @@ type InfluxDBv2DataRepository struct { func (idb *InfluxDBv2DataRepository) Init(url string) error { token := os.Getenv("INFLUXDB_V2_TOKEN") if token == "" { - return errors.New("warning: environment variable 'INFLUXDB_V2_TOKEN' not set") + log.Println("warning: environment variable 'INFLUXDB_V2_TOKEN' not set") } idb.client = influxdb2.NewClient(url, token) From 9c5c8a05e258bafc927af5b08a54cfc55d48aaba Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Thu, 9 Dec 2021 16:27:48 +0100 Subject: [PATCH 13/25] Add more views --- README.md | 11 ++- frontend | 2 +- server.go | 108 +++++++++++++++++++++++++++-- templates/home.html | 55 +++++++++++++-- templates/monitoring/analysis.html | 18 +++++ templates/monitoring/jobs.html | 2 +- templates/monitoring/node.html | 21 ++++++ templates/monitoring/systems.html | 19 +++++ templates/templates.go | 33 +++++---- 9 files changed, 245 insertions(+), 24 deletions(-) create mode 100644 
templates/monitoring/analysis.html create mode 100644 templates/monitoring/node.html create mode 100644 templates/monitoring/systems.html diff --git a/README.md b/README.md index 2c941af..a3e25aa 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# ClusterCockpit with a Golang backend (Only supports archived jobs) +# ClusterCockpit with a Golang backend [![Build](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml) @@ -11,7 +11,12 @@ git clone --recursive git@github.com:ClusterCockpit/cc-jobarchive.git # Prepare frontend cd ./cc-jobarchive/frontend yarn install -CCFRONTEND_ROLLUP_INTRO="" yarn build +export CCFRONTEND_ROLLUP_INTRO=' +const JOBVIEW_URL = job => `/monitoring/job/${job.jobId}`; +const USERVIEW_URL = userId => `/monitoring/user/${userId}`; +const TAG_URL = tag => `/monitoring/jobs/?tag=${tag.id}`; +' +yarn build cd .. go get @@ -45,4 +50,6 @@ This project uses [gqlgen](https://github.com/99designs/gqlgen) for the GraphQL - [ ] Documentation - [ ] Write more TODOs +- [ ] Caching +- [ ] Generate JWTs based on the provided keys diff --git a/frontend b/frontend index 41b8953..cc48461 160000 --- a/frontend +++ b/frontend @@ -1 +1 @@ -Subproject commit 41b8953eb14e52fe9f3c4fe69167202f8002de45 +Subproject commit cc48461a810dbd3565000150fc99332743de92ba diff --git a/server.go b/server.go index 5cd4790..b87c881 100644 --- a/server.go +++ b/server.go @@ -176,8 +176,27 @@ func main() { secured.HandleFunc("/config.json", config.ServeConfig).Methods(http.MethodGet) secured.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + infos := map[string]interface{}{ + "clusters": config.Clusters, + "username": "", + "admin": true, + } + + if user := auth.GetUser(r.Context()); user != nil { + infos["username"] = 
user.Username + infos["admin"] = user.IsAdmin + } + templates.Render(rw, r, "home", &templates.Page{ - Title: "ClusterCockpit", + Title: "ClusterCockpit", + Config: conf, + Infos: infos, }) }) @@ -208,9 +227,34 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { return } + filterPresets := map[string]interface{}{} + query := r.URL.Query() + if query.Get("tag") != "" { + filterPresets["tagId"] = query.Get("tag") + } + if query.Get("cluster") != "" { + filterPresets["clusterId"] = query.Get("cluster") + } + if query.Get("project") != "" { + filterPresets["projectId"] = query.Get("project") + } + if query.Get("running") == "true" { + filterPresets["isRunning"] = true + } + if query.Get("running") == "false" { + filterPresets["isRunning"] = false + } + if query.Get("from") != "" && query.Get("to") != "" { + filterPresets["startTime"] = map[string]string{ + "from": query.Get("from"), + "to": query.Get("to"), + } + } + templates.Render(rw, r, "monitoring/jobs/", &templates.Page{ - Title: "Jobs - ClusterCockpit", - Config: conf, + Title: "Jobs - ClusterCockpit", + Config: conf, + FilterPresets: filterPresets, }) }) @@ -266,8 +310,64 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { templates.Render(rw, r, "monitoring/user/", &templates.Page{ Title: fmt.Sprintf("User %s - ClusterCockpit", id), Config: conf, + Infos: map[string]interface{}{"userId": id}, + }) + }) + + router.HandleFunc("/monitoring/analysis/", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + filterPresets := map[string]interface{}{} + query := r.URL.Query() + if query.Get("cluster") != "" { + filterPresets["clusterId"] = query.Get("cluster") + } + + templates.Render(rw, r, "monitoring/analysis/", &templates.Page{ + Title: "Analysis View - ClusterCockpit", + Config: conf, + FilterPresets: filterPresets, + }) + }) + + 
router.HandleFunc("/monitoring/systems/", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + filterPresets := map[string]interface{}{} + query := r.URL.Query() + if query.Get("cluster") != "" { + filterPresets["clusterId"] = query.Get("cluster") + } + + templates.Render(rw, r, "monitoring/systems/", &templates.Page{ + Title: "System View - ClusterCockpit", + Config: conf, + FilterPresets: filterPresets, + }) + }) + + router.HandleFunc("/monitoring/node/{clusterId}/{nodeId}", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + vars := mux.Vars(r) + templates.Render(rw, r, "monitoring/node/", &templates.Page{ + Title: fmt.Sprintf("Node %s - ClusterCockpit", vars["nodeId"]), + Config: conf, Infos: map[string]interface{}{ - "userId": id, + "nodeId": vars["nodeId"], + "clusterId": vars["clusterId"], }, }) }) diff --git a/templates/home.html b/templates/home.html index 7bbee5a..6b23b2f 100644 --- a/templates/home.html +++ b/templates/home.html @@ -1,10 +1,57 @@ {{define "content"}}
- + {{if .Infos.username}} + {{ .Infos.username }} + {{if .Infos.admin}} + Admin + {{end}} + {{end}} +
+
+
+ +
+
+
+
+ {{if .Infos.admin}} +
+ +
+ {{else}} +
+ +
+ {{end}} +
+

Clusters

+ + + + + + + + + + + {{range .Infos.clusters}} + + + + + + + {{end}} + +
Name/IDJobsSystem ViewAnalysis View
{{.ClusterID}}JobsSystem ViewAnalysis View
{{end}} diff --git a/templates/monitoring/analysis.html b/templates/monitoring/analysis.html new file mode 100644 index 0000000..d6c0b80 --- /dev/null +++ b/templates/monitoring/analysis.html @@ -0,0 +1,18 @@ +{{define "content"}} +
+{{end}} + +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + +{{end}} diff --git a/templates/monitoring/jobs.html b/templates/monitoring/jobs.html index e6382c2..1d70968 100644 --- a/templates/monitoring/jobs.html +++ b/templates/monitoring/jobs.html @@ -7,7 +7,7 @@ {{end}} {{define "javascript"}} + +{{end}} diff --git a/templates/monitoring/systems.html b/templates/monitoring/systems.html new file mode 100644 index 0000000..b8e6ccf --- /dev/null +++ b/templates/monitoring/systems.html @@ -0,0 +1,19 @@ +{{define "content"}} +
+{{end}} + +{{define "stylesheets"}} + +{{end}} +{{define "javascript"}} + + +{{end}} diff --git a/templates/templates.go b/templates/templates.go index 6b0a267..327ef19 100644 --- a/templates/templates.go +++ b/templates/templates.go @@ -9,10 +9,11 @@ import ( var templates map[string]*template.Template type Page struct { - Title string - Login *LoginPage - Infos map[string]interface{} - Config map[string]interface{} + Title string + Login *LoginPage + FilterPresets map[string]interface{} + Infos map[string]interface{} + Config map[string]interface{} } type LoginPage struct { @@ -23,18 +24,26 @@ type LoginPage struct { func init() { base := template.Must(template.ParseFiles("./templates/base.html")) templates = map[string]*template.Template{ - "home": template.Must(template.Must(base.Clone()).ParseFiles("./templates/home.html")), - "404": template.Must(template.Must(base.Clone()).ParseFiles("./templates/404.html")), - "login": template.Must(template.Must(base.Clone()).ParseFiles("./templates/login.html")), - "monitoring/jobs/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/jobs.html")), - "monitoring/job/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/job.html")), - "monitoring/users/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/users.html")), - "monitoring/user/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/user.html")), + "home": template.Must(template.Must(base.Clone()).ParseFiles("./templates/home.html")), + "404": template.Must(template.Must(base.Clone()).ParseFiles("./templates/404.html")), + "login": template.Must(template.Must(base.Clone()).ParseFiles("./templates/login.html")), + "monitoring/jobs/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/jobs.html")), + "monitoring/job/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/job.html")), + "monitoring/users/": 
template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/users.html")), + "monitoring/user/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/user.html")), + "monitoring/analysis/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/analysis.html")), + "monitoring/systems/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/systems.html")), + "monitoring/node/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/node.html")), } } func Render(rw http.ResponseWriter, r *http.Request, name string, page *Page) { - if err := templates[name].Execute(rw, page); err != nil { + t, ok := templates[name] + if !ok { + panic("templates must be predefinied!") + } + + if err := t.Execute(rw, page); err != nil { log.Printf("template error: %s\n", err.Error()) } } From 7fcc39a144578f1b0ce785d37572b575992311ed Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Thu, 16 Dec 2021 09:35:03 +0100 Subject: [PATCH 14/25] Add async archiving option; Move REST-API to new package --- README.md | 3 +- api/rest.go | 276 +++++++++++++++++++++++++++++++++++++++ metricdata/archive.go | 31 +++-- metricdata/metricdata.go | 15 ++- rest-api.go | 142 -------------------- server.go | 65 ++++++--- 6 files changed, 358 insertions(+), 174 deletions(-) create mode 100644 api/rest.go delete mode 100644 rest-api.go diff --git a/README.md b/README.md index a3e25aa..189031a 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ git clone --recursive git@github.com:ClusterCockpit/cc-jobarchive.git cd ./cc-jobarchive/frontend yarn install export CCFRONTEND_ROLLUP_INTRO=' -const JOBVIEW_URL = job => `/monitoring/job/${job.jobId}`; +const JOBVIEW_URL = job => `/monitoring/job/${job.id}`; const USERVIEW_URL = userId => `/monitoring/user/${userId}`; const TAG_URL = tag => `/monitoring/jobs/?tag=${tag.id}`; ' @@ -52,4 +52,3 @@ This project uses [gqlgen](https://github.com/99designs/gqlgen) for 
the GraphQL - [ ] Write more TODOs - [ ] Caching - [ ] Generate JWTs based on the provided keys - diff --git a/api/rest.go b/api/rest.go new file mode 100644 index 0000000..7092e16 --- /dev/null +++ b/api/rest.go @@ -0,0 +1,276 @@ +package api + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "net/http" + "strings" + + "github.com/ClusterCockpit/cc-jobarchive/config" + "github.com/ClusterCockpit/cc-jobarchive/graph" + "github.com/ClusterCockpit/cc-jobarchive/graph/model" + "github.com/ClusterCockpit/cc-jobarchive/metricdata" + sq "github.com/Masterminds/squirrel" + "github.com/gorilla/mux" + "github.com/jmoiron/sqlx" +) + +type RestApi struct { + DB *sqlx.DB + Resolver *graph.Resolver + AsyncArchiving bool +} + +func (api *RestApi) MountRoutes(r *mux.Router) { + r.HandleFunc("/api/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut) + r.HandleFunc("/api/jobs/stop_job/", api.stopJob).Methods(http.MethodPost, http.MethodPut) + r.HandleFunc("/api/jobs/stop_job/{id}", api.stopJob).Methods(http.MethodPost, http.MethodPut) + + r.HandleFunc("/api/jobs/{id}", api.getJob).Methods(http.MethodGet) + r.HandleFunc("/api/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) +} + +type StartJobApiRequest struct { + JobId int64 `json:"jobId"` + UserId string `json:"userId"` + ClusterId string `json:"clusterId"` + StartTime int64 `json:"startTime"` + MetaData string `json:"metaData"` + ProjectId string `json:"projectId"` + Nodes []string `json:"nodes"` + NodeList string `json:"nodeList"` +} + +type StartJobApiRespone struct { + DBID int64 `json:"id"` +} + +type StopJobApiRequest struct { + // JobId, ClusterId and StartTime are optional. + // They are only used if no database id was provided. 
+ JobId *string `json:"jobId"` + ClusterId *string `json:"clusterId"` + StartTime *int64 `json:"startTime"` + + // Payload + StopTime int64 `json:"stopTime"` +} + +type StopJobApiRespone struct { + DBID string `json:"id"` +} + +type TagJobApiRequest []*struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (api *RestApi) getJob(rw http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + + job, err := api.Resolver.Query().Job(r.Context(), id) + if err != nil { + http.Error(rw, err.Error(), http.StatusNotFound) + return + } + + job.Tags, err = api.Resolver.Job().Tags(r.Context(), job) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + json.NewEncoder(rw).Encode(job) +} + +func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { + id := mux.Vars(r)["id"] + job, err := api.Resolver.Query().Job(r.Context(), id) + if err != nil { + http.Error(rw, err.Error(), http.StatusNotFound) + return + } + + job.Tags, err = api.Resolver.Job().Tags(r.Context(), job) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + var req TagJobApiRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + for _, tag := range req { + var tagId string + if err := sq.Select("id").From("tag"). + Where("tag.tag_type = ?", tag.Type).Where("tag.tag_name = ?", tag.Name). 
+ RunWith(api.DB).QueryRow().Scan(&tagId); err != nil { + http.Error(rw, fmt.Sprintf("the tag '%s:%s' does not exist", tag.Type, tag.Name), http.StatusNotFound) + return + } + + if _, err := api.DB.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, job.ID, tagId); err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + job.Tags = append(job.Tags, &model.JobTag{ + ID: tagId, + TagType: tag.Type, + TagName: tag.Name, + }) + } + + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + json.NewEncoder(rw).Encode(job) +} + +func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { + req := StartJobApiRequest{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + if config.GetClusterConfig(req.ClusterId) == nil { + http.Error(rw, fmt.Sprintf("cluster '%s' does not exist", req.ClusterId), http.StatusBadRequest) + return + } + + if req.Nodes == nil { + req.Nodes = strings.Split(req.NodeList, "|") + if len(req.Nodes) == 1 { + req.Nodes = strings.Split(req.NodeList, ",") + } + } + if len(req.Nodes) == 0 || len(req.Nodes[0]) == 0 || len(req.UserId) == 0 { + http.Error(rw, "required fields are missing", http.StatusBadRequest) + return + } + + // Check if combination of (job_id, cluster_id, start_time) already exists: + rows, err := api.DB.Query(`SELECT job.id FROM job WHERE job.job_id = ? AND job.cluster_id = ? 
AND job.start_time = ?`, + req.JobId, req.ClusterId, req.StartTime) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + if rows.Next() { + var id int64 = -1 + rows.Scan(&id) + http.Error(rw, fmt.Sprintf("a job with that job_id, cluster_id and start_time already exists (database id: %d)", id), http.StatusUnprocessableEntity) + return + } + + res, err := api.DB.Exec( + `INSERT INTO job (job_id, user_id, project_id, cluster_id, start_time, duration, job_state, num_nodes, node_list, metadata) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`, + req.JobId, req.UserId, req.ProjectId, req.ClusterId, req.StartTime, 0, model.JobStateRunning, len(req.Nodes), strings.Join(req.Nodes, ","), req.MetaData) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + id, err := res.LastInsertId() + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + log.Printf("new job (id: %d): clusterId=%s, jobId=%d, userId=%s, startTime=%d, nodes=%v\n", id, req.ClusterId, req.JobId, req.UserId, req.StartTime, req.Nodes) + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusCreated) + json.NewEncoder(rw).Encode(StartJobApiRespone{ + DBID: id, + }) +} + +func (api *RestApi) stopJob(rw http.ResponseWriter, r *http.Request) { + req := StopJobApiRequest{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + var err error + var job *model.Job + id, ok := mux.Vars(r)["id"] + if ok { + job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job").Where("job.id = ?", id).RunWith(api.DB).QueryRow()) + } else { + job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job"). + Where("job.job_id = ?", req.JobId). + Where("job.cluster_id = ?", req.ClusterId). + Where("job.start_time = ?", req.StartTime). 
+ RunWith(api.DB).QueryRow()) + } + if err != nil { + http.Error(rw, err.Error(), http.StatusNotFound) + return + } + + if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != model.JobStateRunning { + http.Error(rw, "stop_time must be larger than start_time and only running jobs can be stopped", http.StatusBadRequest) + return + } + + doArchiving := func(job *model.Job, ctx context.Context) error { + job.Duration = int(req.StopTime - job.StartTime.Unix()) + jobMeta, err := metricdata.ArchiveJob(job, ctx) + if err != nil { + log.Printf("archiving job (id: %s) failed: %s\n", job.ID, err.Error()) + return err + } + + getAvg := func(metric string) sql.NullFloat64 { + stats, ok := jobMeta.Statistics[metric] + if !ok { + return sql.NullFloat64{Valid: false} + } + return sql.NullFloat64{Valid: true, Float64: stats.Avg} + } + + if _, err := api.DB.Exec( + `UPDATE job SET + job_state = ?, duration = ?, + flops_any_avg = ?, mem_bw_avg = ?, net_bw_avg = ?, file_bw_avg = ?, load_avg = ? + WHERE job.id = ?`, + model.JobStateCompleted, job.Duration, + getAvg("flops_any"), getAvg("mem_bw"), getAvg("net_bw"), getAvg("file_bw"), getAvg("load"), + job.ID); err != nil { + log.Printf("archiving job (id: %s) failed: %s\n", job.ID, err.Error()) + return err + } + + log.Printf("job stopped and archived (id: %s)\n", job.ID) + return nil + } + + log.Printf("archiving job... 
(id: %s): clusterId=%s, jobId=%s, userId=%s, startTime=%s, nodes=%v\n", job.ID, job.ClusterID, job.JobID, job.UserID, job.StartTime, job.Nodes) + if api.AsyncArchiving { + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + json.NewEncoder(rw).Encode(StopJobApiRespone{ + DBID: job.ID, + }) + go doArchiving(job, context.Background()) + } else { + err := doArchiving(job, r.Context()) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + } else { + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + json.NewEncoder(rw).Encode(job) + } + } +} diff --git a/metricdata/archive.go b/metricdata/archive.go index a4d92fe..eb51418 100644 --- a/metricdata/archive.go +++ b/metricdata/archive.go @@ -137,9 +137,9 @@ func loadAveragesFromArchive(job *model.Job, metrics []string, data [][]schema.F } // Writes a running job to the job-archive -func ArchiveJob(job *model.Job, ctx context.Context) error { +func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { if job.State != model.JobStateRunning { - return errors.New("cannot archive job that is not running") + return nil, errors.New("cannot archive job that is not running") } allMetrics := make([]string, 0) @@ -149,7 +149,7 @@ func ArchiveJob(job *model.Job, ctx context.Context) error { } jobData, err := LoadData(job, allMetrics, ctx) if err != nil { - return err + return nil, err } tags := []struct { @@ -195,39 +195,46 @@ func ArchiveJob(job *model.Job, ctx context.Context) error { } } + // If the file based archive is disabled, + // only return the JobMeta structure as the + // statistics in there are needed. 
+ if !useArchive { + return metaData, nil + } + dirPath, err := getPath(job, "", false) if err != nil { - return err + return nil, err } if err := os.MkdirAll(dirPath, 0777); err != nil { - return err + return nil, err } f, err := os.Create(path.Join(dirPath, "meta.json")) if err != nil { - return err + return nil, err } defer f.Close() writer := bufio.NewWriter(f) if err := json.NewEncoder(writer).Encode(metaData); err != nil { - return err + return nil, err } if err := writer.Flush(); err != nil { - return err + return nil, err } f, err = os.Create(path.Join(dirPath, "data.json")) if err != nil { - return err + return nil, err } writer = bufio.NewWriter(f) if err := json.NewEncoder(writer).Encode(jobData); err != nil { - return err + return nil, err } if err := writer.Flush(); err != nil { - return err + return nil, err } - return f.Close() + return metaData, f.Close() } diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index dd2ba1b..5f92a8a 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -28,7 +28,10 @@ var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepos var JobArchivePath string -func Init(jobArchivePath string) error { +var useArchive bool + +func Init(jobArchivePath string, disableArchive bool) error { + useArchive = !disableArchive JobArchivePath = jobArchivePath for _, cluster := range config.Clusters { if cluster.MetricDataRepository != nil { @@ -55,7 +58,7 @@ func Init(jobArchivePath string) error { // Fetches the metric data for a job. 
func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { - if job.State == model.JobStateRunning { + if job.State == model.JobStateRunning || !useArchive { repo, ok := metricDataRepos[job.ClusterID] if !ok { return nil, fmt.Errorf("no metric data repository configured for '%s'", job.ClusterID) @@ -83,7 +86,7 @@ func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.Job // Used for the jobsFootprint GraphQL-Query. TODO: Rename/Generalize. func LoadAverages(job *model.Job, metrics []string, data [][]schema.Float, ctx context.Context) error { - if job.State != model.JobStateRunning { + if job.State != model.JobStateRunning && useArchive { return loadAveragesFromArchive(job, metrics, data) } @@ -120,6 +123,12 @@ func LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx return nil, fmt.Errorf("no metric data repository configured for '%s'", clusterId) } + if metrics == nil { + for _, m := range config.GetClusterConfig(clusterId).MetricConfig { + metrics = append(metrics, m.Name) + } + } + data, err := repo.LoadNodeData(clusterId, metrics, nodes, from, to, ctx) if err != nil { return nil, err diff --git a/rest-api.go b/rest-api.go deleted file mode 100644 index 4cb38d9..0000000 --- a/rest-api.go +++ /dev/null @@ -1,142 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "strings" - - "github.com/ClusterCockpit/cc-jobarchive/config" - "github.com/ClusterCockpit/cc-jobarchive/graph" - "github.com/ClusterCockpit/cc-jobarchive/graph/model" - "github.com/ClusterCockpit/cc-jobarchive/metricdata" - sq "github.com/Masterminds/squirrel" - "github.com/gorilla/mux" -) - -type StartJobApiRequest struct { - JobId int64 `json:"jobId"` - UserId string `json:"userId"` - ClusterId string `json:"clusterId"` - StartTime int64 `json:"startTime"` - MetaData string `json:"metaData"` - ProjectId string `json:"projectId"` - Nodes []string `json:"nodes"` - NodeList string 
`json:"nodeList"` -} - -type StartJobApiRespone struct { - DBID int64 `json:"id"` -} - -type StopJobApiRequest struct { - // JobId, ClusterId and StartTime are optional. - // They are only used if no database id was provided. - JobId *string `json:"jobId"` - ClusterId *string `json:"clusterId"` - StartTime *int64 `json:"startTime"` - - StopTime int64 `json:"stopTime"` -} - -type StopJobApiRespone struct { - DBID string `json:"id"` -} - -func startJob(rw http.ResponseWriter, r *http.Request) { - req := StartJobApiRequest{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return - } - - if config.GetClusterConfig(req.ClusterId) == nil { - http.Error(rw, fmt.Sprintf("cluster '%s' does not exist", req.ClusterId), http.StatusBadRequest) - return - } - - if req.Nodes == nil { - req.Nodes = strings.Split(req.NodeList, "|") - if len(req.Nodes) == 1 { - req.Nodes = strings.Split(req.NodeList, ",") - } - } - if len(req.Nodes) == 0 || len(req.Nodes[0]) == 0 || len(req.UserId) == 0 { - http.Error(rw, "required fields are missing", http.StatusBadRequest) - return - } - - res, err := db.Exec( - `INSERT INTO job (job_id, user_id, project_id, cluster_id, start_time, duration, job_state, num_nodes, node_list, metadata) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`, - req.JobId, req.UserId, req.ProjectId, req.ClusterId, req.StartTime, 0, model.JobStateRunning, len(req.Nodes), strings.Join(req.Nodes, ","), req.MetaData) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - id, err := res.LastInsertId() - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - log.Printf("new job (id: %d): clusterId=%s, jobId=%d, userId=%s, startTime=%d, nodes=%v\n", id, req.ClusterId, req.JobId, req.UserId, req.StartTime, req.Nodes) - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusCreated) - 
json.NewEncoder(rw).Encode(StartJobApiRespone{ - DBID: id, - }) -} - -func stopJob(rw http.ResponseWriter, r *http.Request) { - req := StopJobApiRequest{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return - } - - var err error - var job *model.Job - id, ok := mux.Vars(r)["id"] - if ok { - job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job").Where("job.id = ?", id).RunWith(db).QueryRow()) - } else { - job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job"). - Where("job.job_id = ?", req.JobId). - Where("job.cluster_id = ?", req.ClusterId). - Where("job.start_time = ?", req.StartTime). - RunWith(db).QueryRow()) - } - if err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return - } - - if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != model.JobStateRunning { - http.Error(rw, "stop_time must be larger than start_time and only running jobs can be stopped", http.StatusBadRequest) - return - } - - job.Duration = int(req.StopTime - job.StartTime.Unix()) - - if err := metricdata.ArchiveJob(job, r.Context()); err != nil { - log.Printf("archiving job (id: %s) failed: %s\n", job.ID, err.Error()) - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - if _, err := db.Exec( - `UPDATE job SET job_state = ?, duration = ? 
WHERE job.id = ?`, - model.JobStateCompleted, job.Duration, job.ID); err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - log.Printf("job stoped and archived (id: %s): clusterId=%s, jobId=%s, userId=%s, startTime=%s, nodes=%v\n", job.ID, job.ClusterID, job.JobID, job.UserID, job.StartTime, job.Nodes) - rw.Header().Add("Content-Type", "application/json") - rw.WriteHeader(http.StatusCreated) - json.NewEncoder(rw).Encode(StopJobApiRespone{ - DBID: job.ID, - }) -} diff --git a/server.go b/server.go index b87c881..8a1775a 100644 --- a/server.go +++ b/server.go @@ -10,6 +10,7 @@ import ( "github.com/99designs/gqlgen/graphql/handler" "github.com/99designs/gqlgen/graphql/playground" + "github.com/ClusterCockpit/cc-jobarchive/api" "github.com/ClusterCockpit/cc-jobarchive/auth" "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph" @@ -24,16 +25,40 @@ import ( var db *sqlx.DB +// Format of the configuration (file). See below for the defaults. type ProgramConfig struct { - Addr string `json:"addr"` - DisableAuthentication bool `json:"disable-authentication"` - StaticFiles string `json:"static-files"` - DB string `json:"db"` - JobArchive string `json:"job-archive"` - LdapConfig *auth.LdapConfig `json:"ldap"` - HttpsCertFile string `json:"https-cert-file"` - HttpsKeyFile string `json:"https-key-file"` - UiDefaults map[string]interface{} `json:"ui-defaults"` + // Address where the http (or https) server will listen on (for example: 'localhost:80'). + Addr string `json:"addr"` + + // Disable authentication (for everything: API, Web-UI, ...) 
+ DisableAuthentication bool `json:"disable-authentication"` + + // Folder where static assets can be found, will be served directly + StaticFiles string `json:"static-files"` + + // Currently only SQLite3 is supported, so this should be a filename + DB string `json:"db"` + + // Path to the job-archive + JobArchive string `json:"job-archive"` + + // Make the /api/jobs/stop_job endpoint do the heavy work in the background. + AsyncArchiving bool `json:"async-archive"` + + // Keep all metric data in the metric data repositories, + // do not write to the job-archive. + DisableArchive bool `json:"disable-archive"` + + // For LDAP Authentication and user synchronisation. + LdapConfig *auth.LdapConfig `json:"ldap"` + + // If both those options are not empty, use HTTPS using those certificates. + HttpsCertFile string `json:"https-cert-file"` + HttpsKeyFile string `json:"https-key-file"` + + // If overwritten, at least all the options in the defaults below must + // be provided! Most options here can be overwritten by the user. + UiDefaults map[string]interface{} `json:"ui-defaults"` } var programConfig ProgramConfig = ProgramConfig{ @@ -42,6 +67,8 @@ var programConfig ProgramConfig = ProgramConfig{ StaticFiles: "./frontend/public", DB: "./var/job.db", JobArchive: "./var/job-archive", + AsyncArchiving: true, + DisableArchive: false, LdapConfig: &auth.LdapConfig{ Url: "ldap://localhost", UserBase: "ou=hpc,dc=rrze,dc=uni-erlangen,dc=de", @@ -92,11 +119,15 @@ func main() { } var err error - db, err = sqlx.Open("sqlite3", programConfig.DB) + // This might need to change for other databases: + db, err = sqlx.Open("sqlite3", fmt.Sprintf("%s?_foreign_keys=on", programConfig.DB)) if err != nil { log.Fatal(err) } + // Only for sqlite, not needed for any other database: + db.SetMaxOpenConns(1) + // Initialize sub-modules... 
if !programConfig.DisableAuthentication { @@ -126,7 +157,7 @@ func main() { log.Fatal(err) } - if err := metricdata.Init(programConfig.JobArchive); err != nil { + if err := metricdata.Init(programConfig.JobArchive, programConfig.DisableArchive); err != nil { log.Fatal(err) } @@ -145,6 +176,11 @@ func main() { resolver := &graph.Resolver{DB: db} graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) graphQLPlayground := playground.Handler("GraphQL playground", "/query") + restApi := &api.RestApi{ + DB: db, + Resolver: resolver, + AsyncArchiving: programConfig.AsyncArchiving, + } handleGetLogin := func(rw http.ResponseWriter, r *http.Request) { templates.Render(rw, r, "login", &templates.Page{ @@ -170,9 +206,7 @@ func main() { secured.Use(auth.Auth) } secured.Handle("/query", graphQLEndpoint) - secured.HandleFunc("/api/jobs/start_job/", startJob).Methods(http.MethodPost) - secured.HandleFunc("/api/jobs/stop_job/", stopJob).Methods(http.MethodPost, http.MethodPut) - secured.HandleFunc("/api/jobs/stop_job/{id:[0-9]+}", stopJob).Methods(http.MethodPost, http.MethodPut) + secured.HandleFunc("/config.json", config.ServeConfig).Methods(http.MethodGet) secured.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { @@ -201,12 +235,13 @@ func main() { }) monitoringRoutes(secured, resolver) + restApi.MountRoutes(secured) r.PathPrefix("/").Handler(http.FileServer(http.Dir(programConfig.StaticFiles))) handler := handlers.CORS( handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}), handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}), - handlers.AllowedOrigins([]string{"*"}))(handlers.LoggingHandler(os.Stdout, r)) + handlers.AllowedOrigins([]string{"*"}))(handlers.LoggingHandler(os.Stdout, handlers.CompressHandler(r))) // Start http or https server if programConfig.HttpsCertFile != "" && programConfig.HttpsKeyFile != "" { From 
89333666b3c091126a8d36905c9b88cafa6bf7ab Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Thu, 16 Dec 2021 13:17:48 +0100 Subject: [PATCH 15/25] BC: new schemas for basically everything --- api/rest.go | 3 +- gqlgen.yml | 6 +- graph/generated/generated.go | 1567 ++++++++++++++++++++++++++------- graph/model/models_gen.go | 75 +- graph/resolver.go | 44 +- graph/schema.graphqls | 128 +-- graph/schema.resolvers.go | 9 + init-db.go | 120 ++- metricdata/archive.go | 63 +- metricdata/cc-metric-store.go | 43 +- metricdata/influxdb-v2.go | 33 +- metricdata/metricdata.go | 8 +- schema/metrics.go | 77 +- server.go | 4 +- 14 files changed, 1631 insertions(+), 549 deletions(-) diff --git a/api/rest.go b/api/rest.go index 7092e16..97f5a83 100644 --- a/api/rest.go +++ b/api/rest.go @@ -33,6 +33,7 @@ func (api *RestApi) MountRoutes(r *mux.Router) { r.HandleFunc("/api/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) } +// TODO/FIXME: UPDATE API! type StartJobApiRequest struct { JobId int64 `json:"jobId"` UserId string `json:"userId"` @@ -255,7 +256,7 @@ func (api *RestApi) stopJob(rw http.ResponseWriter, r *http.Request) { return nil } - log.Printf("archiving job... (id: %s): clusterId=%s, jobId=%s, userId=%s, startTime=%s, nodes=%v\n", job.ID, job.ClusterID, job.JobID, job.UserID, job.StartTime, job.Nodes) + log.Printf("archiving job... 
(id: %s): clusterId=%s, jobId=%d, userId=%s, startTime=%s\n", job.ID, job.Cluster, job.JobID, job.User, job.StartTime) if api.AsyncArchiving { rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) diff --git a/gqlgen.yml b/gqlgen.yml index bce5b5e..ea78535 100644 --- a/gqlgen.yml +++ b/gqlgen.yml @@ -56,7 +56,7 @@ models: - github.com/99designs/gqlgen/graphql.Int32 Job: fields: - tags: + Tags: resolver: true JobMetric: model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobMetric" @@ -68,4 +68,8 @@ models: model: "github.com/ClusterCockpit/cc-jobarchive/schema.Float" JobMetricScope: model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricScope" + JobResource: + model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobResource" + Accelerator: + model: "github.com/ClusterCockpit/cc-jobarchive/schema.Accelerator" diff --git a/graph/generated/generated.go b/graph/generated/generated.go index 920feff..72619c9 100644 --- a/graph/generated/generated.go +++ b/graph/generated/generated.go @@ -37,6 +37,7 @@ type Config struct { } type ResolverRoot interface { + Accelerator() AcceleratorResolver Job() JobResolver Mutation() MutationResolver Query() QueryResolver @@ -46,6 +47,12 @@ type DirectiveRoot struct { } type ComplexityRoot struct { + Accelerator struct { + ID func(childComplexity int) int + Model func(childComplexity int) int + Type func(childComplexity int) int + } + Cluster struct { ClusterID func(childComplexity int) int CoresPerSocket func(childComplexity int) int @@ -76,24 +83,30 @@ type ComplexityRoot struct { } Job struct { - ClusterID func(childComplexity int) int - Duration func(childComplexity int) int - FileBwAvg func(childComplexity int) int - FlopsAnyAvg func(childComplexity int) int - HasProfile func(childComplexity int) int - ID func(childComplexity int) int - JobID func(childComplexity int) int - LoadAvg func(childComplexity int) int - MemBwAvg func(childComplexity int) int - MemUsedMax func(childComplexity int) int 
- NetBwAvg func(childComplexity int) int - Nodes func(childComplexity int) int - NumNodes func(childComplexity int) int - ProjectID func(childComplexity int) int - StartTime func(childComplexity int) int - State func(childComplexity int) int - Tags func(childComplexity int) int - UserID func(childComplexity int) int + ArrayJobID func(childComplexity int) int + Cluster func(childComplexity int) int + Duration func(childComplexity int) int + Exclusive func(childComplexity int) int + FileBwAvg func(childComplexity int) int + FlopsAnyAvg func(childComplexity int) int + ID func(childComplexity int) int + JobID func(childComplexity int) int + LoadAvg func(childComplexity int) int + MemBwAvg func(childComplexity int) int + MemUsedMax func(childComplexity int) int + MonitoringStatus func(childComplexity int) int + NetBwAvg func(childComplexity int) int + NumAcc func(childComplexity int) int + NumHWThreads func(childComplexity int) int + NumNodes func(childComplexity int) int + Partition func(childComplexity int) int + Project func(childComplexity int) int + Resources func(childComplexity int) int + Smt func(childComplexity int) int + StartTime func(childComplexity int) int + State func(childComplexity int) int + Tags func(childComplexity int) int + User func(childComplexity int) int } JobMetric struct { @@ -105,7 +118,8 @@ type ComplexityRoot struct { JobMetricSeries struct { Data func(childComplexity int) int - NodeID func(childComplexity int) int + Hostname func(childComplexity int) int + Id func(childComplexity int) int Statistics func(childComplexity int) int } @@ -120,6 +134,12 @@ type ComplexityRoot struct { Name func(childComplexity int) int } + JobResource struct { + Accelerators func(childComplexity int) int + HWThreads func(childComplexity int) int + Hostname func(childComplexity int) int + } + JobResultList struct { Count func(childComplexity int) int Items func(childComplexity int) int @@ -144,13 +164,14 @@ type ComplexityRoot struct { } MetricConfig struct { - 
Alert func(childComplexity int) int - Caution func(childComplexity int) int - Name func(childComplexity int) int - Normal func(childComplexity int) int - Peak func(childComplexity int) int - Sampletime func(childComplexity int) int - Unit func(childComplexity int) int + Alert func(childComplexity int) int + Caution func(childComplexity int) int + Name func(childComplexity int) int + Normal func(childComplexity int) int + Peak func(childComplexity int) int + Scope func(childComplexity int) int + Timestep func(childComplexity int) int + Unit func(childComplexity int) int } MetricFootprints struct { @@ -194,6 +215,9 @@ type ComplexityRoot struct { } } +type AcceleratorResolver interface { + ID(ctx context.Context, obj *schema.Accelerator) (string, error) +} type JobResolver interface { Tags(ctx context.Context, obj *model.Job) ([]*model.JobTag, error) } @@ -231,70 +255,91 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in _ = ec switch typeName + "." + field { - case "Cluster.clusterID": + case "Accelerator.Id": + if e.complexity.Accelerator.ID == nil { + break + } + + return e.complexity.Accelerator.ID(childComplexity), true + + case "Accelerator.Model": + if e.complexity.Accelerator.Model == nil { + break + } + + return e.complexity.Accelerator.Model(childComplexity), true + + case "Accelerator.Type": + if e.complexity.Accelerator.Type == nil { + break + } + + return e.complexity.Accelerator.Type(childComplexity), true + + case "Cluster.ClusterID": if e.complexity.Cluster.ClusterID == nil { break } return e.complexity.Cluster.ClusterID(childComplexity), true - case "Cluster.coresPerSocket": + case "Cluster.CoresPerSocket": if e.complexity.Cluster.CoresPerSocket == nil { break } return e.complexity.Cluster.CoresPerSocket(childComplexity), true - case "Cluster.filterRanges": + case "Cluster.FilterRanges": if e.complexity.Cluster.FilterRanges == nil { break } return e.complexity.Cluster.FilterRanges(childComplexity), true - case 
"Cluster.flopRateScalar": + case "Cluster.FlopRateScalar": if e.complexity.Cluster.FlopRateScalar == nil { break } return e.complexity.Cluster.FlopRateScalar(childComplexity), true - case "Cluster.flopRateSimd": + case "Cluster.FlopRateSimd": if e.complexity.Cluster.FlopRateSimd == nil { break } return e.complexity.Cluster.FlopRateSimd(childComplexity), true - case "Cluster.memoryBandwidth": + case "Cluster.MemoryBandwidth": if e.complexity.Cluster.MemoryBandwidth == nil { break } return e.complexity.Cluster.MemoryBandwidth(childComplexity), true - case "Cluster.metricConfig": + case "Cluster.MetricConfig": if e.complexity.Cluster.MetricConfig == nil { break } return e.complexity.Cluster.MetricConfig(childComplexity), true - case "Cluster.processorType": + case "Cluster.ProcessorType": if e.complexity.Cluster.ProcessorType == nil { break } return e.complexity.Cluster.ProcessorType(childComplexity), true - case "Cluster.socketsPerNode": + case "Cluster.SocketsPerNode": if e.complexity.Cluster.SocketsPerNode == nil { break } return e.complexity.Cluster.SocketsPerNode(childComplexity), true - case "Cluster.threadsPerCore": + case "Cluster.ThreadsPerCore": if e.complexity.Cluster.ThreadsPerCore == nil { break } @@ -350,196 +395,245 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.IntRangeOutput.To(childComplexity), true - case "Job.clusterId": - if e.complexity.Job.ClusterID == nil { + case "Job.ArrayJobId": + if e.complexity.Job.ArrayJobID == nil { break } - return e.complexity.Job.ClusterID(childComplexity), true + return e.complexity.Job.ArrayJobID(childComplexity), true - case "Job.duration": + case "Job.Cluster": + if e.complexity.Job.Cluster == nil { + break + } + + return e.complexity.Job.Cluster(childComplexity), true + + case "Job.Duration": if e.complexity.Job.Duration == nil { break } return e.complexity.Job.Duration(childComplexity), true - case "Job.fileBwAvg": + case "Job.Exclusive": + if 
e.complexity.Job.Exclusive == nil { + break + } + + return e.complexity.Job.Exclusive(childComplexity), true + + case "Job.FileBwAvg": if e.complexity.Job.FileBwAvg == nil { break } return e.complexity.Job.FileBwAvg(childComplexity), true - case "Job.flopsAnyAvg": + case "Job.FlopsAnyAvg": if e.complexity.Job.FlopsAnyAvg == nil { break } return e.complexity.Job.FlopsAnyAvg(childComplexity), true - case "Job.hasProfile": - if e.complexity.Job.HasProfile == nil { - break - } - - return e.complexity.Job.HasProfile(childComplexity), true - - case "Job.id": + case "Job.Id": if e.complexity.Job.ID == nil { break } return e.complexity.Job.ID(childComplexity), true - case "Job.jobId": + case "Job.JobId": if e.complexity.Job.JobID == nil { break } return e.complexity.Job.JobID(childComplexity), true - case "Job.loadAvg": + case "Job.LoadAvg": if e.complexity.Job.LoadAvg == nil { break } return e.complexity.Job.LoadAvg(childComplexity), true - case "Job.memBwAvg": + case "Job.MemBwAvg": if e.complexity.Job.MemBwAvg == nil { break } return e.complexity.Job.MemBwAvg(childComplexity), true - case "Job.memUsedMax": + case "Job.MemUsedMax": if e.complexity.Job.MemUsedMax == nil { break } return e.complexity.Job.MemUsedMax(childComplexity), true - case "Job.netBwAvg": + case "Job.MonitoringStatus": + if e.complexity.Job.MonitoringStatus == nil { + break + } + + return e.complexity.Job.MonitoringStatus(childComplexity), true + + case "Job.NetBwAvg": if e.complexity.Job.NetBwAvg == nil { break } return e.complexity.Job.NetBwAvg(childComplexity), true - case "Job.nodes": - if e.complexity.Job.Nodes == nil { + case "Job.NumAcc": + if e.complexity.Job.NumAcc == nil { break } - return e.complexity.Job.Nodes(childComplexity), true + return e.complexity.Job.NumAcc(childComplexity), true - case "Job.numNodes": + case "Job.NumHWThreads": + if e.complexity.Job.NumHWThreads == nil { + break + } + + return e.complexity.Job.NumHWThreads(childComplexity), true + + case "Job.NumNodes": if 
e.complexity.Job.NumNodes == nil { break } return e.complexity.Job.NumNodes(childComplexity), true - case "Job.projectId": - if e.complexity.Job.ProjectID == nil { + case "Job.Partition": + if e.complexity.Job.Partition == nil { break } - return e.complexity.Job.ProjectID(childComplexity), true + return e.complexity.Job.Partition(childComplexity), true - case "Job.startTime": + case "Job.Project": + if e.complexity.Job.Project == nil { + break + } + + return e.complexity.Job.Project(childComplexity), true + + case "Job.Resources": + if e.complexity.Job.Resources == nil { + break + } + + return e.complexity.Job.Resources(childComplexity), true + + case "Job.SMT": + if e.complexity.Job.Smt == nil { + break + } + + return e.complexity.Job.Smt(childComplexity), true + + case "Job.StartTime": if e.complexity.Job.StartTime == nil { break } return e.complexity.Job.StartTime(childComplexity), true - case "Job.state": + case "Job.State": if e.complexity.Job.State == nil { break } return e.complexity.Job.State(childComplexity), true - case "Job.tags": + case "Job.Tags": if e.complexity.Job.Tags == nil { break } return e.complexity.Job.Tags(childComplexity), true - case "Job.userId": - if e.complexity.Job.UserID == nil { + case "Job.User": + if e.complexity.Job.User == nil { break } - return e.complexity.Job.UserID(childComplexity), true + return e.complexity.Job.User(childComplexity), true - case "JobMetric.scope": + case "JobMetric.Scope": if e.complexity.JobMetric.Scope == nil { break } return e.complexity.JobMetric.Scope(childComplexity), true - case "JobMetric.series": + case "JobMetric.Series": if e.complexity.JobMetric.Series == nil { break } return e.complexity.JobMetric.Series(childComplexity), true - case "JobMetric.timestep": + case "JobMetric.Timestep": if e.complexity.JobMetric.Timestep == nil { break } return e.complexity.JobMetric.Timestep(childComplexity), true - case "JobMetric.unit": + case "JobMetric.Unit": if e.complexity.JobMetric.Unit == nil { break } 
return e.complexity.JobMetric.Unit(childComplexity), true - case "JobMetricSeries.data": + case "JobMetricSeries.Data": if e.complexity.JobMetricSeries.Data == nil { break } return e.complexity.JobMetricSeries.Data(childComplexity), true - case "JobMetricSeries.node_id": - if e.complexity.JobMetricSeries.NodeID == nil { + case "JobMetricSeries.Hostname": + if e.complexity.JobMetricSeries.Hostname == nil { break } - return e.complexity.JobMetricSeries.NodeID(childComplexity), true + return e.complexity.JobMetricSeries.Hostname(childComplexity), true - case "JobMetricSeries.statistics": + case "JobMetricSeries.Id": + if e.complexity.JobMetricSeries.Id == nil { + break + } + + return e.complexity.JobMetricSeries.Id(childComplexity), true + + case "JobMetricSeries.Statistics": if e.complexity.JobMetricSeries.Statistics == nil { break } return e.complexity.JobMetricSeries.Statistics(childComplexity), true - case "JobMetricStatistics.avg": + case "JobMetricStatistics.Avg": if e.complexity.JobMetricStatistics.Avg == nil { break } return e.complexity.JobMetricStatistics.Avg(childComplexity), true - case "JobMetricStatistics.max": + case "JobMetricStatistics.Max": if e.complexity.JobMetricStatistics.Max == nil { break } return e.complexity.JobMetricStatistics.Max(childComplexity), true - case "JobMetricStatistics.min": + case "JobMetricStatistics.Min": if e.complexity.JobMetricStatistics.Min == nil { break } @@ -560,6 +654,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetricWithName.Name(childComplexity), true + case "JobResource.Accelerators": + if e.complexity.JobResource.Accelerators == nil { + break + } + + return e.complexity.JobResource.Accelerators(childComplexity), true + + case "JobResource.HWThreads": + if e.complexity.JobResource.HWThreads == nil { + break + } + + return e.complexity.JobResource.HWThreads(childComplexity), true + + case "JobResource.Hostname": + if e.complexity.JobResource.Hostname 
== nil { + break + } + + return e.complexity.JobResource.Hostname(childComplexity), true + case "JobResultList.count": if e.complexity.JobResultList.Count == nil { break @@ -588,21 +703,21 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Offset(childComplexity), true - case "JobTag.id": + case "JobTag.Id": if e.complexity.JobTag.ID == nil { break } return e.complexity.JobTag.ID(childComplexity), true - case "JobTag.tagName": + case "JobTag.TagName": if e.complexity.JobTag.TagName == nil { break } return e.complexity.JobTag.TagName(childComplexity), true - case "JobTag.tagType": + case "JobTag.TagType": if e.complexity.JobTag.TagType == nil { break } @@ -658,49 +773,56 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.TotalWalltime(childComplexity), true - case "MetricConfig.alert": + case "MetricConfig.Alert": if e.complexity.MetricConfig.Alert == nil { break } return e.complexity.MetricConfig.Alert(childComplexity), true - case "MetricConfig.caution": + case "MetricConfig.Caution": if e.complexity.MetricConfig.Caution == nil { break } return e.complexity.MetricConfig.Caution(childComplexity), true - case "MetricConfig.name": + case "MetricConfig.Name": if e.complexity.MetricConfig.Name == nil { break } return e.complexity.MetricConfig.Name(childComplexity), true - case "MetricConfig.normal": + case "MetricConfig.Normal": if e.complexity.MetricConfig.Normal == nil { break } return e.complexity.MetricConfig.Normal(childComplexity), true - case "MetricConfig.peak": + case "MetricConfig.Peak": if e.complexity.MetricConfig.Peak == nil { break } return e.complexity.MetricConfig.Peak(childComplexity), true - case "MetricConfig.sampletime": - if e.complexity.MetricConfig.Sampletime == nil { + case "MetricConfig.Scope": + if e.complexity.MetricConfig.Scope == nil { break } - return e.complexity.MetricConfig.Sampletime(childComplexity), 
true + return e.complexity.MetricConfig.Scope(childComplexity), true - case "MetricConfig.unit": + case "MetricConfig.Timestep": + if e.complexity.MetricConfig.Timestep == nil { + break + } + + return e.complexity.MetricConfig.Timestep(childComplexity), true + + case "MetricConfig.Unit": if e.complexity.MetricConfig.Unit == nil { break } @@ -986,80 +1108,104 @@ func (ec *executionContext) introspectType(name string) (*introspection.Type, er var sources = []*ast.Source{ {Name: "graph/schema.graphqls", Input: `type Job { - id: ID! # Database ID, unique - jobId: String! # ID given to the job by the cluster scheduler - userId: String! # Username - projectId: String! # Project - clusterId: String! # Name of the cluster this job was running on - startTime: Time! # RFC3339 formated string - duration: Int! # For running jobs, the time it has already run - numNodes: Int! # Number of nodes this job was running on - nodes: [String!]! # List of hostnames - hasProfile: Boolean! # TODO: Could be removed? - state: JobState! # State of the job - tags: [JobTag!]! # List of tags this job has + Id: ID! # Database ID, unique + JobId: Int! # ID given to the job by the cluster scheduler + User: String! # Username + Project: String! # Project + Cluster: String! # Name of the cluster this job was running on + StartTime: Time! # RFC3339 formated string + Duration: Int! # For running jobs, the time it has already run + NumNodes: Int! # Number of nodes this job was running on + NumHWThreads: Int! + NumAcc: Int! + SMT: Int! + Exclusive: Int! + Partition: String! + ArrayJobId: Int! + MonitoringStatus: Int! + State: JobState! # State of the job + Tags: [JobTag!]! # List of tags this job has + Resources: [JobResource!]! # List of hosts/hwthreads/gpus/... # Will be null for running jobs. 
- loadAvg: Float - memUsedMax: Float - flopsAnyAvg: Float - memBwAvg: Float - netBwAvg: Float - fileBwAvg: Float + LoadAvg: Float + MemUsedMax: Float + FlopsAnyAvg: Float + MemBwAvg: Float + NetBwAvg: Float + FileBwAvg: Float +} + +type JobResource { + Hostname: String! + HWThreads: [Int!] + Accelerators: [Accelerator!] +} + +type Accelerator { + Id: String! + Type: String! + Model: String! } # TODO: Extend by more possible states? enum JobState { running completed + failed + canceled + stopped + timeout } type JobTag { - id: ID! # Database ID, unique - tagType: String! # Type - tagName: String! # Name + Id: ID! # Database ID, unique + TagType: String! # Type + TagName: String! # Name } type Cluster { - clusterID: String! - processorType: String! - socketsPerNode: Int! - coresPerSocket: Int! - threadsPerCore: Int! - flopRateScalar: Int! - flopRateSimd: Int! - memoryBandwidth: Int! - metricConfig: [MetricConfig!]! - filterRanges: FilterRanges! + ClusterID: String! + ProcessorType: String! + SocketsPerNode: Int! + CoresPerSocket: Int! + ThreadsPerCore: Int! + FlopRateScalar: Int! + FlopRateSimd: Int! + MemoryBandwidth: Int! + MetricConfig: [MetricConfig!]! + FilterRanges: FilterRanges! } type MetricConfig { - name: String! - unit: String! - sampletime: Int! - peak: Int! - normal: Int! - caution: Int! - alert: Int! + Name: String! + Unit: String! + Timestep: Int! + Peak: Int! + Normal: Int! + Caution: Int! + Alert: Int! + Scope: String! } type JobMetric { - unit: String! - scope: JobMetricScope! - timestep: Int! - series: [JobMetricSeries!]! + Unit: String! + Scope: JobMetricScope! + Timestep: Int! + Series: [JobMetricSeries!]! } type JobMetricSeries { - node_id: String! - statistics: JobMetricStatistics - data: [NullableFloat!]! + Hostname: String! + Id: Int + Statistics: JobMetricStatistics + Data: [NullableFloat!]! } type JobMetricStatistics { - avg: Float! - min: Float! - max: Float! + Avg: Float! + Min: Float! + Max: Float! 
} type JobMetricWithName { @@ -1128,13 +1274,13 @@ type FilterRanges { input JobFilter { tags: [ID!] jobId: StringInput - userId: StringInput - projectId: StringInput - clusterId: StringInput + user: StringInput + project: StringInput + cluster: StringInput duration: IntRange numNodes: IntRange startTime: TimeRange - isRunning: Boolean + jobState: [JobState!] flopsAnyAvg: FloatRange memBwAvg: FloatRange loadAvg: FloatRange @@ -1615,7 +1761,112 @@ func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArg // region **************************** field.gotpl ***************************** -func (ec *executionContext) _Cluster_clusterID(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Accelerator_Id(ctx context.Context, field graphql.CollectedField, obj *schema.Accelerator) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Accelerator", + Field: field, + Args: nil, + IsMethod: true, + IsResolver: true, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Accelerator().ID(rctx, obj) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Accelerator_Type(ctx context.Context, field graphql.CollectedField, obj *schema.Accelerator) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := 
&graphql.FieldContext{ + Object: "Accelerator", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Accelerator_Model(ctx context.Context, field graphql.CollectedField, obj *schema.Accelerator) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Accelerator", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Model, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Cluster_ClusterID(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1650,7 +1901,7 @@ func (ec *executionContext) _Cluster_clusterID(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) 
_Cluster_processorType(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_ProcessorType(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1685,7 +1936,7 @@ func (ec *executionContext) _Cluster_processorType(ctx context.Context, field gr return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_socketsPerNode(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_SocketsPerNode(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1720,7 +1971,7 @@ func (ec *executionContext) _Cluster_socketsPerNode(ctx context.Context, field g return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_coresPerSocket(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_CoresPerSocket(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1755,7 +2006,7 @@ func (ec *executionContext) _Cluster_coresPerSocket(ctx context.Context, field g return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_threadsPerCore(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_ThreadsPerCore(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1790,7 +2041,7 @@ func (ec 
*executionContext) _Cluster_threadsPerCore(ctx context.Context, field g return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_flopRateScalar(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_FlopRateScalar(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1825,7 +2076,7 @@ func (ec *executionContext) _Cluster_flopRateScalar(ctx context.Context, field g return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_flopRateSimd(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_FlopRateSimd(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1860,7 +2111,7 @@ func (ec *executionContext) _Cluster_flopRateSimd(ctx context.Context, field gra return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_memoryBandwidth(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_MemoryBandwidth(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1895,7 +2146,7 @@ func (ec *executionContext) _Cluster_memoryBandwidth(ctx context.Context, field return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_metricConfig(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_MetricConfig(ctx context.Context, field graphql.CollectedField, obj 
*model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1930,7 +2181,7 @@ func (ec *executionContext) _Cluster_metricConfig(ctx context.Context, field gra return ec.marshalNMetricConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐMetricConfigᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_filterRanges(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_FilterRanges(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2210,7 +2461,7 @@ func (ec *executionContext) _IntRangeOutput_to(ctx context.Context, field graphq return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Job_id(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_Id(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2245,7 +2496,7 @@ func (ec *executionContext) _Job_id(ctx context.Context, field graphql.Collected return ec.marshalNID2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_jobId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_JobId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2275,12 +2526,12 @@ func (ec *executionContext) _Job_jobId(ctx context.Context, field graphql.Collec } return graphql.Null } - res := resTmp.(string) + res := resTmp.(int) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return 
ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Job_userId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_User(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2298,7 +2549,7 @@ func (ec *executionContext) _Job_userId(ctx context.Context, field graphql.Colle ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.UserID, nil + return obj.User, nil }) if err != nil { ec.Error(ctx, err) @@ -2315,7 +2566,7 @@ func (ec *executionContext) _Job_userId(ctx context.Context, field graphql.Colle return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_projectId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_Project(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2333,7 +2584,7 @@ func (ec *executionContext) _Job_projectId(ctx context.Context, field graphql.Co ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.ProjectID, nil + return obj.Project, nil }) if err != nil { ec.Error(ctx, err) @@ -2350,7 +2601,7 @@ func (ec *executionContext) _Job_projectId(ctx context.Context, field graphql.Co return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_clusterId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec 
*executionContext) _Job_Cluster(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2368,7 +2619,7 @@ func (ec *executionContext) _Job_clusterId(ctx context.Context, field graphql.Co ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.ClusterID, nil + return obj.Cluster, nil }) if err != nil { ec.Error(ctx, err) @@ -2385,7 +2636,7 @@ func (ec *executionContext) _Job_clusterId(ctx context.Context, field graphql.Co return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_startTime(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_StartTime(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2420,7 +2671,7 @@ func (ec *executionContext) _Job_startTime(ctx context.Context, field graphql.Co return ec.marshalNTime2timeᚐTime(ctx, field.Selections, res) } -func (ec *executionContext) _Job_duration(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_Duration(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2455,7 +2706,7 @@ func (ec *executionContext) _Job_duration(ctx context.Context, field graphql.Col return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Job_numNodes(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_NumNodes(ctx context.Context, field 
graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2490,7 +2741,7 @@ func (ec *executionContext) _Job_numNodes(ctx context.Context, field graphql.Col return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Job_nodes(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_NumHWThreads(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2508,7 +2759,7 @@ func (ec *executionContext) _Job_nodes(ctx context.Context, field graphql.Collec ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Nodes, nil + return obj.NumHWThreads, nil }) if err != nil { ec.Error(ctx, err) @@ -2520,12 +2771,12 @@ func (ec *executionContext) _Job_nodes(ctx context.Context, field graphql.Collec } return graphql.Null } - res := resTmp.([]string) + res := resTmp.(int) fc.Result = res - return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res) + return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Job_hasProfile(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_NumAcc(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2543,7 +2794,7 @@ func (ec *executionContext) _Job_hasProfile(ctx context.Context, field graphql.C ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in 
children - return obj.HasProfile, nil + return obj.NumAcc, nil }) if err != nil { ec.Error(ctx, err) @@ -2555,12 +2806,187 @@ func (ec *executionContext) _Job_hasProfile(ctx context.Context, field graphql.C } return graphql.Null } - res := resTmp.(bool) + res := resTmp.(int) fc.Result = res - return ec.marshalNBoolean2bool(ctx, field.Selections, res) + return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Job_state(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_SMT(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Job", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Smt, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Job_Exclusive(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Job", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in 
children + return obj.Exclusive, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Job_Partition(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Job", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Partition, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Job_ArrayJobId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Job", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ArrayJobID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, 
"must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Job_MonitoringStatus(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Job", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MonitoringStatus, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Job_State(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2595,7 +3021,7 @@ func (ec *executionContext) _Job_state(ctx context.Context, field graphql.Collec return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx, field.Selections, res) } -func (ec *executionContext) _Job_tags(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_Tags(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2630,7 +3056,42 @@ func (ec *executionContext) _Job_tags(ctx context.Context, field graphql.Collect return 
ec.marshalNJobTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTagᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _Job_loadAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_Resources(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Job", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Resources, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*schema.JobResource) + fc.Result = res + return ec.marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResourceᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Job_LoadAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2662,7 +3123,7 @@ func (ec *executionContext) _Job_loadAvg(ctx context.Context, field graphql.Coll return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) } -func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_MemUsedMax(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2694,7 +3155,7 @@ func (ec 
*executionContext) _Job_memUsedMax(ctx context.Context, field graphql.C return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) } -func (ec *executionContext) _Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_FlopsAnyAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2726,7 +3187,7 @@ func (ec *executionContext) _Job_flopsAnyAvg(ctx context.Context, field graphql. return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) } -func (ec *executionContext) _Job_memBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_MemBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2758,7 +3219,7 @@ func (ec *executionContext) _Job_memBwAvg(ctx context.Context, field graphql.Col return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) } -func (ec *executionContext) _Job_netBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_NetBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2790,7 +3251,7 @@ func (ec *executionContext) _Job_netBwAvg(ctx context.Context, field graphql.Col return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) } -func (ec *executionContext) _Job_fileBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_FileBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); 
r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2822,7 +3283,7 @@ func (ec *executionContext) _Job_fileBwAvg(ctx context.Context, field graphql.Co return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetric_unit(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_Unit(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2857,7 +3318,7 @@ func (ec *executionContext) _JobMetric_unit(ctx context.Context, field graphql.C return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetric_scope(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_Scope(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2892,7 +3353,7 @@ func (ec *executionContext) _JobMetric_scope(ctx context.Context, field graphql. 
return ec.marshalNJobMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetric_timestep(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_Timestep(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2927,7 +3388,7 @@ func (ec *executionContext) _JobMetric_timestep(ctx context.Context, field graph return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetric_series(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_Series(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2962,7 +3423,7 @@ func (ec *executionContext) _JobMetric_series(ctx context.Context, field graphql return ec.marshalNJobMetricSeries2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricSeriesᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricSeries_node_id(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricSeries_Hostname(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2980,7 +3441,7 @@ func (ec *executionContext) _JobMetricSeries_node_id(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.NodeID, nil + return obj.Hostname, 
nil }) if err != nil { ec.Error(ctx, err) @@ -2997,7 +3458,39 @@ func (ec *executionContext) _JobMetricSeries_node_id(ctx context.Context, field return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricSeries_statistics(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricSeries_Id(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobMetricSeries", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Id, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalOInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobMetricSeries_Statistics(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3029,7 +3522,7 @@ func (ec *executionContext) _JobMetricSeries_statistics(ctx context.Context, fie return ec.marshalOJobMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricStatistics(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricSeries_data(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricSeries_Data(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { defer 
func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3064,7 +3557,7 @@ func (ec *executionContext) _JobMetricSeries_data(ctx context.Context, field gra return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricStatistics_avg(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricStatistics_Avg(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3099,7 +3592,7 @@ func (ec *executionContext) _JobMetricStatistics_avg(ctx context.Context, field return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricStatistics_min(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricStatistics_Min(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3134,7 +3627,7 @@ func (ec *executionContext) _JobMetricStatistics_min(ctx context.Context, field return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricStatistics_max(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricStatistics_Max(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3239,6 +3732,105 @@ func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field return 
ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) } +func (ec *executionContext) _JobResource_Hostname(ctx context.Context, field graphql.CollectedField, obj *schema.JobResource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobResource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Hostname, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobResource_HWThreads(ctx context.Context, field graphql.CollectedField, obj *schema.JobResource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobResource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HWThreads, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]int) + fc.Result = res + return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobResource_Accelerators(ctx context.Context, field graphql.CollectedField, obj 
*schema.JobResource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobResource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Accelerators, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]schema.Accelerator) + fc.Result = res + return ec.marshalOAccelerator2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAcceleratorᚄ(ctx, field.Selections, res) +} + func (ec *executionContext) _JobResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -3370,7 +3962,7 @@ func (ec *executionContext) _JobResultList_count(ctx context.Context, field grap return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) _JobTag_id(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { +func (ec *executionContext) _JobTag_Id(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3405,7 +3997,7 @@ func (ec *executionContext) _JobTag_id(ctx context.Context, field graphql.Collec return ec.marshalNID2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobTag_tagType(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { +func (ec *executionContext) _JobTag_TagType(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { defer func() { if r := 
recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3440,7 +4032,7 @@ func (ec *executionContext) _JobTag_tagType(ctx context.Context, field graphql.C return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobTag_tagName(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { +func (ec *executionContext) _JobTag_TagName(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3720,7 +4312,7 @@ func (ec *executionContext) _JobsStatistics_histNumNodes(ctx context.Context, fi return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐHistoPointᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_Name(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3755,7 +4347,7 @@ func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_unit(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_Unit(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3790,7 +4382,7 @@ func (ec *executionContext) _MetricConfig_unit(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_sampletime(ctx context.Context, field 
graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_Timestep(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3808,7 +4400,7 @@ func (ec *executionContext) _MetricConfig_sampletime(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Sampletime, nil + return obj.Timestep, nil }) if err != nil { ec.Error(ctx, err) @@ -3825,7 +4417,7 @@ func (ec *executionContext) _MetricConfig_sampletime(ctx context.Context, field return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_peak(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_Peak(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3860,7 +4452,7 @@ func (ec *executionContext) _MetricConfig_peak(ctx context.Context, field graphq return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_normal(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_Normal(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3895,7 +4487,7 @@ func (ec *executionContext) _MetricConfig_normal(ctx context.Context, field grap return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_caution(ctx 
context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_Caution(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3930,7 +4522,7 @@ func (ec *executionContext) _MetricConfig_caution(ctx context.Context, field gra return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_alert(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_Alert(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3965,6 +4557,41 @@ func (ec *executionContext) _MetricConfig_alert(ctx context.Context, field graph return ec.marshalNInt2int(ctx, field.Selections, res) } +func (ec *executionContext) _MetricConfig_Scope(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "MetricConfig", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Scope, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + func (ec *executionContext) 
_MetricFootprints_name(ctx context.Context, field graphql.CollectedField, obj *model.MetricFootprints) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -6049,27 +6676,27 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int if err != nil { return it, err } - case "userId": + case "user": var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("userId")) - it.UserID, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐStringInput(ctx, v) + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("user")) + it.User, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { return it, err } - case "projectId": + case "project": var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("projectId")) - it.ProjectID, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐStringInput(ctx, v) + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("project")) + it.Project, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { return it, err } - case "clusterId": + case "cluster": var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterId")) - it.ClusterID, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐStringInput(ctx, v) + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) + it.Cluster, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { return it, err } @@ -6097,11 +6724,11 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int if err != nil { return it, err } - case "isRunning": + case "jobState": var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("isRunning")) - 
it.IsRunning, err = ec.unmarshalOBoolean2ᚖbool(ctx, v) + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobState")) + it.JobState, err = ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobStateᚄ(ctx, v) if err != nil { return it, err } @@ -6283,6 +6910,52 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int // region **************************** object.gotpl **************************** +var acceleratorImplementors = []string{"Accelerator"} + +func (ec *executionContext) _Accelerator(ctx context.Context, sel ast.SelectionSet, obj *schema.Accelerator) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, acceleratorImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Accelerator") + case "Id": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Accelerator_Id(ctx, field, obj) + if res == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + return res + }) + case "Type": + out.Values[i] = ec._Accelerator_Type(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + case "Model": + out.Values[i] = ec._Accelerator_Model(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var clusterImplementors = []string{"Cluster"} func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet, obj *model.Cluster) graphql.Marshaler { @@ -6294,53 +6967,53 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet, switch field.Name { case "__typename": 
out.Values[i] = graphql.MarshalString("Cluster") - case "clusterID": - out.Values[i] = ec._Cluster_clusterID(ctx, field, obj) + case "ClusterID": + out.Values[i] = ec._Cluster_ClusterID(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "processorType": - out.Values[i] = ec._Cluster_processorType(ctx, field, obj) + case "ProcessorType": + out.Values[i] = ec._Cluster_ProcessorType(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "socketsPerNode": - out.Values[i] = ec._Cluster_socketsPerNode(ctx, field, obj) + case "SocketsPerNode": + out.Values[i] = ec._Cluster_SocketsPerNode(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "coresPerSocket": - out.Values[i] = ec._Cluster_coresPerSocket(ctx, field, obj) + case "CoresPerSocket": + out.Values[i] = ec._Cluster_CoresPerSocket(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "threadsPerCore": - out.Values[i] = ec._Cluster_threadsPerCore(ctx, field, obj) + case "ThreadsPerCore": + out.Values[i] = ec._Cluster_ThreadsPerCore(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "flopRateScalar": - out.Values[i] = ec._Cluster_flopRateScalar(ctx, field, obj) + case "FlopRateScalar": + out.Values[i] = ec._Cluster_FlopRateScalar(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "flopRateSimd": - out.Values[i] = ec._Cluster_flopRateSimd(ctx, field, obj) + case "FlopRateSimd": + out.Values[i] = ec._Cluster_FlopRateSimd(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "memoryBandwidth": - out.Values[i] = ec._Cluster_memoryBandwidth(ctx, field, obj) + case "MemoryBandwidth": + out.Values[i] = ec._Cluster_MemoryBandwidth(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "metricConfig": - out.Values[i] = ec._Cluster_metricConfig(ctx, field, obj) + case "MetricConfig": + out.Values[i] = ec._Cluster_MetricConfig(ctx, field, obj) if out.Values[i] == graphql.Null { 
invalids++ } - case "filterRanges": - out.Values[i] = ec._Cluster_filterRanges(ctx, field, obj) + case "FilterRanges": + out.Values[i] = ec._Cluster_FilterRanges(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -6467,62 +7140,87 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Job") - case "id": - out.Values[i] = ec._Job_id(ctx, field, obj) + case "Id": + out.Values[i] = ec._Job_Id(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "jobId": - out.Values[i] = ec._Job_jobId(ctx, field, obj) + case "JobId": + out.Values[i] = ec._Job_JobId(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "userId": - out.Values[i] = ec._Job_userId(ctx, field, obj) + case "User": + out.Values[i] = ec._Job_User(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "projectId": - out.Values[i] = ec._Job_projectId(ctx, field, obj) + case "Project": + out.Values[i] = ec._Job_Project(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "clusterId": - out.Values[i] = ec._Job_clusterId(ctx, field, obj) + case "Cluster": + out.Values[i] = ec._Job_Cluster(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "startTime": - out.Values[i] = ec._Job_startTime(ctx, field, obj) + case "StartTime": + out.Values[i] = ec._Job_StartTime(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "duration": - out.Values[i] = ec._Job_duration(ctx, field, obj) + case "Duration": + out.Values[i] = ec._Job_Duration(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "numNodes": - out.Values[i] = ec._Job_numNodes(ctx, field, obj) + case "NumNodes": + out.Values[i] = ec._Job_NumNodes(ctx, field, obj) if out.Values[i] 
== graphql.Null { atomic.AddUint32(&invalids, 1) } - case "nodes": - out.Values[i] = ec._Job_nodes(ctx, field, obj) + case "NumHWThreads": + out.Values[i] = ec._Job_NumHWThreads(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "hasProfile": - out.Values[i] = ec._Job_hasProfile(ctx, field, obj) + case "NumAcc": + out.Values[i] = ec._Job_NumAcc(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "state": - out.Values[i] = ec._Job_state(ctx, field, obj) + case "SMT": + out.Values[i] = ec._Job_SMT(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "tags": + case "Exclusive": + out.Values[i] = ec._Job_Exclusive(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + case "Partition": + out.Values[i] = ec._Job_Partition(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + case "ArrayJobId": + out.Values[i] = ec._Job_ArrayJobId(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + case "MonitoringStatus": + out.Values[i] = ec._Job_MonitoringStatus(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + case "State": + out.Values[i] = ec._Job_State(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + case "Tags": field := field out.Concurrently(i, func() (res graphql.Marshaler) { defer func() { @@ -6530,24 +7228,29 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._Job_tags(ctx, field, obj) + res = ec._Job_Tags(ctx, field, obj) if res == graphql.Null { atomic.AddUint32(&invalids, 1) } return res }) - case "loadAvg": - out.Values[i] = ec._Job_loadAvg(ctx, field, obj) - case "memUsedMax": - out.Values[i] = ec._Job_memUsedMax(ctx, field, obj) - case "flopsAnyAvg": - out.Values[i] = 
ec._Job_flopsAnyAvg(ctx, field, obj) - case "memBwAvg": - out.Values[i] = ec._Job_memBwAvg(ctx, field, obj) - case "netBwAvg": - out.Values[i] = ec._Job_netBwAvg(ctx, field, obj) - case "fileBwAvg": - out.Values[i] = ec._Job_fileBwAvg(ctx, field, obj) + case "Resources": + out.Values[i] = ec._Job_Resources(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + case "LoadAvg": + out.Values[i] = ec._Job_LoadAvg(ctx, field, obj) + case "MemUsedMax": + out.Values[i] = ec._Job_MemUsedMax(ctx, field, obj) + case "FlopsAnyAvg": + out.Values[i] = ec._Job_FlopsAnyAvg(ctx, field, obj) + case "MemBwAvg": + out.Values[i] = ec._Job_MemBwAvg(ctx, field, obj) + case "NetBwAvg": + out.Values[i] = ec._Job_NetBwAvg(ctx, field, obj) + case "FileBwAvg": + out.Values[i] = ec._Job_FileBwAvg(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -6570,23 +7273,23 @@ func (ec *executionContext) _JobMetric(ctx context.Context, sel ast.SelectionSet switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("JobMetric") - case "unit": - out.Values[i] = ec._JobMetric_unit(ctx, field, obj) + case "Unit": + out.Values[i] = ec._JobMetric_Unit(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "scope": - out.Values[i] = ec._JobMetric_scope(ctx, field, obj) + case "Scope": + out.Values[i] = ec._JobMetric_Scope(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "timestep": - out.Values[i] = ec._JobMetric_timestep(ctx, field, obj) + case "Timestep": + out.Values[i] = ec._JobMetric_Timestep(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "series": - out.Values[i] = ec._JobMetric_series(ctx, field, obj) + case "Series": + out.Values[i] = ec._JobMetric_Series(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -6612,15 +7315,17 @@ func (ec *executionContext) _JobMetricSeries(ctx context.Context, sel ast.Select switch field.Name { case 
"__typename": out.Values[i] = graphql.MarshalString("JobMetricSeries") - case "node_id": - out.Values[i] = ec._JobMetricSeries_node_id(ctx, field, obj) + case "Hostname": + out.Values[i] = ec._JobMetricSeries_Hostname(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "statistics": - out.Values[i] = ec._JobMetricSeries_statistics(ctx, field, obj) - case "data": - out.Values[i] = ec._JobMetricSeries_data(ctx, field, obj) + case "Id": + out.Values[i] = ec._JobMetricSeries_Id(ctx, field, obj) + case "Statistics": + out.Values[i] = ec._JobMetricSeries_Statistics(ctx, field, obj) + case "Data": + out.Values[i] = ec._JobMetricSeries_Data(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -6646,18 +7351,18 @@ func (ec *executionContext) _JobMetricStatistics(ctx context.Context, sel ast.Se switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("JobMetricStatistics") - case "avg": - out.Values[i] = ec._JobMetricStatistics_avg(ctx, field, obj) + case "Avg": + out.Values[i] = ec._JobMetricStatistics_Avg(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "min": - out.Values[i] = ec._JobMetricStatistics_min(ctx, field, obj) + case "Min": + out.Values[i] = ec._JobMetricStatistics_Min(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "max": - out.Values[i] = ec._JobMetricStatistics_max(ctx, field, obj) + case "Max": + out.Values[i] = ec._JobMetricStatistics_Max(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -6704,6 +7409,37 @@ func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.Sele return out } +var jobResourceImplementors = []string{"JobResource"} + +func (ec *executionContext) _JobResource(ctx context.Context, sel ast.SelectionSet, obj *schema.JobResource) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, jobResourceImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, 
field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("JobResource") + case "Hostname": + out.Values[i] = ec._JobResource_Hostname(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "HWThreads": + out.Values[i] = ec._JobResource_HWThreads(ctx, field, obj) + case "Accelerators": + out.Values[i] = ec._JobResource_Accelerators(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var jobResultListImplementors = []string{"JobResultList"} func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.SelectionSet, obj *model.JobResultList) graphql.Marshaler { @@ -6748,18 +7484,18 @@ func (ec *executionContext) _JobTag(ctx context.Context, sel ast.SelectionSet, o switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("JobTag") - case "id": - out.Values[i] = ec._JobTag_id(ctx, field, obj) + case "Id": + out.Values[i] = ec._JobTag_Id(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "tagType": - out.Values[i] = ec._JobTag_tagType(ctx, field, obj) + case "TagType": + out.Values[i] = ec._JobTag_TagType(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "tagName": - out.Values[i] = ec._JobTag_tagName(ctx, field, obj) + case "TagName": + out.Values[i] = ec._JobTag_TagName(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -6842,38 +7578,43 @@ func (ec *executionContext) _MetricConfig(ctx context.Context, sel ast.Selection switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("MetricConfig") - case "name": - out.Values[i] = ec._MetricConfig_name(ctx, field, obj) + case "Name": + out.Values[i] = ec._MetricConfig_Name(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "unit": - out.Values[i] = ec._MetricConfig_unit(ctx, field, obj) + case "Unit": + 
out.Values[i] = ec._MetricConfig_Unit(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "sampletime": - out.Values[i] = ec._MetricConfig_sampletime(ctx, field, obj) + case "Timestep": + out.Values[i] = ec._MetricConfig_Timestep(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "peak": - out.Values[i] = ec._MetricConfig_peak(ctx, field, obj) + case "Peak": + out.Values[i] = ec._MetricConfig_Peak(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "normal": - out.Values[i] = ec._MetricConfig_normal(ctx, field, obj) + case "Normal": + out.Values[i] = ec._MetricConfig_Normal(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "caution": - out.Values[i] = ec._MetricConfig_caution(ctx, field, obj) + case "Caution": + out.Values[i] = ec._MetricConfig_Caution(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "alert": - out.Values[i] = ec._MetricConfig_alert(ctx, field, obj) + case "Alert": + out.Values[i] = ec._MetricConfig_Alert(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "Scope": + out.Values[i] = ec._MetricConfig_Scope(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -7462,6 +8203,10 @@ func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, o // region ***************************** type.gotpl ***************************** +func (ec *executionContext) marshalNAccelerator2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAccelerator(ctx context.Context, sel ast.SelectionSet, v schema.Accelerator) graphql.Marshaler { + return ec._Accelerator(ctx, sel, &v) +} + func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) { res, err := graphql.UnmarshalBoolean(v) return res, graphql.ErrorOnPath(ctx, err) @@ -7913,6 +8658,53 @@ func (ec *executionContext) marshalNJobMetricWithName2ᚖgithubᚗcomᚋClusterC return ec._JobMetricWithName(ctx, sel, v) } +func (ec 
*executionContext) marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.JobResource) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResource(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResource(ctx context.Context, sel ast.SelectionSet, v *schema.JobResource) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._JobResource(ctx, sel, v) +} + func (ec *executionContext) marshalNJobResultList2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResultList(ctx context.Context, sel ast.SelectionSet, v model.JobResultList) graphql.Marshaler { return ec._JobResultList(ctx, sel, &v) } @@ -8562,6 +9354,46 @@ func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel a return res } +func (ec *executionContext) marshalOAccelerator2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAcceleratorᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Accelerator) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: 
&v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNAccelerator2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAccelerator(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + func (ec *executionContext) unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐAggregate(ctx context.Context, v interface{}) (*model.Aggregate, error) { if v == nil { return nil, nil @@ -8661,6 +9493,51 @@ func (ec *executionContext) marshalOID2ᚕstringᚄ(ctx context.Context, sel ast return ret } +func (ec *executionContext) unmarshalOInt2int(ctx context.Context, v interface{}) (int, error) { + res, err := graphql.UnmarshalInt(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler { + return graphql.MarshalInt(v) +} + +func (ec *executionContext) unmarshalOInt2ᚕintᚄ(ctx context.Context, v interface{}) ([]int, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]int, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNInt2int(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOInt2ᚕintᚄ(ctx context.Context, sel ast.SelectionSet, v []int) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNInt2int(ctx, sel, v[i]) + } + + return ret +} + func (ec *executionContext) unmarshalOInt2ᚖint(ctx context.Context, v interface{}) (*int, 
error) { if v == nil { return nil, nil @@ -8722,6 +9599,70 @@ func (ec *executionContext) marshalOJobMetricStatistics2ᚖgithubᚗcomᚋCluste return ec._JobMetricStatistics(ctx, sel, v) } +func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobStateᚄ(ctx context.Context, v interface{}) ([]model.JobState, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]model.JobState, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobStateᚄ(ctx context.Context, sel ast.SelectionSet, v []model.JobState) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + func (ec *executionContext) marshalOMetricFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐMetricFootprints(ctx context.Context, sel ast.SelectionSet, v *model.MetricFootprints) graphql.Marshaler { if v == nil { return graphql.Null diff --git 
a/graph/model/models_gen.go b/graph/model/models_gen.go index 7a5042d..8cf6015 100644 --- a/graph/model/models_gen.go +++ b/graph/model/models_gen.go @@ -38,36 +38,42 @@ type IntRangeOutput struct { } type Job struct { - ID string `json:"id"` - JobID string `json:"jobId"` - UserID string `json:"userId"` - ProjectID string `json:"projectId"` - ClusterID string `json:"clusterId"` - StartTime time.Time `json:"startTime"` - Duration int `json:"duration"` - NumNodes int `json:"numNodes"` - Nodes []string `json:"nodes"` - HasProfile bool `json:"hasProfile"` - State JobState `json:"state"` - Tags []*JobTag `json:"tags"` - LoadAvg *float64 `json:"loadAvg"` - MemUsedMax *float64 `json:"memUsedMax"` - FlopsAnyAvg *float64 `json:"flopsAnyAvg"` - MemBwAvg *float64 `json:"memBwAvg"` - NetBwAvg *float64 `json:"netBwAvg"` - FileBwAvg *float64 `json:"fileBwAvg"` + ID string `json:"Id"` + JobID int `json:"JobId"` + User string `json:"User"` + Project string `json:"Project"` + Cluster string `json:"Cluster"` + StartTime time.Time `json:"StartTime"` + Duration int `json:"Duration"` + NumNodes int `json:"NumNodes"` + NumHWThreads int `json:"NumHWThreads"` + NumAcc int `json:"NumAcc"` + Smt int `json:"SMT"` + Exclusive int `json:"Exclusive"` + Partition string `json:"Partition"` + ArrayJobID int `json:"ArrayJobId"` + MonitoringStatus int `json:"MonitoringStatus"` + State JobState `json:"State"` + Tags []*JobTag `json:"Tags"` + Resources []*schema.JobResource `json:"Resources"` + LoadAvg *float64 `json:"LoadAvg"` + MemUsedMax *float64 `json:"MemUsedMax"` + FlopsAnyAvg *float64 `json:"FlopsAnyAvg"` + MemBwAvg *float64 `json:"MemBwAvg"` + NetBwAvg *float64 `json:"NetBwAvg"` + FileBwAvg *float64 `json:"FileBwAvg"` } type JobFilter struct { Tags []string `json:"tags"` JobID *StringInput `json:"jobId"` - UserID *StringInput `json:"userId"` - ProjectID *StringInput `json:"projectId"` - ClusterID *StringInput `json:"clusterId"` + User *StringInput `json:"user"` + Project *StringInput 
`json:"project"` + Cluster *StringInput `json:"cluster"` Duration *IntRange `json:"duration"` NumNodes *IntRange `json:"numNodes"` StartTime *TimeRange `json:"startTime"` - IsRunning *bool `json:"isRunning"` + JobState []JobState `json:"jobState"` FlopsAnyAvg *FloatRange `json:"flopsAnyAvg"` MemBwAvg *FloatRange `json:"memBwAvg"` LoadAvg *FloatRange `json:"loadAvg"` @@ -97,13 +103,14 @@ type JobsStatistics struct { } type MetricConfig struct { - Name string `json:"name"` - Unit string `json:"unit"` - Sampletime int `json:"sampletime"` - Peak int `json:"peak"` - Normal int `json:"normal"` - Caution int `json:"caution"` - Alert int `json:"alert"` + Name string `json:"Name"` + Unit string `json:"Unit"` + Timestep int `json:"Timestep"` + Peak int `json:"Peak"` + Normal int `json:"Normal"` + Caution int `json:"Caution"` + Alert int `json:"Alert"` + Scope string `json:"Scope"` } type MetricFootprints struct { @@ -196,16 +203,24 @@ type JobState string const ( JobStateRunning JobState = "running" JobStateCompleted JobState = "completed" + JobStateFailed JobState = "failed" + JobStateCanceled JobState = "canceled" + JobStateStopped JobState = "stopped" + JobStateTimeout JobState = "timeout" ) var AllJobState = []JobState{ JobStateRunning, JobStateCompleted, + JobStateFailed, + JobStateCanceled, + JobStateStopped, + JobStateTimeout, } func (e JobState) IsValid() bool { switch e { - case JobStateRunning, JobStateCompleted: + case JobStateRunning, JobStateCompleted, JobStateFailed, JobStateCanceled, JobStateStopped, JobStateTimeout: return true } return false diff --git a/graph/resolver.go b/graph/resolver.go index badf516..8a1ba7b 100644 --- a/graph/resolver.go +++ b/graph/resolver.go @@ -2,6 +2,7 @@ package graph import ( "context" + "encoding/json" "errors" "fmt" "regexp" @@ -22,7 +23,12 @@ type Resolver struct { DB *sqlx.DB } -var JobTableCols []string = []string{"id", "job_id", "user_id", "project_id", "cluster_id", "start_time", "duration", "job_state", "num_nodes", 
"node_list", "flops_any_avg", "mem_bw_avg", "net_bw_avg", "file_bw_avg", "load_avg"} +var JobTableCols []string = []string{ + "id", "job_id", "cluster", "start_time", + "user", "project", "partition", "array_job_id", "duration", "job_state", "resources", + "num_nodes", "num_hwthreads", "num_acc", "smt", "exclusive", "monitoring_status", + "load_avg", "mem_used_max", "flops_any_avg", "mem_bw_avg", "net_bw_avg", "file_bw_avg", +} type Scannable interface { Scan(dest ...interface{}) error @@ -30,13 +36,18 @@ type Scannable interface { // Helper function for scanning jobs with the `jobTableCols` columns selected. func ScanJob(row Scannable) (*model.Job, error) { - job := &model.Job{HasProfile: true} + job := &model.Job{} - var nodeList string + var rawResources []byte if err := row.Scan( - &job.ID, &job.JobID, &job.UserID, &job.ProjectID, &job.ClusterID, - &job.StartTime, &job.Duration, &job.State, &job.NumNodes, &nodeList, - &job.FlopsAnyAvg, &job.MemBwAvg, &job.NetBwAvg, &job.FileBwAvg, &job.LoadAvg); err != nil { + &job.ID, &job.JobID, &job.Cluster, &job.StartTime, + &job.User, &job.Project, &job.Partition, &job.ArrayJobID, &job.Duration, &job.State, &rawResources, + &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Smt, &job.Exclusive, &job.MonitoringStatus, + &job.LoadAvg, &job.MemUsedMax, &job.FlopsAnyAvg, &job.MemBwAvg, &job.NetBwAvg, &job.FileBwAvg); err != nil { + return nil, err + } + + if err := json.Unmarshal(rawResources, &job.Resources); err != nil { return nil, err } @@ -44,7 +55,6 @@ func ScanJob(row Scannable) (*model.Job, error) { job.Duration = int(time.Since(job.StartTime).Seconds()) } - job.Nodes = strings.Split(nodeList, ",") return job, nil } @@ -130,14 +140,14 @@ func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select if filter.JobID != nil { query = buildStringCondition("job.job_id", filter.JobID, query) } - if filter.UserID != nil { - query = buildStringCondition("job.user_id", filter.UserID, query) + if filter.User 
!= nil { + query = buildStringCondition("job.user", filter.User, query) } - if filter.ProjectID != nil { - query = buildStringCondition("job.project_id", filter.ProjectID, query) + if filter.Project != nil { + query = buildStringCondition("job.project", filter.Project, query) } - if filter.ClusterID != nil { - query = buildStringCondition("job.cluster_id", filter.ClusterID, query) + if filter.Cluster != nil { + query = buildStringCondition("job.cluster", filter.Cluster, query) } if filter.StartTime != nil { query = buildTimeCondition("job.start_time", filter.StartTime, query) @@ -145,12 +155,8 @@ func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select if filter.Duration != nil { query = buildIntCondition("job.duration", filter.Duration, query) } - if filter.IsRunning != nil { - if *filter.IsRunning { - query = query.Where("job.job_state = 'running'") - } else { - query = query.Where("job.job_state = 'completed'") - } + if filter.JobState != nil { + query = query.Where(sq.Eq{"job.job_state": filter.JobState}) } if filter.NumNodes != nil { query = buildIntCondition("job.num_nodes", filter.NumNodes, query) diff --git a/graph/schema.graphqls b/graph/schema.graphqls index 4c9b3b7..cec1b2d 100644 --- a/graph/schema.graphqls +++ b/graph/schema.graphqls @@ -1,78 +1,102 @@ type Job { - id: ID! # Database ID, unique - jobId: String! # ID given to the job by the cluster scheduler - userId: String! # Username - projectId: String! # Project - clusterId: String! # Name of the cluster this job was running on - startTime: Time! # RFC3339 formated string - duration: Int! # For running jobs, the time it has already run - numNodes: Int! # Number of nodes this job was running on - nodes: [String!]! # List of hostnames - hasProfile: Boolean! # TODO: Could be removed? - state: JobState! # State of the job - tags: [JobTag!]! # List of tags this job has + Id: ID! # Database ID, unique + JobId: Int! # ID given to the job by the cluster scheduler + User: String!
# Username + Project: String! # Project + Cluster: String! # Name of the cluster this job was running on + StartTime: Time! # RFC3339 formated string + Duration: Int! # For running jobs, the time it has already run + NumNodes: Int! # Number of nodes this job was running on + NumHWThreads: Int! + NumAcc: Int! + SMT: Int! + Exclusive: Int! + Partition: String! + ArrayJobId: Int! + MonitoringStatus: Int! + State: JobState! # State of the job + Tags: [JobTag!]! # List of tags this job has + Resources: [JobResource!]! # List of hosts/hwthreads/gpus/... # Will be null for running jobs. - loadAvg: Float - memUsedMax: Float - flopsAnyAvg: Float - memBwAvg: Float - netBwAvg: Float - fileBwAvg: Float + LoadAvg: Float + MemUsedMax: Float + FlopsAnyAvg: Float + MemBwAvg: Float + NetBwAvg: Float + FileBwAvg: Float +} + +type JobResource { + Hostname: String! + HWThreads: [Int!] + Accelerators: [Accelerator!] +} + +type Accelerator { + Id: String! + Type: String! + Model: String! } # TODO: Extend by more possible states? enum JobState { running completed + failed + canceled + stopped + timeout } type JobTag { - id: ID! # Database ID, unique - tagType: String! # Type - tagName: String! # Name + Id: ID! # Database ID, unique + TagType: String! # Type + TagName: String! # Name } type Cluster { - clusterID: String! - processorType: String! - socketsPerNode: Int! - coresPerSocket: Int! - threadsPerCore: Int! - flopRateScalar: Int! - flopRateSimd: Int! - memoryBandwidth: Int! - metricConfig: [MetricConfig!]! - filterRanges: FilterRanges! + ClusterID: String! + ProcessorType: String! + SocketsPerNode: Int! + CoresPerSocket: Int! + ThreadsPerCore: Int! + FlopRateScalar: Int! + FlopRateSimd: Int! + MemoryBandwidth: Int! + MetricConfig: [MetricConfig!]! + FilterRanges: FilterRanges! } type MetricConfig { - name: String! - unit: String! - sampletime: Int! - peak: Int! - normal: Int! - caution: Int! - alert: Int! + Name: String! + Unit: String! + Timestep: Int! + Peak: Int! + Normal: Int! 
+ Caution: Int! + Alert: Int! + Scope: String! } type JobMetric { - unit: String! - scope: JobMetricScope! - timestep: Int! - series: [JobMetricSeries!]! + Unit: String! + Scope: JobMetricScope! + Timestep: Int! + Series: [JobMetricSeries!]! } type JobMetricSeries { - node_id: String! - statistics: JobMetricStatistics - data: [NullableFloat!]! + Hostname: String! + Id: Int + Statistics: JobMetricStatistics + Data: [NullableFloat!]! } type JobMetricStatistics { - avg: Float! - min: Float! - max: Float! + Avg: Float! + Min: Float! + Max: Float! } type JobMetricWithName { @@ -141,13 +165,13 @@ type FilterRanges { input JobFilter { tags: [ID!] jobId: StringInput - userId: StringInput - projectId: StringInput - clusterId: StringInput + user: StringInput + project: StringInput + cluster: StringInput duration: IntRange numNodes: IntRange startTime: TimeRange - isRunning: Boolean + jobState: [JobState!] flopsAnyAvg: FloatRange memBwAvg: FloatRange loadAvg: FloatRange diff --git a/graph/schema.resolvers.go b/graph/schema.resolvers.go index cb5c749..b16e296 100644 --- a/graph/schema.resolvers.go +++ b/graph/schema.resolvers.go @@ -15,9 +15,14 @@ import ( "github.com/ClusterCockpit/cc-jobarchive/graph/generated" "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/metricdata" + "github.com/ClusterCockpit/cc-jobarchive/schema" sq "github.com/Masterminds/squirrel" ) +func (r *acceleratorResolver) ID(ctx context.Context, obj *schema.Accelerator) (string, error) { + panic(fmt.Errorf("not implemented")) +} + func (r *jobResolver) Tags(ctx context.Context, obj *model.Job) ([]*model.JobTag, error) { query := sq. Select("tag.id", "tag.tag_type", "tag.tag_name"). @@ -232,6 +237,9 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ return res, nil } +// Accelerator returns generated.AcceleratorResolver implementation. 
+func (r *Resolver) Accelerator() generated.AcceleratorResolver { return &acceleratorResolver{r} } + // Job returns generated.JobResolver implementation. func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} } @@ -241,6 +249,7 @@ func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResol // Query returns generated.QueryResolver implementation. func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } +type acceleratorResolver struct{ *Resolver } type jobResolver struct{ *Resolver } type mutationResolver struct{ *Resolver } type queryResolver struct{ *Resolver } diff --git a/init-db.go b/init-db.go index ef49145..496714a 100644 --- a/init-db.go +++ b/init-db.go @@ -8,13 +8,61 @@ import ( "log" "os" "path/filepath" - "strings" "time" "github.com/ClusterCockpit/cc-jobarchive/schema" "github.com/jmoiron/sqlx" ) +const JOBS_DB_SCHEMA string = ` + DROP TABLE IF EXISTS job; + DROP TABLE IF EXISTS tag; + DROP TABLE IF EXISTS jobtag; + + CREATE TABLE job ( + id INTEGER PRIMARY KEY AUTOINCREMENT, -- Not needed in sqlite + job_id BIGINT NOT NULL, + cluster VARCHAR(255) NOT NULL, + start_time BIGINT NOT NULL, + + user VARCHAR(255) NOT NULL, + project VARCHAR(255) NOT NULL, + partition VARCHAR(255) NOT NULL, + array_job_id BIGINT NOT NULL, + duration INT, + job_state VARCHAR(255) CHECK(job_state IN ('running', 'completed', 'failed', 'canceled', 'stopped', 'timeout')) NOT NULL, + meta_data TEXT, -- json, but sqlite has no json type + resources TEXT NOT NULL, -- json, but sqlite has no json type + + num_nodes INT NOT NULL, + num_hwthreads INT NOT NULL, + num_acc INT NOT NULL, + smt TINYINT CHECK(smt IN (0, 1 )) NOT NULL DEFAULT 1, + exclusive TINYINT CHECK(exclusive IN (0, 1, 2)) NOT NULL DEFAULT 1, + monitoring_status TINYINT CHECK(monitoring_status IN (0, 1 )) NOT NULL DEFAULT 1, + + mem_used_max REAL NOT NULL DEFAULT 0.0, + flops_any_avg REAL NOT NULL DEFAULT 0.0, + mem_bw_avg REAL NOT NULL DEFAULT 0.0, + load_avg 
REAL NOT NULL DEFAULT 0.0, + net_bw_avg REAL NOT NULL DEFAULT 0.0, + net_data_vol_total REAL NOT NULL DEFAULT 0.0, + file_bw_avg REAL NOT NULL DEFAULT 0.0, + file_data_vol_total REAL NOT NULL DEFAULT 0.0); + + CREATE TABLE tag ( + id INTEGER PRIMARY KEY, + tag_type VARCHAR(255) NOT NULL, + tag_name VARCHAR(255) NOT NULL); + + CREATE TABLE jobtag ( + job_id INTEGER, + tag_id INTEGER, + PRIMARY KEY (job_id, tag_id), + FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE); +` + // Delete the tables "job", "tag" and "jobtag" from the database and // repopulate them using the jobs found in `archive`. func initDB(db *sqlx.DB, archive string) error { @@ -22,39 +70,7 @@ func initDB(db *sqlx.DB, archive string) error { fmt.Println("Building database...") // Basic database structure: - _, err := db.Exec(` - DROP TABLE IF EXISTS job; - DROP TABLE IF EXISTS tag; - DROP TABLE IF EXISTS jobtag; - - CREATE TABLE job ( - id INTEGER PRIMARY KEY, - job_id TEXT, - user_id TEXT, - project_id TEXT, - cluster_id TEXT, - start_time TIMESTAMP, - duration INTEGER, - job_state TEXT, - num_nodes INTEGER, - node_list TEXT, - metadata TEXT, - - flops_any_avg REAL, - mem_bw_avg REAL, - net_bw_avg REAL, - file_bw_avg REAL, - load_avg REAL); - CREATE TABLE tag ( - id INTEGER PRIMARY KEY, - tag_type TEXT, - tag_name TEXT); - CREATE TABLE jobtag ( - job_id INTEGER, - tag_id INTEGER, - PRIMARY KEY (job_id, tag_id), - FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE ON UPDATE NO ACTION, - FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE ON UPDATE NO ACTION);`) + _, err := db.Exec(JOBS_DB_SCHEMA) if err != nil { return err } @@ -64,9 +80,17 @@ func initDB(db *sqlx.DB, archive string) error { return err } - insertstmt, err := db.Prepare(`INSERT INTO job - (job_id, user_id, project_id, cluster_id, start_time, duration, job_state, num_nodes, node_list, metadata, flops_any_avg, mem_bw_avg, net_bw_avg, file_bw_avg, 
load_avg) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`) + insertstmt, err := db.Prepare(`INSERT INTO job ( + job_id, cluster, start_time, + user, project, partition, array_job_id, duration, job_state, meta_data, resources, + num_nodes, num_hwthreads, num_acc, smt, exclusive, monitoring_status, + flops_any_avg, mem_bw_avg + ) VALUES ( + ?, ?, ?, + ?, ?, ?, ?, ?, ?, ?, ?, + ?, ?, ?, ?, ?, ?, + ?, ? + );`) if err != nil { return err } @@ -149,7 +173,7 @@ func initDB(db *sqlx.DB, archive string) error { // Create indexes after inserts so that they do not // need to be continually updated. if _, err := db.Exec(` - CREATE INDEX job_by_user ON job (user_id); + CREATE INDEX job_by_user ON job (user); CREATE INDEX job_by_starttime ON job (start_time);`); err != nil { return err } @@ -167,19 +191,27 @@ func loadJob(tx *sql.Tx, stmt *sql.Stmt, tags map[string]int64, path string) err } defer f.Close() - var job schema.JobMeta + var job schema.JobMeta = schema.JobMeta{ + Exclusive: 1, + } if err := json.NewDecoder(bufio.NewReader(f)).Decode(&job); err != nil { return err } + // TODO: Other metrics... 
flopsAnyAvg := loadJobStat(&job, "flops_any") memBwAvg := loadJobStat(&job, "mem_bw") - netBwAvg := loadJobStat(&job, "net_bw") - fileBwAvg := loadJobStat(&job, "file_bw") - loadAvg := loadJobStat(&job, "load_one") - res, err := stmt.Exec(job.JobId, job.UserId, job.ProjectId, job.ClusterId, job.StartTime, job.Duration, job.JobState, - job.NumNodes, strings.Join(job.Nodes, ","), nil, flopsAnyAvg, memBwAvg, netBwAvg, fileBwAvg, loadAvg) + resources, err := json.Marshal(job.Resources) + if err != nil { + return err + } + + res, err := stmt.Exec( + job.JobId, job.Cluster, job.StartTime, + job.User, job.Project, job.Partition, job.ArrayJobId, job.Duration, job.JobState, job.MetaData, string(resources), + job.NumNodes, job.NumHWThreads, job.NumAcc, job.SMT, job.Exclusive, job.MonitoringStatus, + flopsAnyAvg, memBwAvg) if err != nil { return err } diff --git a/metricdata/archive.go b/metricdata/archive.go index eb51418..dca84fd 100644 --- a/metricdata/archive.go +++ b/metricdata/archive.go @@ -11,7 +11,6 @@ import ( "path" "path/filepath" "strconv" - "strings" "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph/model" @@ -21,19 +20,14 @@ import ( // For a given job, return the path of the `data.json`/`meta.json` file. 
// TODO: Implement Issue ClusterCockpit/ClusterCockpit#97 func getPath(job *model.Job, file string, checkLegacy bool) (string, error) { - id, err := strconv.Atoi(strings.Split(job.JobID, ".")[0]) - if err != nil { - return "", err - } - - lvl1, lvl2 := fmt.Sprintf("%d", id/1000), fmt.Sprintf("%03d", id%1000) + lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000) if !checkLegacy { - return filepath.Join(JobArchivePath, job.ClusterID, lvl1, lvl2, strconv.FormatInt(job.StartTime.Unix(), 10), file), nil + return filepath.Join(JobArchivePath, job.Cluster, lvl1, lvl2, strconv.FormatInt(job.StartTime.Unix(), 10), file), nil } - legacyPath := filepath.Join(JobArchivePath, job.ClusterID, lvl1, lvl2, file) + legacyPath := filepath.Join(JobArchivePath, job.Cluster, lvl1, lvl2, file) if _, err := os.Stat(legacyPath); errors.Is(err, os.ErrNotExist) { - return filepath.Join(JobArchivePath, job.ClusterID, lvl1, lvl2, strconv.FormatInt(job.StartTime.Unix(), 10), file), nil + return filepath.Join(JobArchivePath, job.Cluster, lvl1, lvl2, strconv.FormatInt(job.StartTime.Unix(), 10), file), nil } return legacyPath, nil @@ -87,13 +81,13 @@ func UpdateTags(job *model.Job, tags []*model.JobTag) error { f.Close() metaFile.Tags = make([]struct { - Name string "json:\"name\"" - Type string "json:\"type\"" + Name string "json:\"Name\"" + Type string "json:\"Type\"" }, 0) for _, tag := range tags { metaFile.Tags = append(metaFile.Tags, struct { - Name string "json:\"name\"" - Type string "json:\"type\"" + Name string "json:\"Name\"" + Type string "json:\"Type\"" }{ Name: tag.TagName, Type: tag.TagType, @@ -143,7 +137,7 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { } allMetrics := make([]string, 0) - metricConfigs := config.GetClusterConfig(job.ClusterID).MetricConfig + metricConfigs := config.GetClusterConfig(job.Cluster).MetricConfig for _, mc := range metricConfigs { allMetrics = append(allMetrics, mc.Name) } @@ -153,13 
+147,13 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { } tags := []struct { - Name string `json:"name"` - Type string `json:"type"` + Name string `json:"Name"` + Type string `json:"Type"` }{} for _, tag := range job.Tags { tags = append(tags, struct { - Name string `json:"name"` - Type string `json:"type"` + Name string `json:"Name"` + Type string `json:"Type"` }{ Name: tag.TagName, Type: tag.TagType, @@ -167,16 +161,25 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { } metaData := &schema.JobMeta{ - JobId: job.JobID, - UserId: job.UserID, - ClusterId: job.ClusterID, - NumNodes: job.NumNodes, - JobState: job.State.String(), - StartTime: job.StartTime.Unix(), - Duration: int64(job.Duration), - Nodes: job.Nodes, - Tags: tags, - Statistics: make(map[string]*schema.JobMetaStatistics), + JobId: int64(job.JobID), + User: job.User, + Project: job.Project, + Cluster: job.Cluster, + NumNodes: job.NumNodes, + NumHWThreads: job.NumHWThreads, + NumAcc: job.NumAcc, + Exclusive: int8(job.Exclusive), + MonitoringStatus: int8(job.MonitoringStatus), + SMT: int8(job.Smt), + Partition: job.Partition, + ArrayJobId: job.ArrayJobID, + JobState: string(job.State), + StartTime: job.StartTime.Unix(), + Duration: int64(job.Duration), + Resources: job.Resources, + MetaData: "", // TODO/FIXME: Handle `meta_data`! 
+ Tags: tags, + Statistics: make(map[string]*schema.JobMetaStatistics), } for metric, data := range jobData { @@ -188,7 +191,7 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { } metaData.Statistics[metric] = &schema.JobMetaStatistics{ - Unit: config.GetMetricConfig(job.ClusterID, metric).Unit, + Unit: config.GetMetricConfig(job.Cluster, metric).Unit, Avg: avg / float64(job.NumNodes), Min: min, Max: max, diff --git a/metricdata/cc-metric-store.go b/metricdata/cc-metric-store.go index 1d3c193..5bcd31a 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -61,8 +61,13 @@ func (ccms *CCMetricStore) doRequest(job *model.Job, suffix string, metrics []st from, to := job.StartTime.Unix(), job.StartTime.Add(time.Duration(job.Duration)*time.Second).Unix() reqBody := ApiRequestBody{} reqBody.Metrics = metrics - for _, node := range job.Nodes { - reqBody.Selectors = append(reqBody.Selectors, []string{job.ClusterID, node}) + for _, node := range job.Resources { + if node.Accelerators != nil || node.HWThreads != nil { + // TODO/FIXME: + return nil, errors.New("todo: cc-metric-store resources: Accelerator/HWThreads") + } + + reqBody.Selectors = append(reqBody.Selectors, []string{job.Cluster, node.Hostname}) } reqBodyBytes, err := json.Marshal(reqBody) @@ -86,33 +91,38 @@ func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx contex return nil, err } - resdata := make([]map[string]ApiMetricData, 0, len(job.Nodes)) + resdata := make([]map[string]ApiMetricData, 0, len(job.Resources)) if err := json.NewDecoder(res.Body).Decode(&resdata); err != nil { return nil, err } var jobData schema.JobData = make(schema.JobData) for _, metric := range metrics { - mc := config.GetMetricConfig(job.ClusterID, metric) + mc := config.GetMetricConfig(job.Cluster, metric) metricData := &schema.JobMetric{ Scope: "node", // TODO: FIXME: Whatever... 
Unit: mc.Unit, - Timestep: mc.Sampletime, - Series: make([]*schema.MetricSeries, 0, len(job.Nodes)), + Timestep: mc.Timestep, + Series: make([]*schema.MetricSeries, 0, len(job.Resources)), } - for i, node := range job.Nodes { + for i, node := range job.Resources { + if node.Accelerators != nil || node.HWThreads != nil { + // TODO/FIXME: + return nil, errors.New("todo: cc-metric-store resources: Accelerator/HWThreads") + } + data := resdata[i][metric] if data.Error != nil { return nil, errors.New(*data.Error) } if data.Avg == nil || data.Min == nil || data.Max == nil { - return nil, fmt.Errorf("no data for node '%s' and metric '%s'", node, metric) + return nil, fmt.Errorf("no data for node '%s' and metric '%s'", node.Hostname, metric) } metricData.Series = append(metricData.Series, &schema.MetricSeries{ - NodeID: node, - Data: data.Data, + Hostname: node.Hostname, + Data: data.Data, Statistics: &schema.MetricStatistics{ Avg: *data.Avg, Min: *data.Min, @@ -132,7 +142,7 @@ func (ccms *CCMetricStore) LoadStats(job *model.Job, metrics []string, ctx conte return nil, err } - resdata := make([]map[string]ApiStatsData, 0, len(job.Nodes)) + resdata := make([]map[string]ApiStatsData, 0, len(job.Resources)) if err := json.NewDecoder(res.Body).Decode(&resdata); err != nil { return nil, err } @@ -140,17 +150,22 @@ func (ccms *CCMetricStore) LoadStats(job *model.Job, metrics []string, ctx conte stats := map[string]map[string]schema.MetricStatistics{} for _, metric := range metrics { nodestats := map[string]schema.MetricStatistics{} - for i, node := range job.Nodes { + for i, node := range job.Resources { + if node.Accelerators != nil || node.HWThreads != nil { + // TODO/FIXME: + return nil, errors.New("todo: cc-metric-store resources: Accelerator/HWThreads") + } + data := resdata[i][metric] if data.Error != nil { return nil, errors.New(*data.Error) } if data.Samples == 0 { - return nil, fmt.Errorf("no data for node '%s' and metric '%s'", node, metric) + return nil, 
fmt.Errorf("no data for node '%s' and metric '%s'", node.Hostname, metric) } - nodestats[node] = schema.MetricStatistics{ + nodestats[node.Hostname] = schema.MetricStatistics{ Avg: float64(data.Avg), Min: float64(data.Min), Max: float64(data.Max), diff --git a/metricdata/influxdb-v2.go b/metricdata/influxdb-v2.go index 5c1ade0..184be79 100644 --- a/metricdata/influxdb-v2.go +++ b/metricdata/influxdb-v2.go @@ -2,6 +2,7 @@ package metricdata import ( "context" + "errors" "fmt" "log" "os" @@ -46,9 +47,14 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *model.Job, metrics []string, } fieldsCond := strings.Join(fieldsConds, " or ") - hostsConds := make([]string, 0, len(job.Nodes)) - for _, h := range job.Nodes { - hostsConds = append(hostsConds, fmt.Sprintf(`r.host == "%s"`, h)) + hostsConds := make([]string, 0, len(job.Resources)) + for _, h := range job.Resources { + if h.HWThreads != nil || h.Accelerators != nil { + // TODO/FIXME... + return nil, errors.New("the InfluxDB metric data repository does not support HWThreads or Accelerators") + } + + hostsConds = append(hostsConds, fmt.Sprintf(`r.host == "%s"`, h.Hostname)) } hostsCond := strings.Join(hostsConds, " or ") @@ -72,18 +78,18 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *model.Job, metrics []string, field, host := row.Field(), row.ValueByKey("host").(string) jobMetric, ok := jobData[field] if !ok { - mc := config.GetMetricConfig(job.ClusterID, field) + mc := config.GetMetricConfig(job.Cluster, field) jobMetric = &schema.JobMetric{ Scope: "node", // TODO: FIXME: Whatever... 
Unit: mc.Unit, - Timestep: mc.Sampletime, - Series: make([]*schema.MetricSeries, 0, len(job.Nodes)), + Timestep: mc.Timestep, + Series: make([]*schema.MetricSeries, 0, len(job.Resources)), } jobData[field] = jobMetric } currentSeries = &schema.MetricSeries{ - NodeID: host, + Hostname: host, Statistics: nil, Data: make([]schema.Float, 0), } @@ -102,7 +108,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *model.Job, metrics []string, jobMetric := jobData[metric] for node, stats := range nodes { for _, series := range jobMetric.Series { - if series.NodeID == node { + if series.Hostname == node { series.Statistics = &stats } } @@ -115,9 +121,14 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *model.Job, metrics []string, func (idb *InfluxDBv2DataRepository) LoadStats(job *model.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { stats := map[string]map[string]schema.MetricStatistics{} - hostsConds := make([]string, 0, len(job.Nodes)) - for _, h := range job.Nodes { - hostsConds = append(hostsConds, fmt.Sprintf(`r.host == "%s"`, h)) + hostsConds := make([]string, 0, len(job.Resources)) + for _, h := range job.Resources { + if h.HWThreads != nil || h.Accelerators != nil { + // TODO/FIXME... + return nil, errors.New("the InfluxDB metric data repository does not support HWThreads or Accelerators") + } + + hostsConds = append(hostsConds, fmt.Sprintf(`r.host == "%s"`, h.Hostname)) } hostsCond := strings.Join(hostsConds, " or ") diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index 5f92a8a..b0c6e82 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -59,9 +59,9 @@ func Init(jobArchivePath string, disableArchive bool) error { // Fetches the metric data for a job. 
func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { if job.State == model.JobStateRunning || !useArchive { - repo, ok := metricDataRepos[job.ClusterID] + repo, ok := metricDataRepos[job.Cluster] if !ok { - return nil, fmt.Errorf("no metric data repository configured for '%s'", job.ClusterID) + return nil, fmt.Errorf("no metric data repository configured for '%s'", job.Cluster) } return repo.LoadData(job, metrics, ctx) @@ -90,9 +90,9 @@ func LoadAverages(job *model.Job, metrics []string, data [][]schema.Float, ctx c return loadAveragesFromArchive(job, metrics, data) } - repo, ok := metricDataRepos[job.ClusterID] + repo, ok := metricDataRepos[job.Cluster] if !ok { - return fmt.Errorf("no metric data repository configured for '%s'", job.ClusterID) + return fmt.Errorf("no metric data repository configured for '%s'", job.Cluster) } stats, err := repo.LoadStats(job, metrics, ctx) diff --git a/schema/metrics.go b/schema/metrics.go index 7939596..181083e 100644 --- a/schema/metrics.go +++ b/schema/metrics.go @@ -9,10 +9,10 @@ import ( type JobData map[string]*JobMetric type JobMetric struct { - Unit string `json:"unit"` - Scope MetricScope `json:"scope"` - Timestep int `json:"timestep"` - Series []*MetricSeries `json:"series"` + Unit string `json:"Unit"` + Scope MetricScope `json:"Scope"` + Timestep int `json:"Timestep"` + Series []*MetricSeries `json:"Series"` } type MetricScope string @@ -41,38 +41,59 @@ func (e MetricScope) MarshalGQL(w io.Writer) { } type MetricStatistics struct { - Avg float64 `json:"avg"` - Min float64 `json:"min"` - Max float64 `json:"max"` + Avg float64 `json:"Avg"` + Min float64 `json:"Min"` + Max float64 `json:"Max"` } type MetricSeries struct { - NodeID string `json:"node_id"` - Statistics *MetricStatistics `json:"statistics"` - Data []Float `json:"data"` + Hostname string `json:"Hostname"` + Id int `json:"Id"` + Statistics *MetricStatistics `json:"Statistics"` + Data []Float `json:"Data"` } type 
JobMetaStatistics struct { - Unit string `json:"unit"` - Avg float64 `json:"avg"` - Min float64 `json:"min"` - Max float64 `json:"max"` + Unit string `json:"Unit"` + Avg float64 `json:"Avg"` + Min float64 `json:"Min"` + Max float64 `json:"Max"` +} + +type Accelerator struct { + ID int `json:"Id"` + Type string `json:"Type"` + Model string `json:"Model"` +} + +type JobResource struct { + Hostname string `json:"Hostname"` + HWThreads []int `json:"HWThreads,omitempty"` + Accelerators []Accelerator `json:"Accelerators,omitempty"` } // Format of `meta.json` files. type JobMeta struct { - JobId string `json:"job_id"` - UserId string `json:"user_id"` - ProjectId string `json:"project_id"` - ClusterId string `json:"cluster_id"` - NumNodes int `json:"num_nodes"` - JobState string `json:"job_state"` - StartTime int64 `json:"start_time"` - Duration int64 `json:"duration"` - Nodes []string `json:"nodes"` - Tags []struct { - Name string `json:"name"` - Type string `json:"type"` - } `json:"tags"` - Statistics map[string]*JobMetaStatistics `json:"statistics"` + JobId int64 `json:"JobId"` + User string `json:"User"` + Project string `json:"Project"` + Cluster string `json:"Cluster"` + NumNodes int `json:"NumNodes"` + NumHWThreads int `json:"NumHWThreads"` + NumAcc int `json:"NumAcc"` + Exclusive int8 `json:"Exclusive"` + MonitoringStatus int8 `json:"MonitoringStatus"` + SMT int8 `json:"SMT"` + Partition string `json:"Partition"` + ArrayJobId int `json:"ArrayJobId"` + JobState string `json:"JobState"` + StartTime int64 `json:"StartTime"` + Duration int64 `json:"Duration"` + Resources []*JobResource `json:"Resources"` + MetaData string `json:"MetaData"` + Tags []struct { + Name string `json:"Name"` + Type string `json:"Type"` + } `json:"Tags"` + Statistics map[string]*JobMetaStatistics `json:"Statistics"` } diff --git a/server.go b/server.go index 8a1775a..991436a 100644 --- a/server.go +++ b/server.go @@ -308,12 +308,12 @@ func monitoringRoutes(router *mux.Router, resolver 
*graph.Resolver) { } templates.Render(rw, r, "monitoring/job/", &templates.Page{ - Title: fmt.Sprintf("Job %s - ClusterCockpit", job.JobID), + Title: fmt.Sprintf("Job %d - ClusterCockpit", job.JobID), Config: conf, Infos: map[string]interface{}{ "id": id, "jobId": job.JobID, - "clusterId": job.ClusterID, + "clusterId": job.Cluster, }, }) }) From 5403177edc902f3fac689eff2f313e6ecf759995 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Fri, 17 Dec 2021 15:49:22 +0100 Subject: [PATCH 16/25] all schemas new --- README.md | 7 +- api/rest.go | 147 +- config/config.go | 8 +- gqlgen.yml | 28 +- graph/generated/generated.go | 4207 ++++++++++++++++++++------------- graph/model/models.go | 35 +- graph/model/models_gen.go | 168 +- graph/resolver.go | 69 +- graph/schema.graphqls | 235 +- graph/schema.resolvers.go | 86 +- graph/stats.go | 108 +- init-db.go | 64 +- metricdata/archive.go | 93 +- metricdata/cc-metric-store.go | 14 +- metricdata/influxdb-v2.go | 2 + metricdata/metricdata.go | 29 +- schema/job.go | 153 ++ schema/metrics.go | 90 +- server.go | 5 +- templates/home.html | 10 +- 20 files changed, 3175 insertions(+), 2383 deletions(-) create mode 100644 schema/job.go diff --git a/README.md b/README.md index 189031a..8a836e5 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # ClusterCockpit with a Golang backend +__*DOES NOT WORK WITH CURRENT FRONTEND*__ + [![Build](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml) ### Run server @@ -11,11 +13,6 @@ git clone --recursive git@github.com:ClusterCockpit/cc-jobarchive.git # Prepare frontend cd ./cc-jobarchive/frontend yarn install -export CCFRONTEND_ROLLUP_INTRO=' -const JOBVIEW_URL = job => `/monitoring/job/${job.id}`; -const USERVIEW_URL = userId => `/monitoring/user/${userId}`; -const TAG_URL = tag => `/monitoring/jobs/?tag=${tag.id}`; -' yarn build cd .. 
diff --git a/api/rest.go b/api/rest.go index 97f5a83..8a64b3a 100644 --- a/api/rest.go +++ b/api/rest.go @@ -2,17 +2,15 @@ package api import ( "context" - "database/sql" "encoding/json" "fmt" "log" "net/http" - "strings" "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph" - "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/metricdata" + "github.com/ClusterCockpit/cc-jobarchive/schema" sq "github.com/Masterminds/squirrel" "github.com/gorilla/mux" "github.com/jmoiron/sqlx" @@ -33,18 +31,6 @@ func (api *RestApi) MountRoutes(r *mux.Router) { r.HandleFunc("/api/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) } -// TODO/FIXME: UPDATE API! -type StartJobApiRequest struct { - JobId int64 `json:"jobId"` - UserId string `json:"userId"` - ClusterId string `json:"clusterId"` - StartTime int64 `json:"startTime"` - MetaData string `json:"metaData"` - ProjectId string `json:"projectId"` - Nodes []string `json:"nodes"` - NodeList string `json:"nodeList"` -} - type StartJobApiRespone struct { DBID int64 `json:"id"` } @@ -53,15 +39,12 @@ type StopJobApiRequest struct { // JobId, ClusterId and StartTime are optional. // They are only used if no database id was provided. JobId *string `json:"jobId"` - ClusterId *string `json:"clusterId"` + Cluster *string `json:"clusterId"` StartTime *int64 `json:"startTime"` // Payload - StopTime int64 `json:"stopTime"` -} - -type StopJobApiRespone struct { - DBID string `json:"id"` + StopTime int64 `json:"stopTime"` + State schema.JobState `json:"jobState"` } type TagJobApiRequest []*struct { @@ -110,7 +93,7 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { } for _, tag := range req { - var tagId string + var tagId int64 if err := sq.Select("id").From("tag"). Where("tag.tag_type = ?", tag.Type).Where("tag.tag_name = ?", tag.Name). 
RunWith(api.DB).QueryRow().Scan(&tagId); err != nil { @@ -123,10 +106,10 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { return } - job.Tags = append(job.Tags, &model.JobTag{ - ID: tagId, - TagType: tag.Type, - TagName: tag.Name, + job.Tags = append(job.Tags, &schema.Tag{ + ID: tagId, + Type: tag.Type, + Name: tag.Name, }) } @@ -136,31 +119,25 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { } func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { - req := StartJobApiRequest{} + req := schema.JobMeta{BaseJob: schema.JobDefaults} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(rw, err.Error(), http.StatusBadRequest) return } - if config.GetClusterConfig(req.ClusterId) == nil { - http.Error(rw, fmt.Sprintf("cluster '%s' does not exist", req.ClusterId), http.StatusBadRequest) + if config.GetClusterConfig(req.Cluster) == nil { + http.Error(rw, fmt.Sprintf("cluster '%s' does not exist", req.Cluster), http.StatusBadRequest) return } - if req.Nodes == nil { - req.Nodes = strings.Split(req.NodeList, "|") - if len(req.Nodes) == 1 { - req.Nodes = strings.Split(req.NodeList, ",") - } - } - if len(req.Nodes) == 0 || len(req.Nodes[0]) == 0 || len(req.UserId) == 0 { + if len(req.Resources) == 0 || len(req.User) == 0 || req.NumNodes == 0 { http.Error(rw, "required fields are missing", http.StatusBadRequest) return } // Check if combination of (job_id, cluster_id, start_time) already exists: - rows, err := api.DB.Query(`SELECT job.id FROM job WHERE job.job_id = ? AND job.cluster_id = ? AND job.start_time = ?`, - req.JobId, req.ClusterId, req.StartTime) + rows, err := api.DB.Query(`SELECT job.id FROM job WHERE job.job_id = ? AND job.cluster = ? 
AND job.start_time = ?`, + req.JobID, req.Cluster, req.StartTime) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return @@ -173,9 +150,12 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { return } - res, err := api.DB.Exec( - `INSERT INTO job (job_id, user_id, project_id, cluster_id, start_time, duration, job_state, num_nodes, node_list, metadata) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`, - req.JobId, req.UserId, req.ProjectId, req.ClusterId, req.StartTime, 0, model.JobStateRunning, len(req.Nodes), strings.Join(req.Nodes, ","), req.MetaData) + req.RawResources, err = json.Marshal(req.Resources) + if err != nil { + log.Fatal(err) + } + + res, err := api.DB.NamedExec(schema.JobInsertStmt, req) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return @@ -187,7 +167,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { return } - log.Printf("new job (id: %d): clusterId=%s, jobId=%d, userId=%s, startTime=%d, nodes=%v\n", id, req.ClusterId, req.JobId, req.UserId, req.StartTime, req.Nodes) + log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d\n", id, req.Cluster, req.JobID, req.User, req.StartTime) rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusCreated) json.NewEncoder(rw).Encode(StartJobApiRespone{ @@ -203,66 +183,89 @@ func (api *RestApi) stopJob(rw http.ResponseWriter, r *http.Request) { } var err error - var job *model.Job + var sql string + var args []interface{} id, ok := mux.Vars(r)["id"] if ok { - job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job").Where("job.id = ?", id).RunWith(api.DB).QueryRow()) + sql, args, err = sq.Select(schema.JobColumns...).From("job").Where("job.id = ?", id).ToSql() } else { - job, err = graph.ScanJob(sq.Select(graph.JobTableCols...).From("job"). + sql, args, err = sq.Select(schema.JobColumns...).From("job"). Where("job.job_id = ?", req.JobId). 
- Where("job.cluster_id = ?", req.ClusterId). - Where("job.start_time = ?", req.StartTime). - RunWith(api.DB).QueryRow()) + Where("job.cluster = ?", req.Cluster). + Where("job.start_time = ?", req.StartTime).ToSql() } if err != nil { - http.Error(rw, err.Error(), http.StatusNotFound) + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + job, err := schema.ScanJob(api.DB.QueryRowx(sql, args...)) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) return } - if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != model.JobStateRunning { + if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning { http.Error(rw, "stop_time must be larger than start_time and only running jobs can be stopped", http.StatusBadRequest) return } - doArchiving := func(job *model.Job, ctx context.Context) error { - job.Duration = int(req.StopTime - job.StartTime.Unix()) + if req.State != "" && !req.State.Valid() { + http.Error(rw, fmt.Sprintf("invalid job state: '%s'", req.State), http.StatusBadRequest) + return + } else { + req.State = schema.JobStateCompleted + } + + doArchiving := func(job *schema.Job, ctx context.Context) error { + job.Duration = int32(req.StopTime - job.StartTime.Unix()) jobMeta, err := metricdata.ArchiveJob(job, ctx) if err != nil { - log.Printf("archiving job (id: %s) failed: %s\n", job.ID, err.Error()) + log.Printf("archiving job (dbid: %d) failed: %s\n", job.ID, err.Error()) return err } - getAvg := func(metric string) sql.NullFloat64 { - stats, ok := jobMeta.Statistics[metric] - if !ok { - return sql.NullFloat64{Valid: false} + stmt := sq.Update("job"). + Set("job_state", req.State). + Set("duration", job.Duration). 
+ Where("job.id = ?", job.ID) + + for metric, stats := range jobMeta.Statistics { + switch metric { + case "flops_any": + stmt = stmt.Set("flops_any_avg", stats.Avg) + case "mem_used": + stmt = stmt.Set("mem_used_max", stats.Max) + case "mem_bw": + stmt = stmt.Set("mem_bw_avg", stats.Avg) + case "load": + stmt = stmt.Set("load_avg", stats.Avg) + case "net_bw": + stmt = stmt.Set("net_bw_avg", stats.Avg) + case "file_bw": + stmt = stmt.Set("file_bw_avg", stats.Avg) } - return sql.NullFloat64{Valid: true, Float64: stats.Avg} } - if _, err := api.DB.Exec( - `UPDATE job SET - job_state = ?, duration = ?, - flops_any_avg = ?, mem_bw_avg = ?, net_bw_avg = ?, file_bw_avg = ?, load_avg = ? - WHERE job.id = ?`, - model.JobStateCompleted, job.Duration, - getAvg("flops_any"), getAvg("mem_bw"), getAvg("net_bw"), getAvg("file_bw"), getAvg("load"), - job.ID); err != nil { - log.Printf("archiving job (id: %s) failed: %s\n", job.ID, err.Error()) + sql, args, err := stmt.ToSql() + if err != nil { + log.Printf("archiving job (dbid: %d) failed: %s\n", job.ID, err.Error()) return err } - log.Printf("job stopped and archived (id: %s)\n", job.ID) + if _, err := api.DB.Exec(sql, args...); err != nil { + log.Printf("archiving job (dbid: %d) failed: %s\n", job.ID, err.Error()) + return err + } + + log.Printf("job stopped and archived (dbid: %d)\n", job.ID) return nil } - log.Printf("archiving job... (id: %s): clusterId=%s, jobId=%d, userId=%s, startTime=%s\n", job.ID, job.Cluster, job.JobID, job.User, job.StartTime) + log.Printf("archiving job... 
(dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s\n", job.ID, job.Cluster, job.JobID, job.User, job.StartTime) if api.AsyncArchiving { rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) - json.NewEncoder(rw).Encode(StopJobApiRespone{ - DBID: job.ID, - }) + json.NewEncoder(rw).Encode(job) go doArchiving(job, context.Background()) } else { err := doArchiving(job, r.Context()) diff --git a/config/config.go b/config/config.go index 502ac67..e4011ac 100644 --- a/config/config.go +++ b/config/config.go @@ -46,8 +46,8 @@ func Init(usersdb *sqlx.DB, authEnabled bool, uiConfig map[string]interface{}, j cluster.FilterRanges.StartTime.To = time.Unix(0, 0) } - if cluster.ClusterID != de.Name() { - return fmt.Errorf("the file '%s/cluster.json' contains the clusterId '%s'", de.Name(), cluster.ClusterID) + if cluster.Name != de.Name() { + return fmt.Errorf("the file '%s/cluster.json' contains the clusterId '%s'", de.Name(), cluster.Name) } Clusters = append(Clusters, &cluster) @@ -149,7 +149,7 @@ func ServeConfig(rw http.ResponseWriter, r *http.Request) { func GetClusterConfig(cluster string) *model.Cluster { for _, c := range Clusters { - if c.ClusterID == cluster { + if c.Name == cluster { return c } } @@ -158,7 +158,7 @@ func GetClusterConfig(cluster string) *model.Cluster { func GetMetricConfig(cluster, metric string) *model.MetricConfig { for _, c := range Clusters { - if c.ClusterID == cluster { + if c.Name == cluster { for _, m := range c.MetricConfig { if m.Name == metric { return m diff --git a/gqlgen.yml b/gqlgen.yml index ea78535..e9ae8e3 100644 --- a/gqlgen.yml +++ b/gqlgen.yml @@ -55,21 +55,19 @@ models: - github.com/99designs/gqlgen/graphql.Int64 - github.com/99designs/gqlgen/graphql.Int32 Job: + model: "github.com/ClusterCockpit/cc-jobarchive/schema.Job" fields: - Tags: + tags: resolver: true - JobMetric: - model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobMetric" - JobMetricSeries: - model: 
"github.com/ClusterCockpit/cc-jobarchive/schema.MetricSeries" - JobMetricStatistics: - model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricStatistics" - NullableFloat: - model: "github.com/ClusterCockpit/cc-jobarchive/schema.Float" - JobMetricScope: - model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricScope" - JobResource: - model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobResource" - Accelerator: - model: "github.com/ClusterCockpit/cc-jobarchive/schema.Accelerator" + NullableFloat: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.Float" } + MetricScope: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricScope" } + JobStatistics: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobStatistics" } + Tag: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.Tag" } + Resource: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.Resource" } + JobState: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobState" } + JobMetric: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.JobMetric" } + Series: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.Series" } + MetricStatistics: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.MetricStatistics" } + StatsSeries: { model: "github.com/ClusterCockpit/cc-jobarchive/schema.StatsSeries" } + diff --git a/graph/generated/generated.go b/graph/generated/generated.go index 72619c9..3134b6d 100644 --- a/graph/generated/generated.go +++ b/graph/generated/generated.go @@ -37,8 +37,8 @@ type Config struct { } type ResolverRoot interface { - Accelerator() AcceleratorResolver Job() JobResolver + JobMetric() JobMetricResolver Mutation() MutationResolver Query() QueryResolver } @@ -54,16 +54,10 @@ type ComplexityRoot struct { } Cluster struct { - ClusterID func(childComplexity int) int - CoresPerSocket func(childComplexity int) int - FilterRanges func(childComplexity int) int - FlopRateScalar func(childComplexity int) int - FlopRateSimd 
func(childComplexity int) int - MemoryBandwidth func(childComplexity int) int - MetricConfig func(childComplexity int) int - ProcessorType func(childComplexity int) int - SocketsPerNode func(childComplexity int) int - ThreadsPerCore func(childComplexity int) int + FilterRanges func(childComplexity int) int + MetricConfig func(childComplexity int) int + Name func(childComplexity int) int + Partitions func(childComplexity int) int } FilterRanges struct { @@ -83,26 +77,20 @@ type ComplexityRoot struct { } Job struct { - ArrayJobID func(childComplexity int) int + ArrayJobId func(childComplexity int) int Cluster func(childComplexity int) int Duration func(childComplexity int) int Exclusive func(childComplexity int) int - FileBwAvg func(childComplexity int) int - FlopsAnyAvg func(childComplexity int) int ID func(childComplexity int) int JobID func(childComplexity int) int - LoadAvg func(childComplexity int) int - MemBwAvg func(childComplexity int) int - MemUsedMax func(childComplexity int) int MonitoringStatus func(childComplexity int) int - NetBwAvg func(childComplexity int) int NumAcc func(childComplexity int) int NumHWThreads func(childComplexity int) int NumNodes func(childComplexity int) int Partition func(childComplexity int) int Project func(childComplexity int) int Resources func(childComplexity int) int - Smt func(childComplexity int) int + SMT func(childComplexity int) int StartTime func(childComplexity int) int State func(childComplexity int) int Tags func(childComplexity int) int @@ -110,34 +98,27 @@ type ComplexityRoot struct { } JobMetric struct { - Scope func(childComplexity int) int - Series func(childComplexity int) int - Timestep func(childComplexity int) int - Unit func(childComplexity int) int - } - - JobMetricSeries struct { - Data func(childComplexity int) int - Hostname func(childComplexity int) int - Id func(childComplexity int) int - Statistics func(childComplexity int) int - } - - JobMetricStatistics struct { - Avg func(childComplexity int) int 
- Max func(childComplexity int) int - Min func(childComplexity int) int + Scope func(childComplexity int) int + Series func(childComplexity int) int + StatisticsSeries func(childComplexity int) int + Timestep func(childComplexity int) int + Unit func(childComplexity int) int } JobMetricWithName struct { - Metric func(childComplexity int) int - Name func(childComplexity int) int + Core func(childComplexity int) int + Hwthread func(childComplexity int) int + MemoryDomain func(childComplexity int) int + Name func(childComplexity int) int + Node func(childComplexity int) int + Socket func(childComplexity int) int } JobResource struct { - Accelerators func(childComplexity int) int - HWThreads func(childComplexity int) int - Hostname func(childComplexity int) int + Accelerators func(childComplexity int) int + Configuration func(childComplexity int) int + Hostname func(childComplexity int) int + Hwthreads func(childComplexity int) int } JobResultList struct { @@ -147,12 +128,6 @@ type ComplexityRoot struct { Offset func(childComplexity int) int } - JobTag struct { - ID func(childComplexity int) int - TagName func(childComplexity int) int - TagType func(childComplexity int) int - } - JobsStatistics struct { HistNumNodes func(childComplexity int) int HistWalltime func(childComplexity int) int @@ -179,6 +154,12 @@ type ComplexityRoot struct { Name func(childComplexity int) int } + MetricStatistics struct { + Avg func(childComplexity int) int + Max func(childComplexity int) int + Min func(childComplexity int) int + } + Mutation struct { AddTagsToJob func(childComplexity int, job string, tagIds []string) int CreateTag func(childComplexity int, typeArg string, name string) int @@ -197,6 +178,18 @@ type ComplexityRoot struct { Metrics func(childComplexity int) int } + Partition struct { + CoresPerSocket func(childComplexity int) int + FlopRateScalar func(childComplexity int) int + FlopRateSimd func(childComplexity int) int + MemoryBandwidth func(childComplexity int) int + Name 
func(childComplexity int) int + ProcessorType func(childComplexity int) int + SocketsPerNode func(childComplexity int) int + ThreadsPerCore func(childComplexity int) int + Topology func(childComplexity int) int + } + Query struct { Clusters func(childComplexity int) int Job func(childComplexity int, id string) int @@ -209,29 +202,58 @@ type ComplexityRoot struct { Tags func(childComplexity int) int } + Series struct { + Data func(childComplexity int) int + Hostname func(childComplexity int) int + Id func(childComplexity int) int + Statistics func(childComplexity int) int + } + + StatsSeries struct { + Max func(childComplexity int) int + Mean func(childComplexity int) int + Min func(childComplexity int) int + } + + Tag struct { + ID func(childComplexity int) int + Name func(childComplexity int) int + Type func(childComplexity int) int + } + TimeRangeOutput struct { From func(childComplexity int) int To func(childComplexity int) int } + + Topology struct { + Accelerators func(childComplexity int) int + Core func(childComplexity int) int + Die func(childComplexity int) int + MemoryDomain func(childComplexity int) int + Node func(childComplexity int) int + Socket func(childComplexity int) int + } } -type AcceleratorResolver interface { - ID(ctx context.Context, obj *schema.Accelerator) (string, error) -} type JobResolver interface { - Tags(ctx context.Context, obj *model.Job) ([]*model.JobTag, error) + Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) + Resources(ctx context.Context, obj *schema.Job) ([]*model.JobResource, error) +} +type JobMetricResolver interface { + StatisticsSeries(ctx context.Context, obj *schema.JobMetric) ([]*schema.StatsSeries, error) } type MutationResolver interface { - CreateTag(ctx context.Context, typeArg string, name string) (*model.JobTag, error) + CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) DeleteTag(ctx context.Context, id string) (string, error) - AddTagsToJob(ctx context.Context, 
job string, tagIds []string) ([]*model.JobTag, error) - RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*model.JobTag, error) + AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) + RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) } type QueryResolver interface { Clusters(ctx context.Context) ([]*model.Cluster, error) - Tags(ctx context.Context) ([]*model.JobTag, error) - Job(ctx context.Context, id string) (*model.Job, error) + Tags(ctx context.Context) ([]*schema.Tag, error) + Job(ctx context.Context, id string) (*schema.Job, error) JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.MetricFootprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) @@ -255,96 +277,54 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in _ = ec switch typeName + "." 
+ field { - case "Accelerator.Id": + case "Accelerator.id": if e.complexity.Accelerator.ID == nil { break } return e.complexity.Accelerator.ID(childComplexity), true - case "Accelerator.Model": + case "Accelerator.model": if e.complexity.Accelerator.Model == nil { break } return e.complexity.Accelerator.Model(childComplexity), true - case "Accelerator.Type": + case "Accelerator.type": if e.complexity.Accelerator.Type == nil { break } return e.complexity.Accelerator.Type(childComplexity), true - case "Cluster.ClusterID": - if e.complexity.Cluster.ClusterID == nil { - break - } - - return e.complexity.Cluster.ClusterID(childComplexity), true - - case "Cluster.CoresPerSocket": - if e.complexity.Cluster.CoresPerSocket == nil { - break - } - - return e.complexity.Cluster.CoresPerSocket(childComplexity), true - - case "Cluster.FilterRanges": + case "Cluster.filterRanges": if e.complexity.Cluster.FilterRanges == nil { break } return e.complexity.Cluster.FilterRanges(childComplexity), true - case "Cluster.FlopRateScalar": - if e.complexity.Cluster.FlopRateScalar == nil { - break - } - - return e.complexity.Cluster.FlopRateScalar(childComplexity), true - - case "Cluster.FlopRateSimd": - if e.complexity.Cluster.FlopRateSimd == nil { - break - } - - return e.complexity.Cluster.FlopRateSimd(childComplexity), true - - case "Cluster.MemoryBandwidth": - if e.complexity.Cluster.MemoryBandwidth == nil { - break - } - - return e.complexity.Cluster.MemoryBandwidth(childComplexity), true - - case "Cluster.MetricConfig": + case "Cluster.metricConfig": if e.complexity.Cluster.MetricConfig == nil { break } return e.complexity.Cluster.MetricConfig(childComplexity), true - case "Cluster.ProcessorType": - if e.complexity.Cluster.ProcessorType == nil { + case "Cluster.name": + if e.complexity.Cluster.Name == nil { break } - return e.complexity.Cluster.ProcessorType(childComplexity), true + return e.complexity.Cluster.Name(childComplexity), true - case "Cluster.SocketsPerNode": - if 
e.complexity.Cluster.SocketsPerNode == nil { + case "Cluster.partitions": + if e.complexity.Cluster.Partitions == nil { break } - return e.complexity.Cluster.SocketsPerNode(childComplexity), true - - case "Cluster.ThreadsPerCore": - if e.complexity.Cluster.ThreadsPerCore == nil { - break - } - - return e.complexity.Cluster.ThreadsPerCore(childComplexity), true + return e.complexity.Cluster.Partitions(childComplexity), true case "FilterRanges.duration": if e.complexity.FilterRanges.Duration == nil { @@ -395,133 +375,91 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.IntRangeOutput.To(childComplexity), true - case "Job.ArrayJobId": - if e.complexity.Job.ArrayJobID == nil { + case "Job.arrayJobId": + if e.complexity.Job.ArrayJobId == nil { break } - return e.complexity.Job.ArrayJobID(childComplexity), true + return e.complexity.Job.ArrayJobId(childComplexity), true - case "Job.Cluster": + case "Job.cluster": if e.complexity.Job.Cluster == nil { break } return e.complexity.Job.Cluster(childComplexity), true - case "Job.Duration": + case "Job.duration": if e.complexity.Job.Duration == nil { break } return e.complexity.Job.Duration(childComplexity), true - case "Job.Exclusive": + case "Job.exclusive": if e.complexity.Job.Exclusive == nil { break } return e.complexity.Job.Exclusive(childComplexity), true - case "Job.FileBwAvg": - if e.complexity.Job.FileBwAvg == nil { - break - } - - return e.complexity.Job.FileBwAvg(childComplexity), true - - case "Job.FlopsAnyAvg": - if e.complexity.Job.FlopsAnyAvg == nil { - break - } - - return e.complexity.Job.FlopsAnyAvg(childComplexity), true - - case "Job.Id": + case "Job.id": if e.complexity.Job.ID == nil { break } return e.complexity.Job.ID(childComplexity), true - case "Job.JobId": + case "Job.jobId": if e.complexity.Job.JobID == nil { break } return e.complexity.Job.JobID(childComplexity), true - case "Job.LoadAvg": - if e.complexity.Job.LoadAvg == nil { - break - } - - 
return e.complexity.Job.LoadAvg(childComplexity), true - - case "Job.MemBwAvg": - if e.complexity.Job.MemBwAvg == nil { - break - } - - return e.complexity.Job.MemBwAvg(childComplexity), true - - case "Job.MemUsedMax": - if e.complexity.Job.MemUsedMax == nil { - break - } - - return e.complexity.Job.MemUsedMax(childComplexity), true - - case "Job.MonitoringStatus": + case "Job.monitoringStatus": if e.complexity.Job.MonitoringStatus == nil { break } return e.complexity.Job.MonitoringStatus(childComplexity), true - case "Job.NetBwAvg": - if e.complexity.Job.NetBwAvg == nil { - break - } - - return e.complexity.Job.NetBwAvg(childComplexity), true - - case "Job.NumAcc": + case "Job.numAcc": if e.complexity.Job.NumAcc == nil { break } return e.complexity.Job.NumAcc(childComplexity), true - case "Job.NumHWThreads": + case "Job.numHWThreads": if e.complexity.Job.NumHWThreads == nil { break } return e.complexity.Job.NumHWThreads(childComplexity), true - case "Job.NumNodes": + case "Job.numNodes": if e.complexity.Job.NumNodes == nil { break } return e.complexity.Job.NumNodes(childComplexity), true - case "Job.Partition": + case "Job.partition": if e.complexity.Job.Partition == nil { break } return e.complexity.Job.Partition(childComplexity), true - case "Job.Project": + case "Job.project": if e.complexity.Job.Project == nil { break } return e.complexity.Job.Project(childComplexity), true - case "Job.Resources": + case "Job.resources": if e.complexity.Job.Resources == nil { break } @@ -529,123 +467,95 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Job.Resources(childComplexity), true case "Job.SMT": - if e.complexity.Job.Smt == nil { + if e.complexity.Job.SMT == nil { break } - return e.complexity.Job.Smt(childComplexity), true + return e.complexity.Job.SMT(childComplexity), true - case "Job.StartTime": + case "Job.startTime": if e.complexity.Job.StartTime == nil { break } return 
e.complexity.Job.StartTime(childComplexity), true - case "Job.State": + case "Job.state": if e.complexity.Job.State == nil { break } return e.complexity.Job.State(childComplexity), true - case "Job.Tags": + case "Job.tags": if e.complexity.Job.Tags == nil { break } return e.complexity.Job.Tags(childComplexity), true - case "Job.User": + case "Job.user": if e.complexity.Job.User == nil { break } return e.complexity.Job.User(childComplexity), true - case "JobMetric.Scope": + case "JobMetric.scope": if e.complexity.JobMetric.Scope == nil { break } return e.complexity.JobMetric.Scope(childComplexity), true - case "JobMetric.Series": + case "JobMetric.series": if e.complexity.JobMetric.Series == nil { break } return e.complexity.JobMetric.Series(childComplexity), true - case "JobMetric.Timestep": + case "JobMetric.statisticsSeries": + if e.complexity.JobMetric.StatisticsSeries == nil { + break + } + + return e.complexity.JobMetric.StatisticsSeries(childComplexity), true + + case "JobMetric.timestep": if e.complexity.JobMetric.Timestep == nil { break } return e.complexity.JobMetric.Timestep(childComplexity), true - case "JobMetric.Unit": + case "JobMetric.unit": if e.complexity.JobMetric.Unit == nil { break } return e.complexity.JobMetric.Unit(childComplexity), true - case "JobMetricSeries.Data": - if e.complexity.JobMetricSeries.Data == nil { + case "JobMetricWithName.core": + if e.complexity.JobMetricWithName.Core == nil { break } - return e.complexity.JobMetricSeries.Data(childComplexity), true + return e.complexity.JobMetricWithName.Core(childComplexity), true - case "JobMetricSeries.Hostname": - if e.complexity.JobMetricSeries.Hostname == nil { + case "JobMetricWithName.hwthread": + if e.complexity.JobMetricWithName.Hwthread == nil { break } - return e.complexity.JobMetricSeries.Hostname(childComplexity), true + return e.complexity.JobMetricWithName.Hwthread(childComplexity), true - case "JobMetricSeries.Id": - if e.complexity.JobMetricSeries.Id == nil { + case 
"JobMetricWithName.memoryDomain": + if e.complexity.JobMetricWithName.MemoryDomain == nil { break } - return e.complexity.JobMetricSeries.Id(childComplexity), true - - case "JobMetricSeries.Statistics": - if e.complexity.JobMetricSeries.Statistics == nil { - break - } - - return e.complexity.JobMetricSeries.Statistics(childComplexity), true - - case "JobMetricStatistics.Avg": - if e.complexity.JobMetricStatistics.Avg == nil { - break - } - - return e.complexity.JobMetricStatistics.Avg(childComplexity), true - - case "JobMetricStatistics.Max": - if e.complexity.JobMetricStatistics.Max == nil { - break - } - - return e.complexity.JobMetricStatistics.Max(childComplexity), true - - case "JobMetricStatistics.Min": - if e.complexity.JobMetricStatistics.Min == nil { - break - } - - return e.complexity.JobMetricStatistics.Min(childComplexity), true - - case "JobMetricWithName.metric": - if e.complexity.JobMetricWithName.Metric == nil { - break - } - - return e.complexity.JobMetricWithName.Metric(childComplexity), true + return e.complexity.JobMetricWithName.MemoryDomain(childComplexity), true case "JobMetricWithName.name": if e.complexity.JobMetricWithName.Name == nil { @@ -654,27 +564,48 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetricWithName.Name(childComplexity), true - case "JobResource.Accelerators": + case "JobMetricWithName.node": + if e.complexity.JobMetricWithName.Node == nil { + break + } + + return e.complexity.JobMetricWithName.Node(childComplexity), true + + case "JobMetricWithName.socket": + if e.complexity.JobMetricWithName.Socket == nil { + break + } + + return e.complexity.JobMetricWithName.Socket(childComplexity), true + + case "JobResource.accelerators": if e.complexity.JobResource.Accelerators == nil { break } return e.complexity.JobResource.Accelerators(childComplexity), true - case "JobResource.HWThreads": - if e.complexity.JobResource.HWThreads == nil { + case 
"JobResource.configuration": + if e.complexity.JobResource.Configuration == nil { break } - return e.complexity.JobResource.HWThreads(childComplexity), true + return e.complexity.JobResource.Configuration(childComplexity), true - case "JobResource.Hostname": + case "JobResource.hostname": if e.complexity.JobResource.Hostname == nil { break } return e.complexity.JobResource.Hostname(childComplexity), true + case "JobResource.hwthreads": + if e.complexity.JobResource.Hwthreads == nil { + break + } + + return e.complexity.JobResource.Hwthreads(childComplexity), true + case "JobResultList.count": if e.complexity.JobResultList.Count == nil { break @@ -703,27 +634,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Offset(childComplexity), true - case "JobTag.Id": - if e.complexity.JobTag.ID == nil { - break - } - - return e.complexity.JobTag.ID(childComplexity), true - - case "JobTag.TagName": - if e.complexity.JobTag.TagName == nil { - break - } - - return e.complexity.JobTag.TagName(childComplexity), true - - case "JobTag.TagType": - if e.complexity.JobTag.TagType == nil { - break - } - - return e.complexity.JobTag.TagType(childComplexity), true - case "JobsStatistics.histNumNodes": if e.complexity.JobsStatistics.HistNumNodes == nil { break @@ -787,7 +697,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricConfig.Caution(childComplexity), true - case "MetricConfig.Name": + case "MetricConfig.name": if e.complexity.MetricConfig.Name == nil { break } @@ -808,21 +718,21 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricConfig.Peak(childComplexity), true - case "MetricConfig.Scope": + case "MetricConfig.scope": if e.complexity.MetricConfig.Scope == nil { break } return e.complexity.MetricConfig.Scope(childComplexity), true - case "MetricConfig.Timestep": + case "MetricConfig.timestep": 
if e.complexity.MetricConfig.Timestep == nil { break } return e.complexity.MetricConfig.Timestep(childComplexity), true - case "MetricConfig.Unit": + case "MetricConfig.unit": if e.complexity.MetricConfig.Unit == nil { break } @@ -843,6 +753,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricFootprints.Name(childComplexity), true + case "MetricStatistics.avg": + if e.complexity.MetricStatistics.Avg == nil { + break + } + + return e.complexity.MetricStatistics.Avg(childComplexity), true + + case "MetricStatistics.max": + if e.complexity.MetricStatistics.Max == nil { + break + } + + return e.complexity.MetricStatistics.Max(childComplexity), true + + case "MetricStatistics.min": + if e.complexity.MetricStatistics.Min == nil { + break + } + + return e.complexity.MetricStatistics.Min(childComplexity), true + case "Mutation.addTagsToJob": if e.complexity.Mutation.AddTagsToJob == nil { break @@ -931,6 +862,69 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.NodeMetrics.Metrics(childComplexity), true + case "Partition.coresPerSocket": + if e.complexity.Partition.CoresPerSocket == nil { + break + } + + return e.complexity.Partition.CoresPerSocket(childComplexity), true + + case "Partition.flopRateScalar": + if e.complexity.Partition.FlopRateScalar == nil { + break + } + + return e.complexity.Partition.FlopRateScalar(childComplexity), true + + case "Partition.flopRateSimd": + if e.complexity.Partition.FlopRateSimd == nil { + break + } + + return e.complexity.Partition.FlopRateSimd(childComplexity), true + + case "Partition.memoryBandwidth": + if e.complexity.Partition.MemoryBandwidth == nil { + break + } + + return e.complexity.Partition.MemoryBandwidth(childComplexity), true + + case "Partition.name": + if e.complexity.Partition.Name == nil { + break + } + + return e.complexity.Partition.Name(childComplexity), true + + case "Partition.processorType": + if 
e.complexity.Partition.ProcessorType == nil { + break + } + + return e.complexity.Partition.ProcessorType(childComplexity), true + + case "Partition.socketsPerNode": + if e.complexity.Partition.SocketsPerNode == nil { + break + } + + return e.complexity.Partition.SocketsPerNode(childComplexity), true + + case "Partition.threadsPerCore": + if e.complexity.Partition.ThreadsPerCore == nil { + break + } + + return e.complexity.Partition.ThreadsPerCore(childComplexity), true + + case "Partition.topology": + if e.complexity.Partition.Topology == nil { + break + } + + return e.complexity.Partition.Topology(childComplexity), true + case "Query.clusters": if e.complexity.Query.Clusters == nil { break @@ -1029,6 +1023,76 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.Tags(childComplexity), true + case "Series.data": + if e.complexity.Series.Data == nil { + break + } + + return e.complexity.Series.Data(childComplexity), true + + case "Series.hostname": + if e.complexity.Series.Hostname == nil { + break + } + + return e.complexity.Series.Hostname(childComplexity), true + + case "Series.id": + if e.complexity.Series.Id == nil { + break + } + + return e.complexity.Series.Id(childComplexity), true + + case "Series.statistics": + if e.complexity.Series.Statistics == nil { + break + } + + return e.complexity.Series.Statistics(childComplexity), true + + case "StatsSeries.max": + if e.complexity.StatsSeries.Max == nil { + break + } + + return e.complexity.StatsSeries.Max(childComplexity), true + + case "StatsSeries.mean": + if e.complexity.StatsSeries.Mean == nil { + break + } + + return e.complexity.StatsSeries.Mean(childComplexity), true + + case "StatsSeries.min": + if e.complexity.StatsSeries.Min == nil { + break + } + + return e.complexity.StatsSeries.Min(childComplexity), true + + case "Tag.id": + if e.complexity.Tag.ID == nil { + break + } + + return e.complexity.Tag.ID(childComplexity), true + + case "Tag.name": + 
if e.complexity.Tag.Name == nil { + break + } + + return e.complexity.Tag.Name(childComplexity), true + + case "Tag.type": + if e.complexity.Tag.Type == nil { + break + } + + return e.complexity.Tag.Type(childComplexity), true + case "TimeRangeOutput.from": if e.complexity.TimeRangeOutput.From == nil { break @@ -1043,6 +1107,48 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.TimeRangeOutput.To(childComplexity), true + case "Topology.accelerators": + if e.complexity.Topology.Accelerators == nil { + break + } + + return e.complexity.Topology.Accelerators(childComplexity), true + + case "Topology.core": + if e.complexity.Topology.Core == nil { + break + } + + return e.complexity.Topology.Core(childComplexity), true + + case "Topology.die": + if e.complexity.Topology.Die == nil { + break + } + + return e.complexity.Topology.Die(childComplexity), true + + case "Topology.memoryDomain": + if e.complexity.Topology.MemoryDomain == nil { + break + } + + return e.complexity.Topology.MemoryDomain(childComplexity), true + + case "Topology.node": + if e.complexity.Topology.Node == nil { + break + } + + return e.complexity.Topology.Node(childComplexity), true + + case "Topology.socket": + if e.complexity.Topology.Socket == nil { + break + } + + return e.complexity.Topology.Socket(childComplexity), true + } return 0, false } @@ -1107,110 +1213,125 @@ func (ec *executionContext) introspectType(name string) (*introspection.Type, er } var sources = []*ast.Source{ - {Name: "graph/schema.graphqls", Input: `type Job { - Id: ID! # Database ID, unique - JobId: Int! # ID given to the job by the cluster scheduler - User: String! # Username - Project: String! # Project - Cluster: String! # Name of the cluster this job was running on - StartTime: Time! # RFC3339 formated string - Duration: Int! # For running jobs, the time it has already run - NumNodes: Int! # Number of nodes this job was running on - NumHWThreads: Int! - NumAcc: Int! 
+ {Name: "graph/schema.graphqls", Input: `scalar Time +scalar NullableFloat +scalar MetricScope +scalar JobState + +type Job { + id: ID! + jobId: Int! + user: String! + project: String! + cluster: String! + startTime: Time! + duration: Int! + numNodes: Int! + numHWThreads: Int! + numAcc: Int! SMT: Int! - Exclusive: Int! - Partition: String! - ArrayJobId: Int! - MonitoringStatus: Int! - State: JobState! # State of the job - Tags: [JobTag!]! # List of tags this job has - Resources: [JobResource!]! # List of hosts/hwthreads/gpus/... - - # Will be null for running jobs. - LoadAvg: Float - MemUsedMax: Float - FlopsAnyAvg: Float - MemBwAvg: Float - NetBwAvg: Float - FileBwAvg: Float -} - -type JobResource { - Hostname: String! - HWThreads: [Int!] - Accelerators: [Accelerator!] -} - -type Accelerator { - Id: String! - Type: String! - Model: String! -} - -# TODO: Extend by more possible states? -enum JobState { - running - completed - failed - canceled - stopped - timeout -} - -type JobTag { - Id: ID! # Database ID, unique - TagType: String! # Type - TagName: String! # Name + exclusive: Int! + partition: String! + arrayJobId: Int! + monitoringStatus: Int! + state: JobState! + tags: [Tag!]! + resources: [JobResource!]! } type Cluster { - ClusterID: String! - ProcessorType: String! - SocketsPerNode: Int! - CoresPerSocket: Int! - ThreadsPerCore: Int! - FlopRateScalar: Int! - FlopRateSimd: Int! - MemoryBandwidth: Int! - MetricConfig: [MetricConfig!]! - FilterRanges: FilterRanges! + name: String! + metricConfig: [MetricConfig!]! + filterRanges: FilterRanges! + partitions: [Partition!]! +} + +type Partition { + name: String! + processorType: String! + socketsPerNode: Int! + coresPerSocket: Int! + threadsPerCore: Int! + flopRateScalar: Int! + flopRateSimd: Int! + memoryBandwidth: Int! + topology: Topology! +} + +type Topology { + node: [Int!] + socket: [[Int!]!] + memoryDomain: [[Int!]!] + die: [[Int!]!] + core: [[Int!]!] + accelerators: [Accelerator!] 
+} + +type Accelerator { + id: String! + type: String! + model: String! } type MetricConfig { - Name: String! - Unit: String! - Timestep: Int! - Peak: Int! - Normal: Int! - Caution: Int! - Alert: Int! - Scope: String! + name: String! + unit: String! + scope: String! + timestep: Int! + Peak: Float! + Normal: Float! + Caution: Float! + Alert: Float! } -type JobMetric { - Unit: String! - Scope: JobMetricScope! - Timestep: Int! - Series: [JobMetricSeries!]! +type Tag { + id: ID! + type: String! + name: String! } -type JobMetricSeries { - Hostname: String! - Id: Int - Statistics: JobMetricStatistics - Data: [NullableFloat!]! -} - -type JobMetricStatistics { - Avg: Float! - Min: Float! - Max: Float! +type JobResource { + hostname: String! + hwthreads: [Int!] + accelerators: [Int!] + configuration: String } type JobMetricWithName { - name: String! - metric: JobMetric! + name: String! + + node: JobMetric + socket: JobMetric + memoryDomain: JobMetric + core: JobMetric + hwthread: JobMetric +} + +type JobMetric { + unit: String! + scope: MetricScope! + timestep: Int! + series: [Series!]! + statisticsSeries: [StatsSeries!] +} + +type Series { + hostname: String! + id: Int + statistics: MetricStatistics + data: [NullableFloat!]! +} + +type MetricStatistics { + avg: Float! + min: Float! + max: Float! +} + +type StatsSeries { + mean: [NullableFloat!] + min: [NullableFloat!] + max: [NullableFloat!] } type MetricFootprints { @@ -1232,7 +1353,7 @@ type NodeMetrics { type Query { clusters: [Cluster!]! # List of all clusters - tags: [JobTag!]! # List of all tags + tags: [Tag!]! # List of all tags job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]! @@ -1247,23 +1368,16 @@ type Query { } type Mutation { - createTag(type: String!, name: String!): JobTag! + createTag(type: String!, name: String!): Tag! deleteTag(id: ID!): ID! - addTagsToJob(job: ID!, tagIds: [ID!]!): [JobTag!]! - removeTagsFromJob(job: ID!, tagIds: [ID!]!): [JobTag!]! 
+ addTagsToJob(job: ID!, tagIds: [ID!]!): [Tag!]! + removeTagsFromJob(job: ID!, tagIds: [ID!]!): [Tag!]! updateConfiguration(name: String!, value: String!): String } -type IntRangeOutput { - from: Int! - to: Int! -} - -type TimeRangeOutput { - from: Time! - to: Time! -} +type IntRangeOutput { from: Int!, to: Int! } +type TimeRangeOutput { from: Time!, to: Time! } type FilterRanges { duration: IntRangeOutput! @@ -1280,7 +1394,7 @@ input JobFilter { duration: IntRange numNodes: IntRange startTime: TimeRange - jobState: [JobState!] + state: [JobState!] flopsAnyAvg: FloatRange memBwAvg: FloatRange loadAvg: FloatRange @@ -1304,20 +1418,9 @@ input StringInput { endsWith: String } -input IntRange { - from: Int! - to: Int! -} - -input FloatRange { - from: Float! - to: Float! -} - -input TimeRange { - from: Time - to: Time -} +input IntRange { from: Int!, to: Int! } +input FloatRange { from: Float!, to: Float! } +input TimeRange { from: Time, to: Time } type JobResultList { items: [Job!]! @@ -1345,10 +1448,6 @@ input PageRequest { itemsPerPage: Int! page: Int! } - -scalar Time -scalar NullableFloat -scalar JobMetricScope `, BuiltIn: false}, } var parsedSchema = gqlparser.MustLoadSchema(sources...) 
@@ -1761,7 +1860,7 @@ func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArg // region **************************** field.gotpl ***************************** -func (ec *executionContext) _Accelerator_Id(ctx context.Context, field graphql.CollectedField, obj *schema.Accelerator) (ret graphql.Marshaler) { +func (ec *executionContext) _Accelerator_id(ctx context.Context, field graphql.CollectedField, obj *model.Accelerator) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1772,14 +1871,14 @@ func (ec *executionContext) _Accelerator_Id(ctx context.Context, field graphql.C Object: "Accelerator", Field: field, Args: nil, - IsMethod: true, - IsResolver: true, + IsMethod: false, + IsResolver: false, } ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Accelerator().ID(rctx, obj) + return obj.ID, nil }) if err != nil { ec.Error(ctx, err) @@ -1796,7 +1895,7 @@ func (ec *executionContext) _Accelerator_Id(ctx context.Context, field graphql.C return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Accelerator_Type(ctx context.Context, field graphql.CollectedField, obj *schema.Accelerator) (ret graphql.Marshaler) { +func (ec *executionContext) _Accelerator_type(ctx context.Context, field graphql.CollectedField, obj *model.Accelerator) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1831,7 +1930,7 @@ func (ec *executionContext) _Accelerator_Type(ctx context.Context, field graphql return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Accelerator_Model(ctx context.Context, field graphql.CollectedField, obj *schema.Accelerator) (ret graphql.Marshaler) { +func (ec *executionContext) _Accelerator_model(ctx 
context.Context, field graphql.CollectedField, obj *model.Accelerator) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1866,7 +1965,7 @@ func (ec *executionContext) _Accelerator_Model(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_ClusterID(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_name(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -1884,7 +1983,7 @@ func (ec *executionContext) _Cluster_ClusterID(ctx context.Context, field graphq ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.ClusterID, nil + return obj.Name, nil }) if err != nil { ec.Error(ctx, err) @@ -1901,252 +2000,7 @@ func (ec *executionContext) _Cluster_ClusterID(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_ProcessorType(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Cluster", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.ProcessorType, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if 
!graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) _Cluster_SocketsPerNode(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Cluster", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.SocketsPerNode, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(int) - fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _Cluster_CoresPerSocket(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Cluster", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.CoresPerSocket, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(int) - fc.Result = res - 
return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _Cluster_ThreadsPerCore(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Cluster", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.ThreadsPerCore, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(int) - fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _Cluster_FlopRateScalar(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Cluster", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.FlopRateScalar, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(int) - fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _Cluster_FlopRateSimd(ctx context.Context, field 
graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Cluster", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.FlopRateSimd, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(int) - fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _Cluster_MemoryBandwidth(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Cluster", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.MemoryBandwidth, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(int) - fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _Cluster_MetricConfig(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_metricConfig(ctx context.Context, field 
graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2181,7 +2035,7 @@ func (ec *executionContext) _Cluster_MetricConfig(ctx context.Context, field gra return ec.marshalNMetricConfig2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐMetricConfigᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _Cluster_FilterRanges(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { +func (ec *executionContext) _Cluster_filterRanges(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2216,6 +2070,41 @@ func (ec *executionContext) _Cluster_FilterRanges(ctx context.Context, field gra return ec.marshalNFilterRanges2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐFilterRanges(ctx, field.Selections, res) } +func (ec *executionContext) _Cluster_partitions(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Cluster", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Partitions, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.Partition) + fc.Result = res + return ec.marshalNPartition2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐPartitionᚄ(ctx, field.Selections, res) +} + func (ec 
*executionContext) _FilterRanges_duration(ctx context.Context, field graphql.CollectedField, obj *model.FilterRanges) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -2461,7 +2350,7 @@ func (ec *executionContext) _IntRangeOutput_to(ctx context.Context, field graphq return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Id(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_id(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2491,12 +2380,12 @@ func (ec *executionContext) _Job_Id(ctx context.Context, field graphql.Collected } return graphql.Null } - res := resTmp.(string) + res := resTmp.(int64) fc.Result = res - return ec.marshalNID2string(ctx, field.Selections, res) + return ec.marshalNID2int64(ctx, field.Selections, res) } -func (ec *executionContext) _Job_JobId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_jobId(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2526,12 +2415,12 @@ func (ec *executionContext) _Job_JobId(ctx context.Context, field graphql.Collec } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int64) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int64(ctx, field.Selections, res) } -func (ec *executionContext) _Job_User(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_user(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ 
-2566,7 +2455,7 @@ func (ec *executionContext) _Job_User(ctx context.Context, field graphql.Collect return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Project(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_project(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2601,7 +2490,7 @@ func (ec *executionContext) _Job_Project(ctx context.Context, field graphql.Coll return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Cluster(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2636,7 +2525,7 @@ func (ec *executionContext) _Job_Cluster(ctx context.Context, field graphql.Coll return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_StartTime(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_startTime(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2671,7 +2560,7 @@ func (ec *executionContext) _Job_StartTime(ctx context.Context, field graphql.Co return ec.marshalNTime2timeᚐTime(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Duration(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_duration(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { 
if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2701,12 +2590,12 @@ func (ec *executionContext) _Job_Duration(ctx context.Context, field graphql.Col } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_NumNodes(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_numNodes(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2736,12 +2625,12 @@ func (ec *executionContext) _Job_NumNodes(ctx context.Context, field graphql.Col } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_NumHWThreads(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_numHWThreads(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2771,12 +2660,12 @@ func (ec *executionContext) _Job_NumHWThreads(ctx context.Context, field graphql } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_NumAcc(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_numAcc(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); 
r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2806,12 +2695,12 @@ func (ec *executionContext) _Job_NumAcc(ctx context.Context, field graphql.Colle } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_SMT(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_SMT(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2829,7 +2718,7 @@ func (ec *executionContext) _Job_SMT(ctx context.Context, field graphql.Collecte ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Smt, nil + return obj.SMT, nil }) if err != nil { ec.Error(ctx, err) @@ -2841,12 +2730,12 @@ func (ec *executionContext) _Job_SMT(ctx context.Context, field graphql.Collecte } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Exclusive(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_exclusive(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2876,12 +2765,12 @@ func (ec *executionContext) _Job_Exclusive(ctx context.Context, field graphql.Co } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return 
ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Partition(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_partition(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2916,7 +2805,7 @@ func (ec *executionContext) _Job_Partition(ctx context.Context, field graphql.Co return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _Job_ArrayJobId(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_arrayJobId(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -2934,7 +2823,7 @@ func (ec *executionContext) _Job_ArrayJobId(ctx context.Context, field graphql.C ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.ArrayJobID, nil + return obj.ArrayJobId, nil }) if err != nil { ec.Error(ctx, err) @@ -2946,12 +2835,12 @@ func (ec *executionContext) _Job_ArrayJobId(ctx context.Context, field graphql.C } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_MonitoringStatus(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_monitoringStatus(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ 
-2981,12 +2870,12 @@ func (ec *executionContext) _Job_MonitoringStatus(ctx context.Context, field gra } return graphql.Null } - res := resTmp.(int) + res := resTmp.(int32) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNInt2int32(ctx, field.Selections, res) } -func (ec *executionContext) _Job_State(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_state(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3016,12 +2905,12 @@ func (ec *executionContext) _Job_State(ctx context.Context, field graphql.Collec } return graphql.Null } - res := resTmp.(model.JobState) + res := resTmp.(schema.JobState) fc.Result = res - return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx, field.Selections, res) + return ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobState(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Tags(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { +func (ec *executionContext) _Job_tags(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3051,12 +2940,12 @@ func (ec *executionContext) _Job_Tags(ctx context.Context, field graphql.Collect } return graphql.Null } - res := resTmp.([]*model.JobTag) + res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNJobTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTagᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _Job_Resources(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret 
graphql.Marshaler) { +func (ec *executionContext) _Job_resources(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3067,14 +2956,14 @@ func (ec *executionContext) _Job_Resources(ctx context.Context, field graphql.Co Object: "Job", Field: field, Args: nil, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, } ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Resources, nil + return ec.resolvers.Job().Resources(rctx, obj) }) if err != nil { ec.Error(ctx, err) @@ -3086,204 +2975,12 @@ func (ec *executionContext) _Job_Resources(ctx context.Context, field graphql.Co } return graphql.Null } - res := resTmp.([]*schema.JobResource) + res := resTmp.([]*model.JobResource) fc.Result = res - return ec.marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResourceᚄ(ctx, field.Selections, res) + return ec.marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResourceᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _Job_LoadAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Job", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.LoadAvg, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*float64) - fc.Result 
= res - return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) -} - -func (ec *executionContext) _Job_MemUsedMax(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Job", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.MemUsedMax, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*float64) - fc.Result = res - return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) -} - -func (ec *executionContext) _Job_FlopsAnyAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Job", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.FlopsAnyAvg, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*float64) - fc.Result = res - return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) -} - -func (ec *executionContext) _Job_MemBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - 
fc := &graphql.FieldContext{ - Object: "Job", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.MemBwAvg, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*float64) - fc.Result = res - return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) -} - -func (ec *executionContext) _Job_NetBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Job", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.NetBwAvg, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*float64) - fc.Result = res - return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) -} - -func (ec *executionContext) _Job_FileBwAvg(ctx context.Context, field graphql.CollectedField, obj *model.Job) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "Job", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return 
obj.FileBwAvg, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*float64) - fc.Result = res - return ec.marshalOFloat2ᚖfloat64(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetric_Unit(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_unit(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3318,7 +3015,7 @@ func (ec *executionContext) _JobMetric_Unit(ctx context.Context, field graphql.C return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetric_Scope(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_scope(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3350,10 +3047,10 @@ func (ec *executionContext) _JobMetric_Scope(ctx context.Context, field graphql. 
} res := resTmp.(schema.MetricScope) fc.Result = res - return ec.marshalNJobMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, field.Selections, res) + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetric_Timestep(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_timestep(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3388,7 +3085,7 @@ func (ec *executionContext) _JobMetric_Timestep(ctx context.Context, field graph return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetric_Series(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_series(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3418,12 +3115,12 @@ func (ec *executionContext) _JobMetric_Series(ctx context.Context, field graphql } return graphql.Null } - res := resTmp.([]*schema.MetricSeries) + res := resTmp.([]schema.Series) fc.Result = res - return ec.marshalNJobMetricSeries2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricSeriesᚄ(ctx, field.Selections, res) + return ec.marshalNSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeriesᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricSeries_Hostname(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetric_statisticsSeries(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { defer func() { if r 
:= recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3431,52 +3128,17 @@ func (ec *executionContext) _JobMetricSeries_Hostname(ctx context.Context, field } }() fc := &graphql.FieldContext{ - Object: "JobMetricSeries", + Object: "JobMetric", Field: field, Args: nil, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, } ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Hostname, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricSeries_Id(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricSeries", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Id, nil + return ec.resolvers.JobMetric().StatisticsSeries(rctx, obj) }) if err != nil { ec.Error(ctx, err) @@ -3485,181 +3147,9 @@ func (ec *executionContext) _JobMetricSeries_Id(ctx context.Context, field graph if resTmp == nil { return graphql.Null } - res := resTmp.(int) + res := resTmp.([]*schema.StatsSeries) fc.Result = res - return ec.marshalOInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricSeries_Statistics(ctx context.Context, field graphql.CollectedField, 
obj *schema.MetricSeries) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricSeries", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Statistics, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*schema.MetricStatistics) - fc.Result = res - return ec.marshalOJobMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricStatistics(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricSeries_Data(ctx context.Context, field graphql.CollectedField, obj *schema.MetricSeries) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricSeries", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Data, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.([]schema.Float) - fc.Result = res - return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricStatistics_Avg(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret 
graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricStatistics", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Avg, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(float64) - fc.Result = res - return ec.marshalNFloat2float64(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricStatistics_Min(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricStatistics", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Min, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(float64) - fc.Result = res - return ec.marshalNFloat2float64(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricStatistics_Max(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret 
= graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricStatistics", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Max, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(float64) - fc.Result = res - return ec.marshalNFloat2float64(ctx, field.Selections, res) + return ec.marshalOStatsSeries2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeriesᚄ(ctx, field.Selections, res) } func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { @@ -3697,7 +3187,7 @@ func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field g return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricWithName_node(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3715,24 +3205,149 @@ func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Metric, nil + return obj.Node, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { 
- ec.Errorf(ctx, "must not be null") - } return graphql.Null } res := resTmp.(*schema.JobMetric) fc.Result = res - return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) + return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) } -func (ec *executionContext) _JobResource_Hostname(ctx context.Context, field graphql.CollectedField, obj *schema.JobResource) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricWithName_socket(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobMetricWithName", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Socket, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*schema.JobMetric) + fc.Result = res + return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobMetricWithName_memoryDomain(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobMetricWithName", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx 
// use context from middleware stack in children + return obj.MemoryDomain, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*schema.JobMetric) + fc.Result = res + return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobMetricWithName_core(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobMetricWithName", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Core, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*schema.JobMetric) + fc.Result = res + return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobMetricWithName_hwthread(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobMetricWithName", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Hwthread, nil + }) + if err != nil { + ec.Error(ctx, err) + return 
graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*schema.JobMetric) + fc.Result = res + return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobResource_hostname(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3767,7 +3382,7 @@ func (ec *executionContext) _JobResource_Hostname(ctx context.Context, field gra return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobResource_HWThreads(ctx context.Context, field graphql.CollectedField, obj *schema.JobResource) (ret graphql.Marshaler) { +func (ec *executionContext) _JobResource_hwthreads(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3785,7 +3400,7 @@ func (ec *executionContext) _JobResource_HWThreads(ctx context.Context, field gr ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.HWThreads, nil + return obj.Hwthreads, nil }) if err != nil { ec.Error(ctx, err) @@ -3799,7 +3414,7 @@ func (ec *executionContext) _JobResource_HWThreads(ctx context.Context, field gr return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _JobResource_Accelerators(ctx context.Context, field graphql.CollectedField, obj *schema.JobResource) (ret graphql.Marshaler) { +func (ec *executionContext) _JobResource_accelerators(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3826,9 +3441,41 @@ func (ec 
*executionContext) _JobResource_Accelerators(ctx context.Context, field if resTmp == nil { return graphql.Null } - res := resTmp.([]schema.Accelerator) + res := resTmp.([]int) fc.Result = res - return ec.marshalOAccelerator2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAcceleratorᚄ(ctx, field.Selections, res) + return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _JobResource_configuration(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "JobResource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Configuration, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*string) + fc.Result = res + return ec.marshalOString2ᚖstring(ctx, field.Selections, res) } func (ec *executionContext) _JobResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) { @@ -3861,9 +3508,9 @@ func (ec *executionContext) _JobResultList_items(ctx context.Context, field grap } return graphql.Null } - res := resTmp.([]*model.Job) + res := resTmp.([]*schema.Job) fc.Result = res - return ec.marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobᚄ(ctx, field.Selections, res) + return ec.marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobᚄ(ctx, field.Selections, res) } func (ec *executionContext) _JobResultList_offset(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) { @@ -3962,111 +3609,6 @@ func (ec 
*executionContext) _JobResultList_count(ctx context.Context, field grap return ec.marshalOInt2ᚖint(ctx, field.Selections, res) } -func (ec *executionContext) _JobTag_Id(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobTag", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.ID, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNID2string(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobTag_TagType(ctx context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobTag", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.TagType, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobTag_TagName(ctx 
context.Context, field graphql.CollectedField, obj *model.JobTag) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobTag", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.TagName, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -4312,7 +3854,7 @@ func (ec *executionContext) _JobsStatistics_histNumNodes(ctx context.Context, fi return ec.marshalNHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐHistoPointᚄ(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_Name(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -4347,7 +3889,7 @@ func (ec *executionContext) _MetricConfig_Name(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_Unit(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec 
*executionContext) _MetricConfig_unit(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -4382,7 +3924,42 @@ func (ec *executionContext) _MetricConfig_Unit(ctx context.Context, field graphq return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_Timestep(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_scope(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "MetricConfig", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Scope, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _MetricConfig_timestep(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -4447,9 +4024,9 @@ func (ec *executionContext) _MetricConfig_Peak(ctx context.Context, field graphq } return graphql.Null } - res := resTmp.(int) + res := resTmp.(float64) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNFloat2float64(ctx, 
field.Selections, res) } func (ec *executionContext) _MetricConfig_Normal(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { @@ -4482,9 +4059,9 @@ func (ec *executionContext) _MetricConfig_Normal(ctx context.Context, field grap } return graphql.Null } - res := resTmp.(int) + res := resTmp.(float64) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNFloat2float64(ctx, field.Selections, res) } func (ec *executionContext) _MetricConfig_Caution(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { @@ -4517,9 +4094,9 @@ func (ec *executionContext) _MetricConfig_Caution(ctx context.Context, field gra } return graphql.Null } - res := resTmp.(int) + res := resTmp.(float64) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNFloat2float64(ctx, field.Selections, res) } func (ec *executionContext) _MetricConfig_Alert(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { @@ -4552,44 +4129,9 @@ func (ec *executionContext) _MetricConfig_Alert(ctx context.Context, field graph } return graphql.Null } - res := resTmp.(int) + res := resTmp.(float64) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) -} - -func (ec *executionContext) _MetricConfig_Scope(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "MetricConfig", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Scope, nil - }) - if err != nil { - 
ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNFloat2float64(ctx, field.Selections, res) } func (ec *executionContext) _MetricFootprints_name(ctx context.Context, field graphql.CollectedField, obj *model.MetricFootprints) (ret graphql.Marshaler) { @@ -4662,6 +4204,111 @@ func (ec *executionContext) _MetricFootprints_footprints(ctx context.Context, fi return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } +func (ec *executionContext) _MetricStatistics_avg(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "MetricStatistics", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Avg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalNFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) _MetricStatistics_min(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "MetricStatistics", + 
Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Min, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalNFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) _MetricStatistics_max(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "MetricStatistics", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Max, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalNFloat2float64(ctx, field.Selections, res) +} + func (ec *executionContext) _Mutation_createTag(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -4699,9 +4346,9 @@ func (ec *executionContext) _Mutation_createTag(ctx context.Context, field graph } return graphql.Null } - res := resTmp.(*model.JobTag) + res := resTmp.(*schema.Tag) fc.Result = res - return 
ec.marshalNJobTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTag(ctx, field.Selections, res) + return ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTag(ctx, field.Selections, res) } func (ec *executionContext) _Mutation_deleteTag(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { @@ -4783,9 +4430,9 @@ func (ec *executionContext) _Mutation_addTagsToJob(ctx context.Context, field gr } return graphql.Null } - res := resTmp.([]*model.JobTag) + res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNJobTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTagᚄ(ctx, field.Selections, res) } func (ec *executionContext) _Mutation_removeTagsFromJob(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { @@ -4825,9 +4472,9 @@ func (ec *executionContext) _Mutation_removeTagsFromJob(ctx context.Context, fie } return graphql.Null } - res := resTmp.([]*model.JobTag) + res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNJobTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTagᚄ(ctx, field.Selections, res) } func (ec *executionContext) _Mutation_updateConfiguration(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { @@ -5009,6 +4656,321 @@ func (ec *executionContext) _NodeMetrics_metrics(ctx context.Context, field grap return ec.marshalNNodeMetric2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetricᚄ(ctx, field.Selections, res) } +func (ec *executionContext) _Partition_name(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + 
Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_processorType(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ProcessorType, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_socketsPerNode(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) 
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.SocketsPerNode, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_coresPerSocket(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.CoresPerSocket, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_threadsPerCore(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack 
in children + return obj.ThreadsPerCore, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_flopRateScalar(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.FlopRateScalar, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_flopRateSimd(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.FlopRateSimd, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if 
!graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_memoryBandwidth(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemoryBandwidth, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) _Partition_topology(ctx context.Context, field graphql.CollectedField, obj *model.Partition) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Partition", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Topology, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.Topology) + fc.Result = res 
+ return ec.marshalNTopology2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐTopology(ctx, field.Selections, res) +} + func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -5074,9 +5036,9 @@ func (ec *executionContext) _Query_tags(ctx context.Context, field graphql.Colle } return graphql.Null } - res := resTmp.([]*model.JobTag) + res := resTmp.([]*schema.Tag) fc.Result = res - return ec.marshalNJobTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTagᚄ(ctx, field.Selections, res) + return ec.marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTagᚄ(ctx, field.Selections, res) } func (ec *executionContext) _Query_job(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { @@ -5113,9 +5075,9 @@ func (ec *executionContext) _Query_job(ctx context.Context, field graphql.Collec if resTmp == nil { return graphql.Null } - res := resTmp.(*model.Job) + res := resTmp.(*schema.Job) fc.Result = res - return ec.marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJob(ctx, field.Selections, res) + return ec.marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJob(ctx, field.Selections, res) } func (ec *executionContext) _Query_jobMetrics(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { @@ -5441,6 +5403,341 @@ func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.C return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res) } +func (ec *executionContext) _Series_hostname(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Series", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } 
+ + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Hostname, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Series_id(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Series", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Id, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) _Series_statistics(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Series", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Statistics, nil + }) + if err != nil { + 
ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*schema.MetricStatistics) + fc.Result = res + return ec.marshalOMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricStatistics(ctx, field.Selections, res) +} + +func (ec *executionContext) _Series_data(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Series", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]schema.Float) + fc.Result = res + return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _StatsSeries_mean(ctx context.Context, field graphql.CollectedField, obj *schema.StatsSeries) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "StatsSeries", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Mean, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res 
:= resTmp.([]schema.Float) + fc.Result = res + return ec.marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _StatsSeries_min(ctx context.Context, field graphql.CollectedField, obj *schema.StatsSeries) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "StatsSeries", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Min, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]schema.Float) + fc.Result = res + return ec.marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _StatsSeries_max(ctx context.Context, field graphql.CollectedField, obj *schema.StatsSeries) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "StatsSeries", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Max, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]schema.Float) + fc.Result = res + return ec.marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) +} + +func (ec 
*executionContext) _Tag_id(ctx context.Context, field graphql.CollectedField, obj *schema.Tag) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Tag", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int64) + fc.Result = res + return ec.marshalNID2int64(ctx, field.Selections, res) +} + +func (ec *executionContext) _Tag_type(ctx context.Context, field graphql.CollectedField, obj *schema.Tag) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Tag", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Type, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Tag_name(ctx context.Context, field graphql.CollectedField, obj *schema.Tag) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + 
ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Tag", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + func (ec *executionContext) _TimeRangeOutput_from(ctx context.Context, field graphql.CollectedField, obj *model.TimeRangeOutput) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -5511,6 +5808,198 @@ func (ec *executionContext) _TimeRangeOutput_to(ctx context.Context, field graph return ec.marshalNTime2timeᚐTime(ctx, field.Selections, res) } +func (ec *executionContext) _Topology_node(ctx context.Context, field graphql.CollectedField, obj *model.Topology) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Topology", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Node, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]int) + fc.Result = res + return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Topology_socket(ctx context.Context, field graphql.CollectedField, obj *model.Topology) (ret 
graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Topology", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Socket, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([][]int) + fc.Result = res + return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Topology_memoryDomain(ctx context.Context, field graphql.CollectedField, obj *model.Topology) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Topology", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemoryDomain, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([][]int) + fc.Result = res + return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Topology_die(ctx context.Context, field graphql.CollectedField, obj *model.Topology) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Topology", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, 
err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Die, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([][]int) + fc.Result = res + return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Topology_core(ctx context.Context, field graphql.CollectedField, obj *model.Topology) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Topology", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Core, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([][]int) + fc.Result = res + return ec.marshalOInt2ᚕᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Topology_accelerators(ctx context.Context, field graphql.CollectedField, obj *model.Topology) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Topology", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Accelerators, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := 
resTmp.([]*model.Accelerator) + fc.Result = res + return ec.marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐAcceleratorᚄ(ctx, field.Selections, res) +} + func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -6724,11 +7213,11 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int if err != nil { return it, err } - case "jobState": + case "state": var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobState")) - it.JobState, err = ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobStateᚄ(ctx, v) + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("state")) + it.State, err = ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobStateᚄ(ctx, v) if err != nil { return it, err } @@ -6912,7 +7401,7 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int var acceleratorImplementors = []string{"Accelerator"} -func (ec *executionContext) _Accelerator(ctx context.Context, sel ast.SelectionSet, obj *schema.Accelerator) graphql.Marshaler { +func (ec *executionContext) _Accelerator(ctx context.Context, sel ast.SelectionSet, obj *model.Accelerator) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, acceleratorImplementors) out := graphql.NewFieldSet(fields) @@ -6921,29 +7410,20 @@ func (ec *executionContext) _Accelerator(ctx context.Context, sel ast.SelectionS switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Accelerator") - case "Id": - field := field - out.Concurrently(i, func() (res graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - } - }() - res = ec._Accelerator_Id(ctx, field, obj) - if res == graphql.Null { - atomic.AddUint32(&invalids, 1) - } - return res 
- }) - case "Type": - out.Values[i] = ec._Accelerator_Type(ctx, field, obj) + case "id": + out.Values[i] = ec._Accelerator_id(ctx, field, obj) if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + invalids++ } - case "Model": - out.Values[i] = ec._Accelerator_Model(ctx, field, obj) + case "type": + out.Values[i] = ec._Accelerator_type(ctx, field, obj) if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + invalids++ + } + case "model": + out.Values[i] = ec._Accelerator_model(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ } default: panic("unknown field " + strconv.Quote(field.Name)) @@ -6967,53 +7447,23 @@ func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet, switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Cluster") - case "ClusterID": - out.Values[i] = ec._Cluster_ClusterID(ctx, field, obj) + case "name": + out.Values[i] = ec._Cluster_name(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "ProcessorType": - out.Values[i] = ec._Cluster_ProcessorType(ctx, field, obj) + case "metricConfig": + out.Values[i] = ec._Cluster_metricConfig(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "SocketsPerNode": - out.Values[i] = ec._Cluster_SocketsPerNode(ctx, field, obj) + case "filterRanges": + out.Values[i] = ec._Cluster_filterRanges(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "CoresPerSocket": - out.Values[i] = ec._Cluster_CoresPerSocket(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "ThreadsPerCore": - out.Values[i] = ec._Cluster_ThreadsPerCore(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "FlopRateScalar": - out.Values[i] = ec._Cluster_FlopRateScalar(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "FlopRateSimd": - out.Values[i] = ec._Cluster_FlopRateSimd(ctx, field, obj) - if out.Values[i] == graphql.Null 
{ - invalids++ - } - case "MemoryBandwidth": - out.Values[i] = ec._Cluster_MemoryBandwidth(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "MetricConfig": - out.Values[i] = ec._Cluster_MetricConfig(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "FilterRanges": - out.Values[i] = ec._Cluster_FilterRanges(ctx, field, obj) + case "partitions": + out.Values[i] = ec._Cluster_partitions(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -7131,7 +7581,7 @@ func (ec *executionContext) _IntRangeOutput(ctx context.Context, sel ast.Selecti var jobImplementors = []string{"Job"} -func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj *model.Job) graphql.Marshaler { +func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj *schema.Job) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, jobImplementors) out := graphql.NewFieldSet(fields) @@ -7140,53 +7590,53 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Job") - case "Id": - out.Values[i] = ec._Job_Id(ctx, field, obj) + case "id": + out.Values[i] = ec._Job_id(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "JobId": - out.Values[i] = ec._Job_JobId(ctx, field, obj) + case "jobId": + out.Values[i] = ec._Job_jobId(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "User": - out.Values[i] = ec._Job_User(ctx, field, obj) + case "user": + out.Values[i] = ec._Job_user(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "Project": - out.Values[i] = ec._Job_Project(ctx, field, obj) + case "project": + out.Values[i] = ec._Job_project(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "Cluster": - out.Values[i] = 
ec._Job_Cluster(ctx, field, obj) + case "cluster": + out.Values[i] = ec._Job_cluster(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "StartTime": - out.Values[i] = ec._Job_StartTime(ctx, field, obj) + case "startTime": + out.Values[i] = ec._Job_startTime(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "Duration": - out.Values[i] = ec._Job_Duration(ctx, field, obj) + case "duration": + out.Values[i] = ec._Job_duration(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "NumNodes": - out.Values[i] = ec._Job_NumNodes(ctx, field, obj) + case "numNodes": + out.Values[i] = ec._Job_numNodes(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "NumHWThreads": - out.Values[i] = ec._Job_NumHWThreads(ctx, field, obj) + case "numHWThreads": + out.Values[i] = ec._Job_numHWThreads(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "NumAcc": - out.Values[i] = ec._Job_NumAcc(ctx, field, obj) + case "numAcc": + out.Values[i] = ec._Job_numAcc(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } @@ -7195,32 +7645,32 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "Exclusive": - out.Values[i] = ec._Job_Exclusive(ctx, field, obj) + case "exclusive": + out.Values[i] = ec._Job_exclusive(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "Partition": - out.Values[i] = ec._Job_Partition(ctx, field, obj) + case "partition": + out.Values[i] = ec._Job_partition(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "ArrayJobId": - out.Values[i] = ec._Job_ArrayJobId(ctx, field, obj) + case "arrayJobId": + out.Values[i] = ec._Job_arrayJobId(ctx, field, obj) if out.Values[i] 
== graphql.Null { atomic.AddUint32(&invalids, 1) } - case "MonitoringStatus": - out.Values[i] = ec._Job_MonitoringStatus(ctx, field, obj) + case "monitoringStatus": + out.Values[i] = ec._Job_monitoringStatus(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "State": - out.Values[i] = ec._Job_State(ctx, field, obj) + case "state": + out.Values[i] = ec._Job_state(ctx, field, obj) if out.Values[i] == graphql.Null { atomic.AddUint32(&invalids, 1) } - case "Tags": + case "tags": field := field out.Concurrently(i, func() (res graphql.Marshaler) { defer func() { @@ -7228,29 +7678,26 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._Job_Tags(ctx, field, obj) + res = ec._Job_tags(ctx, field, obj) + if res == graphql.Null { + atomic.AddUint32(&invalids, 1) + } + return res + }) + case "resources": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Job_resources(ctx, field, obj) if res == graphql.Null { atomic.AddUint32(&invalids, 1) } return res }) - case "Resources": - out.Values[i] = ec._Job_Resources(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) - } - case "LoadAvg": - out.Values[i] = ec._Job_LoadAvg(ctx, field, obj) - case "MemUsedMax": - out.Values[i] = ec._Job_MemUsedMax(ctx, field, obj) - case "FlopsAnyAvg": - out.Values[i] = ec._Job_FlopsAnyAvg(ctx, field, obj) - case "MemBwAvg": - out.Values[i] = ec._Job_MemBwAvg(ctx, field, obj) - case "NetBwAvg": - out.Values[i] = ec._Job_NetBwAvg(ctx, field, obj) - case "FileBwAvg": - out.Values[i] = ec._Job_FileBwAvg(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7273,99 +7720,37 @@ func (ec *executionContext) _JobMetric(ctx context.Context, sel ast.SelectionSet switch field.Name { case "__typename": 
out.Values[i] = graphql.MarshalString("JobMetric") - case "Unit": - out.Values[i] = ec._JobMetric_Unit(ctx, field, obj) + case "unit": + out.Values[i] = ec._JobMetric_unit(ctx, field, obj) if out.Values[i] == graphql.Null { - invalids++ + atomic.AddUint32(&invalids, 1) } - case "Scope": - out.Values[i] = ec._JobMetric_Scope(ctx, field, obj) + case "scope": + out.Values[i] = ec._JobMetric_scope(ctx, field, obj) if out.Values[i] == graphql.Null { - invalids++ + atomic.AddUint32(&invalids, 1) } - case "Timestep": - out.Values[i] = ec._JobMetric_Timestep(ctx, field, obj) + case "timestep": + out.Values[i] = ec._JobMetric_timestep(ctx, field, obj) if out.Values[i] == graphql.Null { - invalids++ + atomic.AddUint32(&invalids, 1) } - case "Series": - out.Values[i] = ec._JobMetric_Series(ctx, field, obj) + case "series": + out.Values[i] = ec._JobMetric_series(ctx, field, obj) if out.Values[i] == graphql.Null { - invalids++ - } - default: - panic("unknown field " + strconv.Quote(field.Name)) - } - } - out.Dispatch() - if invalids > 0 { - return graphql.Null - } - return out -} - -var jobMetricSeriesImplementors = []string{"JobMetricSeries"} - -func (ec *executionContext) _JobMetricSeries(ctx context.Context, sel ast.SelectionSet, obj *schema.MetricSeries) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobMetricSeriesImplementors) - - out := graphql.NewFieldSet(fields) - var invalids uint32 - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobMetricSeries") - case "Hostname": - out.Values[i] = ec._JobMetricSeries_Hostname(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "Id": - out.Values[i] = ec._JobMetricSeries_Id(ctx, field, obj) - case "Statistics": - out.Values[i] = ec._JobMetricSeries_Statistics(ctx, field, obj) - case "Data": - out.Values[i] = ec._JobMetricSeries_Data(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - 
default: - panic("unknown field " + strconv.Quote(field.Name)) - } - } - out.Dispatch() - if invalids > 0 { - return graphql.Null - } - return out -} - -var jobMetricStatisticsImplementors = []string{"JobMetricStatistics"} - -func (ec *executionContext) _JobMetricStatistics(ctx context.Context, sel ast.SelectionSet, obj *schema.MetricStatistics) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobMetricStatisticsImplementors) - - out := graphql.NewFieldSet(fields) - var invalids uint32 - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobMetricStatistics") - case "Avg": - out.Values[i] = ec._JobMetricStatistics_Avg(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "Min": - out.Values[i] = ec._JobMetricStatistics_Min(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "Max": - out.Values[i] = ec._JobMetricStatistics_Max(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ + atomic.AddUint32(&invalids, 1) } + case "statisticsSeries": + field := field + out.Concurrently(i, func() (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._JobMetric_statisticsSeries(ctx, field, obj) + return res + }) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7393,11 +7778,16 @@ func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.Sele if out.Values[i] == graphql.Null { invalids++ } - case "metric": - out.Values[i] = ec._JobMetricWithName_metric(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } + case "node": + out.Values[i] = ec._JobMetricWithName_node(ctx, field, obj) + case "socket": + out.Values[i] = ec._JobMetricWithName_socket(ctx, field, obj) + case "memoryDomain": + out.Values[i] = ec._JobMetricWithName_memoryDomain(ctx, field, obj) + case "core": + out.Values[i] = 
ec._JobMetricWithName_core(ctx, field, obj) + case "hwthread": + out.Values[i] = ec._JobMetricWithName_hwthread(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7411,7 +7801,7 @@ func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.Sele var jobResourceImplementors = []string{"JobResource"} -func (ec *executionContext) _JobResource(ctx context.Context, sel ast.SelectionSet, obj *schema.JobResource) graphql.Marshaler { +func (ec *executionContext) _JobResource(ctx context.Context, sel ast.SelectionSet, obj *model.JobResource) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, jobResourceImplementors) out := graphql.NewFieldSet(fields) @@ -7420,15 +7810,17 @@ func (ec *executionContext) _JobResource(ctx context.Context, sel ast.SelectionS switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("JobResource") - case "Hostname": - out.Values[i] = ec._JobResource_Hostname(ctx, field, obj) + case "hostname": + out.Values[i] = ec._JobResource_hostname(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "HWThreads": - out.Values[i] = ec._JobResource_HWThreads(ctx, field, obj) - case "Accelerators": - out.Values[i] = ec._JobResource_Accelerators(ctx, field, obj) + case "hwthreads": + out.Values[i] = ec._JobResource_hwthreads(ctx, field, obj) + case "accelerators": + out.Values[i] = ec._JobResource_accelerators(ctx, field, obj) + case "configuration": + out.Values[i] = ec._JobResource_configuration(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7473,43 +7865,6 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio return out } -var jobTagImplementors = []string{"JobTag"} - -func (ec *executionContext) _JobTag(ctx context.Context, sel ast.SelectionSet, obj *model.JobTag) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobTagImplementors) - - out := 
graphql.NewFieldSet(fields) - var invalids uint32 - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobTag") - case "Id": - out.Values[i] = ec._JobTag_Id(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "TagType": - out.Values[i] = ec._JobTag_TagType(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "TagName": - out.Values[i] = ec._JobTag_TagName(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - default: - panic("unknown field " + strconv.Quote(field.Name)) - } - } - out.Dispatch() - if invalids > 0 { - return graphql.Null - } - return out -} - var jobsStatisticsImplementors = []string{"JobsStatistics"} func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.SelectionSet, obj *model.JobsStatistics) graphql.Marshaler { @@ -7578,18 +7933,23 @@ func (ec *executionContext) _MetricConfig(ctx context.Context, sel ast.Selection switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("MetricConfig") - case "Name": - out.Values[i] = ec._MetricConfig_Name(ctx, field, obj) + case "name": + out.Values[i] = ec._MetricConfig_name(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "Unit": - out.Values[i] = ec._MetricConfig_Unit(ctx, field, obj) + case "unit": + out.Values[i] = ec._MetricConfig_unit(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "Timestep": - out.Values[i] = ec._MetricConfig_Timestep(ctx, field, obj) + case "scope": + out.Values[i] = ec._MetricConfig_scope(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "timestep": + out.Values[i] = ec._MetricConfig_timestep(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -7613,11 +7973,6 @@ func (ec *executionContext) _MetricConfig(ctx context.Context, sel ast.Selection if out.Values[i] == graphql.Null { invalids++ } - case "Scope": - out.Values[i] = 
ec._MetricConfig_Scope(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7661,6 +8016,43 @@ func (ec *executionContext) _MetricFootprints(ctx context.Context, sel ast.Selec return out } +var metricStatisticsImplementors = []string{"MetricStatistics"} + +func (ec *executionContext) _MetricStatistics(ctx context.Context, sel ast.SelectionSet, obj *schema.MetricStatistics) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, metricStatisticsImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("MetricStatistics") + case "avg": + out.Values[i] = ec._MetricStatistics_avg(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "min": + out.Values[i] = ec._MetricStatistics_min(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "max": + out.Values[i] = ec._MetricStatistics_max(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var mutationImplementors = []string{"Mutation"} func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { @@ -7773,6 +8165,73 @@ func (ec *executionContext) _NodeMetrics(ctx context.Context, sel ast.SelectionS return out } +var partitionImplementors = []string{"Partition"} + +func (ec *executionContext) _Partition(ctx context.Context, sel ast.SelectionSet, obj *model.Partition) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, partitionImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = 
graphql.MarshalString("Partition") + case "name": + out.Values[i] = ec._Partition_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "processorType": + out.Values[i] = ec._Partition_processorType(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "socketsPerNode": + out.Values[i] = ec._Partition_socketsPerNode(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "coresPerSocket": + out.Values[i] = ec._Partition_coresPerSocket(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "threadsPerCore": + out.Values[i] = ec._Partition_threadsPerCore(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "flopRateScalar": + out.Values[i] = ec._Partition_flopRateScalar(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "flopRateSimd": + out.Values[i] = ec._Partition_flopRateSimd(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "memoryBandwidth": + out.Values[i] = ec._Partition_memoryBandwidth(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "topology": + out.Values[i] = ec._Partition_topology(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var queryImplementors = []string{"Query"} func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { @@ -7926,6 +8385,107 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr return out } +var seriesImplementors = []string{"Series"} + +func (ec *executionContext) _Series(ctx context.Context, sel ast.SelectionSet, obj *schema.Series) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, seriesImplementors) + + out := graphql.NewFieldSet(fields) + var invalids 
uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Series") + case "hostname": + out.Values[i] = ec._Series_hostname(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "id": + out.Values[i] = ec._Series_id(ctx, field, obj) + case "statistics": + out.Values[i] = ec._Series_statistics(ctx, field, obj) + case "data": + out.Values[i] = ec._Series_data(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + +var statsSeriesImplementors = []string{"StatsSeries"} + +func (ec *executionContext) _StatsSeries(ctx context.Context, sel ast.SelectionSet, obj *schema.StatsSeries) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, statsSeriesImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("StatsSeries") + case "mean": + out.Values[i] = ec._StatsSeries_mean(ctx, field, obj) + case "min": + out.Values[i] = ec._StatsSeries_min(ctx, field, obj) + case "max": + out.Values[i] = ec._StatsSeries_max(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + +var tagImplementors = []string{"Tag"} + +func (ec *executionContext) _Tag(ctx context.Context, sel ast.SelectionSet, obj *schema.Tag) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, tagImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Tag") + case "id": + out.Values[i] = ec._Tag_id(ctx, field, obj) + if out.Values[i] == 
graphql.Null { + invalids++ + } + case "type": + out.Values[i] = ec._Tag_type(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "name": + out.Values[i] = ec._Tag_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var timeRangeOutputImplementors = []string{"TimeRangeOutput"} func (ec *executionContext) _TimeRangeOutput(ctx context.Context, sel ast.SelectionSet, obj *model.TimeRangeOutput) graphql.Marshaler { @@ -7958,6 +8518,40 @@ func (ec *executionContext) _TimeRangeOutput(ctx context.Context, sel ast.Select return out } +var topologyImplementors = []string{"Topology"} + +func (ec *executionContext) _Topology(ctx context.Context, sel ast.SelectionSet, obj *model.Topology) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, topologyImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Topology") + case "node": + out.Values[i] = ec._Topology_node(ctx, field, obj) + case "socket": + out.Values[i] = ec._Topology_socket(ctx, field, obj) + case "memoryDomain": + out.Values[i] = ec._Topology_memoryDomain(ctx, field, obj) + case "die": + out.Values[i] = ec._Topology_die(ctx, field, obj) + case "core": + out.Values[i] = ec._Topology_core(ctx, field, obj) + case "accelerators": + out.Values[i] = ec._Topology_accelerators(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var __DirectiveImplementors = []string{"__Directive"} func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler { @@ -8203,8 +8797,14 @@ func (ec 
*executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, o // region ***************************** type.gotpl ***************************** -func (ec *executionContext) marshalNAccelerator2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAccelerator(ctx context.Context, sel ast.SelectionSet, v schema.Accelerator) graphql.Marshaler { - return ec._Accelerator(ctx, sel, &v) +func (ec *executionContext) marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐAccelerator(ctx context.Context, sel ast.SelectionSet, v *model.Accelerator) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._Accelerator(ctx, sel, v) } func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) { @@ -8401,6 +9001,21 @@ func (ec *executionContext) marshalNHistoPoint2ᚖgithubᚗcomᚋClusterCockpit return ec._HistoPoint(ctx, sel, v) } +func (ec *executionContext) unmarshalNID2int64(ctx context.Context, v interface{}) (int64, error) { + res, err := graphql.UnmarshalInt64(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNID2int64(ctx context.Context, sel ast.SelectionSet, v int64) graphql.Marshaler { + res := graphql.MarshalInt64(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + } + return res +} + func (ec *executionContext) unmarshalNID2string(ctx context.Context, v interface{}) (string, error) { res, err := graphql.UnmarshalID(v) return res, graphql.ErrorOnPath(ctx, err) @@ -8461,6 +9076,66 @@ func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.Selecti return res } +func (ec *executionContext) unmarshalNInt2int32(ctx context.Context, v interface{}) (int32, error) { + res, err := graphql.UnmarshalInt32(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func 
(ec *executionContext) marshalNInt2int32(ctx context.Context, sel ast.SelectionSet, v int32) graphql.Marshaler { + res := graphql.MarshalInt32(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + } + return res +} + +func (ec *executionContext) unmarshalNInt2int64(ctx context.Context, v interface{}) (int64, error) { + res, err := graphql.UnmarshalInt64(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNInt2int64(ctx context.Context, sel ast.SelectionSet, v int64) graphql.Marshaler { + res := graphql.MarshalInt64(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + } + return res +} + +func (ec *executionContext) unmarshalNInt2ᚕintᚄ(ctx context.Context, v interface{}) ([]int, error) { + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]int, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNInt2int(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNInt2ᚕintᚄ(ctx context.Context, sel ast.SelectionSet, v []int) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNInt2int(ctx, sel, v[i]) + } + + return ret +} + func (ec *executionContext) marshalNIntRangeOutput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐIntRangeOutput(ctx context.Context, sel ast.SelectionSet, v *model.IntRangeOutput) graphql.Marshaler { if v == nil { if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { @@ -8471,7 +9146,7 @@ func (ec *executionContext) marshalNIntRangeOutput2ᚖgithubᚗcomᚋClusterCock return ec._IntRangeOutput(ctx, sel, v) } -func (ec 
*executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Job) graphql.Marshaler { +func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Job) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -8495,7 +9170,7 @@ func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJob(ctx, sel, v[i]) + ret[i] = ec.marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJob(ctx, sel, v[i]) } if isLen1 { f(i) @@ -8508,7 +9183,7 @@ func (ec *executionContext) marshalNJob2ᚕᚖgithubᚗcomᚋClusterCockpitᚋcc return ret } -func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJob(ctx context.Context, sel ast.SelectionSet, v *model.Job) graphql.Marshaler { +func (ec *executionContext) marshalNJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler { if v == nil { if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { ec.Errorf(ctx, "must not be null") @@ -8544,73 +9219,6 @@ func (ec *executionContext) unmarshalNJobFilter2ᚖgithubᚗcomᚋClusterCockpit return &res, graphql.ErrorOnPath(ctx, err) } -func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - return ec._JobMetric(ctx, sel, v) -} - -func (ec *executionContext) unmarshalNJobMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx context.Context, v interface{}) 
(schema.MetricScope, error) { - var res schema.MetricScope - err := res.UnmarshalGQL(v) - return res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) marshalNJobMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx context.Context, sel ast.SelectionSet, v schema.MetricScope) graphql.Marshaler { - return v -} - -func (ec *executionContext) marshalNJobMetricSeries2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.MetricSeries) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalNJobMetricSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricSeries(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - go f(i) - } - - } - wg.Wait() - return ret -} - -func (ec *executionContext) marshalNJobMetricSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricSeries(ctx context.Context, sel ast.SelectionSet, v *schema.MetricSeries) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - return ec._JobMetricSeries(ctx, sel, v) -} - func (ec *executionContext) marshalNJobMetricWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobMetricWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricWithName) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -8658,7 +9266,7 @@ func (ec *executionContext) marshalNJobMetricWithName2ᚖgithubᚗcomᚋClusterC return ec._JobMetricWithName(ctx, sel, v) } -func 
(ec *executionContext) marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.JobResource) graphql.Marshaler { +func (ec *executionContext) marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobResource) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -8682,7 +9290,7 @@ func (ec *executionContext) marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCock if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResource(ctx, sel, v[i]) + ret[i] = ec.marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResource(ctx, sel, v[i]) } if isLen1 { f(i) @@ -8695,7 +9303,7 @@ func (ec *executionContext) marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCock return ret } -func (ec *executionContext) marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobResource(ctx context.Context, sel ast.SelectionSet, v *schema.JobResource) graphql.Marshaler { +func (ec *executionContext) marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResource(ctx context.Context, sel ast.SelectionSet, v *model.JobResource) graphql.Marshaler { if v == nil { if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { ec.Errorf(ctx, "must not be null") @@ -8719,67 +9327,16 @@ func (ec *executionContext) marshalNJobResultList2ᚖgithubᚗcomᚋClusterCockp return ec._JobResultList(ctx, sel, v) } -func (ec *executionContext) unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx context.Context, v interface{}) (model.JobState, error) { - var res model.JobState +func (ec *executionContext) unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobState(ctx context.Context, v interface{}) (schema.JobState, error) { + var res schema.JobState err := 
res.UnmarshalGQL(v) return res, graphql.ErrorOnPath(ctx, err) } -func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx context.Context, sel ast.SelectionSet, v model.JobState) graphql.Marshaler { +func (ec *executionContext) marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobState(ctx context.Context, sel ast.SelectionSet, v schema.JobState) graphql.Marshaler { return v } -func (ec *executionContext) marshalNJobTag2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTag(ctx context.Context, sel ast.SelectionSet, v model.JobTag) graphql.Marshaler { - return ec._JobTag(ctx, sel, &v) -} - -func (ec *executionContext) marshalNJobTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTagᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobTag) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalNJobTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTag(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - go f(i) - } - - } - wg.Wait() - return ret -} - -func (ec *executionContext) marshalNJobTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobTag(ctx context.Context, sel ast.SelectionSet, v *model.JobTag) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - return ec._JobTag(ctx, sel, v) -} - func (ec *executionContext) marshalNJobsStatistics2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobsStatisticsᚄ(ctx context.Context, sel 
ast.SelectionSet, v []*model.JobsStatistics) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -8911,6 +9468,16 @@ func (ec *executionContext) marshalNMetricFootprints2ᚕᚖgithubᚗcomᚋCluste return ret } +func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx context.Context, v interface{}) (schema.MetricScope, error) { + var res schema.MetricScope + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx context.Context, sel ast.SelectionSet, v schema.MetricScope) graphql.Marshaler { + return v +} + func (ec *executionContext) marshalNNodeMetric2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐNodeMetricᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.NodeMetric) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -9045,6 +9612,94 @@ func (ec *executionContext) marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockp return ret } +func (ec *executionContext) marshalNPartition2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐPartitionᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Partition) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNPartition2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐPartition(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) 
marshalNPartition2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐPartition(ctx context.Context, sel ast.SelectionSet, v *model.Partition) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._Partition(ctx, sel, v) +} + +func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler { + return ec._Series(ctx, sel, &v) +} + +func (ec *executionContext) marshalNSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Series) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeries(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + func (ec *executionContext) unmarshalNSortDirectionEnum2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐSortDirectionEnum(ctx context.Context, v interface{}) (model.SortDirectionEnum, error) { var res model.SortDirectionEnum err := res.UnmarshalGQL(v) @@ -9055,6 +9710,16 @@ func (ec *executionContext) marshalNSortDirectionEnum2githubᚗcomᚋClusterCock return v } +func (ec *executionContext) marshalNStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, 
graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._StatsSeries(ctx, sel, v) +} + func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) { res, err := graphql.UnmarshalString(v) return res, graphql.ErrorOnPath(ctx, err) @@ -9100,6 +9765,57 @@ func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel return ret } +func (ec *executionContext) marshalNTag2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v schema.Tag) graphql.Marshaler { + return ec._Tag(ctx, sel, &v) +} + +func (ec *executionContext) marshalNTag2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTagᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Tag) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTag(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + return ret +} + +func (ec *executionContext) marshalNTag2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐTag(ctx context.Context, sel ast.SelectionSet, v *schema.Tag) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._Tag(ctx, sel, v) +} + func (ec *executionContext) unmarshalNTime2timeᚐTime(ctx context.Context, v interface{}) (time.Time, error) { res, err := graphql.UnmarshalTime(v) return res, graphql.ErrorOnPath(ctx, err) @@ -9125,6 +9841,16 @@ func 
(ec *executionContext) marshalNTimeRangeOutput2ᚖgithubᚗcomᚋClusterCoc return ec._TimeRangeOutput(ctx, sel, v) } +func (ec *executionContext) marshalNTopology2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐTopology(ctx context.Context, sel ast.SelectionSet, v *model.Topology) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._Topology(ctx, sel, v) +} + func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler { return ec.___Directive(ctx, sel, &v) } @@ -9354,7 +10080,7 @@ func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel a return res } -func (ec *executionContext) marshalOAccelerator2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAcceleratorᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Accelerator) graphql.Marshaler { +func (ec *executionContext) marshalOAccelerator2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐAcceleratorᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Accelerator) graphql.Marshaler { if v == nil { return graphql.Null } @@ -9381,7 +10107,7 @@ func (ec *executionContext) marshalOAccelerator2ᚕgithubᚗcomᚋClusterCockpit if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNAccelerator2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐAccelerator(ctx, sel, v[i]) + ret[i] = ec.marshalNAccelerator2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐAccelerator(ctx, sel, v[i]) } if isLen1 { f(i) @@ -9434,21 +10160,6 @@ func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast return graphql.MarshalBoolean(*v) } -func (ec *executionContext) unmarshalOFloat2ᚖfloat64(ctx context.Context, v interface{}) (*float64, error) { - if v == nil { - return nil, nil - } - res, err := graphql.UnmarshalFloat(v) - return &res, 
graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) marshalOFloat2ᚖfloat64(ctx context.Context, sel ast.SelectionSet, v *float64) graphql.Marshaler { - if v == nil { - return graphql.Null - } - return graphql.MarshalFloat(*v) -} - func (ec *executionContext) unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐFloatRange(ctx context.Context, v interface{}) (*model.FloatRange, error) { if v == nil { return nil, nil @@ -9493,15 +10204,6 @@ func (ec *executionContext) marshalOID2ᚕstringᚄ(ctx context.Context, sel ast return ret } -func (ec *executionContext) unmarshalOInt2int(ctx context.Context, v interface{}) (int, error) { - res, err := graphql.UnmarshalInt(v) - return res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) marshalOInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler { - return graphql.MarshalInt(v) -} - func (ec *executionContext) unmarshalOInt2ᚕintᚄ(ctx context.Context, v interface{}) ([]int, error) { if v == nil { return nil, nil @@ -9538,6 +10240,42 @@ func (ec *executionContext) marshalOInt2ᚕintᚄ(ctx context.Context, sel ast.S return ret } +func (ec *executionContext) unmarshalOInt2ᚕᚕintᚄ(ctx context.Context, v interface{}) ([][]int, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([][]int, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNInt2ᚕintᚄ(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOInt2ᚕᚕintᚄ(ctx context.Context, sel ast.SelectionSet, v [][]int) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNInt2ᚕintᚄ(ctx, sel, v[i]) + } + + return ret +} + 
func (ec *executionContext) unmarshalOInt2ᚖint(ctx context.Context, v interface{}) (*int, error) { if v == nil { return nil, nil @@ -9561,7 +10299,7 @@ func (ec *executionContext) unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpit return &res, graphql.ErrorOnPath(ctx, err) } -func (ec *executionContext) marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJob(ctx context.Context, sel ast.SelectionSet, v *model.Job) graphql.Marshaler { +func (ec *executionContext) marshalOJob2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJob(ctx context.Context, sel ast.SelectionSet, v *schema.Job) graphql.Marshaler { if v == nil { return graphql.Null } @@ -9592,14 +10330,14 @@ func (ec *executionContext) unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCock return res, nil } -func (ec *executionContext) marshalOJobMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler { +func (ec *executionContext) marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler { if v == nil { return graphql.Null } - return ec._JobMetricStatistics(ctx, sel, v) + return ec._JobMetric(ctx, sel, v) } -func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobStateᚄ(ctx context.Context, v interface{}) ([]model.JobState, error) { +func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobStateᚄ(ctx context.Context, v interface{}) ([]schema.JobState, error) { if v == nil { return nil, nil } @@ -9612,10 +10350,10 @@ func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpit } } var err error - res := make([]model.JobState, len(vSlice)) + res := make([]schema.JobState, len(vSlice)) for i := range vSlice { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) - res[i], err = 
ec.unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx, vSlice[i]) + res[i], err = ec.unmarshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobState(ctx, vSlice[i]) if err != nil { return nil, err } @@ -9623,7 +10361,85 @@ func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpit return res, nil } -func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobStateᚄ(ctx context.Context, sel ast.SelectionSet, v []model.JobState) graphql.Marshaler { +func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobStateᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.JobState) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobState(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) marshalOMetricFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐMetricFootprints(ctx context.Context, sel ast.SelectionSet, v *model.MetricFootprints) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._MetricFootprints(ctx, sel, v) +} + +func (ec *executionContext) marshalOMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._MetricStatistics(ctx, sel, v) +} + +func (ec *executionContext) unmarshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx context.Context, v interface{}) ([]schema.Float, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + if tmp1, ok := v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]schema.Float, len(vSlice)) + for i := 
range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloat(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Float) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloat(ctx, sel, v[i]) + } + + return ret +} + +func (ec *executionContext) unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐOrderByInput(ctx context.Context, v interface{}) (*model.OrderByInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputOrderByInput(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐPageRequest(ctx context.Context, v interface{}) (*model.PageRequest, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputPageRequest(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOStatsSeries2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.StatsSeries) graphql.Marshaler { if v == nil { return graphql.Null } @@ -9650,7 +10466,7 @@ func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋ if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNJobState2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobState(ctx, sel, v[i]) + ret[i] = ec.marshalNStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeries(ctx, sel, v[i]) } if isLen1 { f(i) @@ -9663,29 +10479,6 @@ func (ec *executionContext) 
marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋ return ret } -func (ec *executionContext) marshalOMetricFootprints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐMetricFootprints(ctx context.Context, sel ast.SelectionSet, v *model.MetricFootprints) graphql.Marshaler { - if v == nil { - return graphql.Null - } - return ec._MetricFootprints(ctx, sel, v) -} - -func (ec *executionContext) unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐOrderByInput(ctx context.Context, v interface{}) (*model.OrderByInput, error) { - if v == nil { - return nil, nil - } - res, err := ec.unmarshalInputOrderByInput(ctx, v) - return &res, graphql.ErrorOnPath(ctx, err) -} - -func (ec *executionContext) unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐPageRequest(ctx context.Context, v interface{}) (*model.PageRequest, error) { - if v == nil { - return nil, nil - } - res, err := ec.unmarshalInputPageRequest(ctx, v) - return &res, graphql.ErrorOnPath(ctx, err) -} - func (ec *executionContext) unmarshalOString2string(ctx context.Context, v interface{}) (string, error) { res, err := graphql.UnmarshalString(v) return res, graphql.ErrorOnPath(ctx, err) diff --git a/graph/model/models.go b/graph/model/models.go index 0096801..96f0f7e 100644 --- a/graph/model/models.go +++ b/graph/model/models.go @@ -1,26 +1,17 @@ package model -// Go look at `gqlgen.yml` and the schema package for other non-generated models. 
- -type JobTag struct { - ID string `json:"id" db:"id"` - TagType string `json:"tagType" db:"tag_type"` - TagName string `json:"tagName" db:"tag_name"` -} - type Cluster struct { - ClusterID string `json:"clusterID"` - ProcessorType string `json:"processorType"` - SocketsPerNode int `json:"socketsPerNode"` - CoresPerSocket int `json:"coresPerSocket"` - ThreadsPerCore int `json:"threadsPerCore"` - FlopRateScalar int `json:"flopRateScalar"` - FlopRateSimd int `json:"flopRateSimd"` - MemoryBandwidth int `json:"memoryBandwidth"` - MetricConfig []*MetricConfig `json:"metricConfig"` - FilterRanges *FilterRanges `json:"filterRanges"` - MetricDataRepository *struct { - Kind string `json:"kind"` - Url string `json:"url"` - } `json:"metricDataRepository"` + Name string `json:"name"` + MetricConfig []*MetricConfig `json:"metricConfig"` + FilterRanges *FilterRanges `json:"filterRanges"` + Partitions []*Partition `json:"partitions"` + + // NOT part of the API: + MetricDataRepository *MetricDataRepository `json:"metricDataRepository"` +} + +type MetricDataRepository struct { + Kind string `json:"kind"` + Url string `json:"url"` + Token string `json:"token"` } diff --git a/graph/model/models_gen.go b/graph/model/models_gen.go index 8cf6015..ccfd8e2 100644 --- a/graph/model/models_gen.go +++ b/graph/model/models_gen.go @@ -11,6 +11,12 @@ import ( "github.com/ClusterCockpit/cc-jobarchive/schema" ) +type Accelerator struct { + ID string `json:"id"` + Type string `json:"type"` + Model string `json:"model"` +} + type FilterRanges struct { Duration *IntRangeOutput `json:"duration"` NumNodes *IntRangeOutput `json:"numNodes"` @@ -37,59 +43,43 @@ type IntRangeOutput struct { To int `json:"to"` } -type Job struct { - ID string `json:"Id"` - JobID int `json:"JobId"` - User string `json:"User"` - Project string `json:"Project"` - Cluster string `json:"Cluster"` - StartTime time.Time `json:"StartTime"` - Duration int `json:"Duration"` - NumNodes int `json:"NumNodes"` - NumHWThreads int 
`json:"NumHWThreads"` - NumAcc int `json:"NumAcc"` - Smt int `json:"SMT"` - Exclusive int `json:"Exclusive"` - Partition string `json:"Partition"` - ArrayJobID int `json:"ArrayJobId"` - MonitoringStatus int `json:"MonitoringStatus"` - State JobState `json:"State"` - Tags []*JobTag `json:"Tags"` - Resources []*schema.JobResource `json:"Resources"` - LoadAvg *float64 `json:"LoadAvg"` - MemUsedMax *float64 `json:"MemUsedMax"` - FlopsAnyAvg *float64 `json:"FlopsAnyAvg"` - MemBwAvg *float64 `json:"MemBwAvg"` - NetBwAvg *float64 `json:"NetBwAvg"` - FileBwAvg *float64 `json:"FileBwAvg"` -} - type JobFilter struct { - Tags []string `json:"tags"` - JobID *StringInput `json:"jobId"` - User *StringInput `json:"user"` - Project *StringInput `json:"project"` - Cluster *StringInput `json:"cluster"` - Duration *IntRange `json:"duration"` - NumNodes *IntRange `json:"numNodes"` - StartTime *TimeRange `json:"startTime"` - JobState []JobState `json:"jobState"` - FlopsAnyAvg *FloatRange `json:"flopsAnyAvg"` - MemBwAvg *FloatRange `json:"memBwAvg"` - LoadAvg *FloatRange `json:"loadAvg"` - MemUsedMax *FloatRange `json:"memUsedMax"` + Tags []string `json:"tags"` + JobID *StringInput `json:"jobId"` + User *StringInput `json:"user"` + Project *StringInput `json:"project"` + Cluster *StringInput `json:"cluster"` + Duration *IntRange `json:"duration"` + NumNodes *IntRange `json:"numNodes"` + StartTime *TimeRange `json:"startTime"` + State []schema.JobState `json:"state"` + FlopsAnyAvg *FloatRange `json:"flopsAnyAvg"` + MemBwAvg *FloatRange `json:"memBwAvg"` + LoadAvg *FloatRange `json:"loadAvg"` + MemUsedMax *FloatRange `json:"memUsedMax"` } type JobMetricWithName struct { - Name string `json:"name"` - Metric *schema.JobMetric `json:"metric"` + Name string `json:"name"` + Node *schema.JobMetric `json:"node"` + Socket *schema.JobMetric `json:"socket"` + MemoryDomain *schema.JobMetric `json:"memoryDomain"` + Core *schema.JobMetric `json:"core"` + Hwthread *schema.JobMetric `json:"hwthread"` +} 
+ +type JobResource struct { + Hostname string `json:"hostname"` + Hwthreads []int `json:"hwthreads"` + Accelerators []int `json:"accelerators"` + Configuration *string `json:"configuration"` } type JobResultList struct { - Items []*Job `json:"items"` - Offset *int `json:"offset"` - Limit *int `json:"limit"` - Count *int `json:"count"` + Items []*schema.Job `json:"items"` + Offset *int `json:"offset"` + Limit *int `json:"limit"` + Count *int `json:"count"` } type JobsStatistics struct { @@ -103,14 +93,14 @@ type JobsStatistics struct { } type MetricConfig struct { - Name string `json:"Name"` - Unit string `json:"Unit"` - Timestep int `json:"Timestep"` - Peak int `json:"Peak"` - Normal int `json:"Normal"` - Caution int `json:"Caution"` - Alert int `json:"Alert"` - Scope string `json:"Scope"` + Name string `json:"name"` + Unit string `json:"unit"` + Scope string `json:"scope"` + Timestep int `json:"timestep"` + Peak float64 `json:"Peak"` + Normal float64 `json:"Normal"` + Caution float64 `json:"Caution"` + Alert float64 `json:"Alert"` } type MetricFootprints struct { @@ -138,6 +128,18 @@ type PageRequest struct { Page int `json:"page"` } +type Partition struct { + Name string `json:"name"` + ProcessorType string `json:"processorType"` + SocketsPerNode int `json:"socketsPerNode"` + CoresPerSocket int `json:"coresPerSocket"` + ThreadsPerCore int `json:"threadsPerCore"` + FlopRateScalar int `json:"flopRateScalar"` + FlopRateSimd int `json:"flopRateSimd"` + MemoryBandwidth int `json:"memoryBandwidth"` + Topology *Topology `json:"topology"` +} + type StringInput struct { Eq *string `json:"eq"` Contains *string `json:"contains"` @@ -155,6 +157,15 @@ type TimeRangeOutput struct { To time.Time `json:"to"` } +type Topology struct { + Node []int `json:"node"` + Socket [][]int `json:"socket"` + MemoryDomain [][]int `json:"memoryDomain"` + Die [][]int `json:"die"` + Core [][]int `json:"core"` + Accelerators []*Accelerator `json:"accelerators"` +} + type Aggregate string const ( 
@@ -198,55 +209,6 @@ func (e Aggregate) MarshalGQL(w io.Writer) { fmt.Fprint(w, strconv.Quote(e.String())) } -type JobState string - -const ( - JobStateRunning JobState = "running" - JobStateCompleted JobState = "completed" - JobStateFailed JobState = "failed" - JobStateCanceled JobState = "canceled" - JobStateStopped JobState = "stopped" - JobStateTimeout JobState = "timeout" -) - -var AllJobState = []JobState{ - JobStateRunning, - JobStateCompleted, - JobStateFailed, - JobStateCanceled, - JobStateStopped, - JobStateTimeout, -} - -func (e JobState) IsValid() bool { - switch e { - case JobStateRunning, JobStateCompleted, JobStateFailed, JobStateCanceled, JobStateStopped, JobStateTimeout: - return true - } - return false -} - -func (e JobState) String() string { - return string(e) -} - -func (e *JobState) UnmarshalGQL(v interface{}) error { - str, ok := v.(string) - if !ok { - return fmt.Errorf("enums must be strings") - } - - *e = JobState(str) - if !e.IsValid() { - return fmt.Errorf("%s is not a valid JobState", str) - } - return nil -} - -func (e JobState) MarshalGQL(w io.Writer) { - fmt.Fprint(w, strconv.Quote(e.String())) -} - type SortDirectionEnum string const ( diff --git a/graph/resolver.go b/graph/resolver.go index 8a1ba7b..2804bd9 100644 --- a/graph/resolver.go +++ b/graph/resolver.go @@ -2,15 +2,14 @@ package graph import ( "context" - "encoding/json" "errors" "fmt" "regexp" "strings" - "time" "github.com/ClusterCockpit/cc-jobarchive/auth" "github.com/ClusterCockpit/cc-jobarchive/graph/model" + "github.com/ClusterCockpit/cc-jobarchive/schema" sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" ) @@ -23,44 +22,9 @@ type Resolver struct { DB *sqlx.DB } -var JobTableCols []string = []string{ - "id", "job_id", "cluster", "start_time", - "user", "project", "partition", "array_job_id", "duration", "job_state", "resources", - "num_nodes", "num_hwthreads", "num_acc", "smt", "exclusive", "monitoring_status", - "load_avg", "mem_used_max", 
"flops_any_avg", "mem_bw_avg", "net_bw_avg", "file_bw_avg", -} - -type Scannable interface { - Scan(dest ...interface{}) error -} - -// Helper function for scanning jobs with the `jobTableCols` columns selected. -func ScanJob(row Scannable) (*model.Job, error) { - job := &model.Job{} - - var rawResources []byte - if err := row.Scan( - &job.ID, &job.JobID, &job.Cluster, &job.StartTime, - &job.User, &job.Project, &job.Partition, &job.ArrayJobID, &job.Duration, &job.State, &rawResources, - &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Smt, &job.Exclusive, &job.MonitoringStatus, - &job.LoadAvg, &job.MemUsedMax, &job.FlopsAnyAvg, &job.MemBwAvg, &job.NetBwAvg, &job.FileBwAvg); err != nil { - return nil, err - } - - if err := json.Unmarshal(rawResources, &job.Resources); err != nil { - return nil, err - } - - if job.Duration == 0 && job.State == model.JobStateRunning { - job.Duration = int(time.Since(job.StartTime).Seconds()) - } - - return job, nil -} - // Helper function for the `jobs` GraphQL-Query. Is also used elsewhere when a list of jobs is needed. -func (r *Resolver) queryJobs(ctx context.Context, filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*model.Job, int, error) { - query := sq.Select(JobTableCols...).From("job") +func (r *Resolver) queryJobs(ctx context.Context, filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*schema.Job, int, error) { + query := sq.Select(schema.JobColumns...).From("job") query = securityCheck(ctx, query) if order != nil { @@ -85,33 +49,32 @@ func (r *Resolver) queryJobs(ctx context.Context, filters []*model.JobFilter, pa query = buildWhereClause(f, query) } - rows, err := query.RunWith(r.DB).Query() + sql, args, err := query.ToSql() if err != nil { return nil, 0, err } - defer rows.Close() - jobs := make([]*model.Job, 0, 50) + rows, err := r.DB.Queryx(sql, args...) 
+ if err != nil { + return nil, 0, err + } + + jobs := make([]*schema.Job, 0, 50) for rows.Next() { - job, err := ScanJob(rows) + job, err := schema.ScanJob(rows) if err != nil { return nil, 0, err } jobs = append(jobs, job) } + // count all jobs: query = sq.Select("count(*)").From("job") for _, f := range filters { query = buildWhereClause(f, query) } - rows, err = query.RunWith(r.DB).Query() - if err != nil { - return nil, 0, err - } - defer rows.Close() var count int - rows.Next() - if err := rows.Scan(&count); err != nil { + if err := query.RunWith(r.DB).Scan(&count); err != nil { return nil, 0, err } @@ -132,7 +95,7 @@ func securityCheck(ctx context.Context, query sq.SelectBuilder) sq.SelectBuilder return query.Where("job.user_id = ?", user.Username) } -// Build a sq.SelectBuilder out of a model.JobFilter. +// Build a sq.SelectBuilder out of a schema.JobFilter. func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.SelectBuilder { if filter.Tags != nil { query = query.Join("jobtag ON jobtag.job_id = job.id").Where("jobtag.tag_id IN ?", filter.Tags) @@ -155,8 +118,8 @@ func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select if filter.Duration != nil { query = buildIntCondition("job.duration", filter.Duration, query) } - if filter.JobState != nil { - query = query.Where("job.job_state IN ?", filter.JobState) + if filter.State != nil { + query = query.Where("job.job_state IN ?", filter.State) } if filter.NumNodes != nil { query = buildIntCondition("job.num_nodes", filter.NumNodes, query) diff --git a/graph/schema.graphqls b/graph/schema.graphqls index cec1b2d..0a162f8 100644 --- a/graph/schema.graphqls +++ b/graph/schema.graphqls @@ -1,107 +1,122 @@ +scalar Time +scalar NullableFloat +scalar MetricScope +scalar JobState + type Job { - Id: ID! # Database ID, unique - JobId: Int! # ID given to the job by the cluster scheduler - User: String! # Username - Project: String! # Project - Cluster: String! 
# Name of the cluster this job was running on - StartTime: Time! # RFC3339 formated string - Duration: Int! # For running jobs, the time it has already run - NumNodes: Int! # Number of nodes this job was running on - NumHWThreads: Int! - NumAcc: Int! + id: ID! + jobId: Int! + user: String! + project: String! + cluster: String! + startTime: Time! + duration: Int! + numNodes: Int! + numHWThreads: Int! + numAcc: Int! SMT: Int! - Exclusive: Int! - Partition: String! - ArrayJobId: Int! - MonitoringStatus: Int! - State: JobState! # State of the job - Tags: [JobTag!]! # List of tags this job has - Resources: [JobResource!]! # List of hosts/hwthreads/gpus/... - - # Will be null for running jobs. - LoadAvg: Float - MemUsedMax: Float - FlopsAnyAvg: Float - MemBwAvg: Float - NetBwAvg: Float - FileBwAvg: Float -} - -type JobResource { - Hostname: String! - HWThreads: [Int!] - Accelerators: [Accelerator!] -} - -type Accelerator { - Id: String! - Type: String! - Model: String! -} - -# TODO: Extend by more possible states? -enum JobState { - running - completed - failed - canceled - stopped - timeout -} - -type JobTag { - Id: ID! # Database ID, unique - TagType: String! # Type - TagName: String! # Name + exclusive: Int! + partition: String! + arrayJobId: Int! + monitoringStatus: Int! + state: JobState! + tags: [Tag!]! + resources: [JobResource!]! } type Cluster { - ClusterID: String! - ProcessorType: String! - SocketsPerNode: Int! - CoresPerSocket: Int! - ThreadsPerCore: Int! - FlopRateScalar: Int! - FlopRateSimd: Int! - MemoryBandwidth: Int! - MetricConfig: [MetricConfig!]! - FilterRanges: FilterRanges! + name: String! + metricConfig: [MetricConfig!]! + filterRanges: FilterRanges! + partitions: [Partition!]! +} + +type Partition { + name: String! + processorType: String! + socketsPerNode: Int! + coresPerSocket: Int! + threadsPerCore: Int! + flopRateScalar: Int! + flopRateSimd: Int! + memoryBandwidth: Int! + topology: Topology! +} + +type Topology { + node: [Int!] 
+ socket: [[Int!]!] + memoryDomain: [[Int!]!] + die: [[Int!]!] + core: [[Int!]!] + accelerators: [Accelerator!] +} + +type Accelerator { + id: String! + type: String! + model: String! } type MetricConfig { - Name: String! - Unit: String! - Timestep: Int! - Peak: Int! - Normal: Int! - Caution: Int! - Alert: Int! - Scope: String! + name: String! + unit: String! + scope: String! + timestep: Int! + Peak: Float! + Normal: Float! + Caution: Float! + Alert: Float! } -type JobMetric { - Unit: String! - Scope: JobMetricScope! - Timestep: Int! - Series: [JobMetricSeries!]! +type Tag { + id: ID! + type: String! + name: String! } -type JobMetricSeries { - Hostname: String! - Id: Int - Statistics: JobMetricStatistics - Data: [NullableFloat!]! -} - -type JobMetricStatistics { - Avg: Float! - Min: Float! - Max: Float! +type JobResource { + hostname: String! + hwthreads: [Int!] + accelerators: [Int!] + configuration: String } type JobMetricWithName { - name: String! - metric: JobMetric! + name: String! + + node: JobMetric + socket: JobMetric + memoryDomain: JobMetric + core: JobMetric + hwthread: JobMetric +} + +type JobMetric { + unit: String! + scope: MetricScope! + timestep: Int! + series: [Series!]! + statisticsSeries: [StatsSeries!] +} + +type Series { + hostname: String! + id: Int + statistics: MetricStatistics + data: [NullableFloat!]! +} + +type MetricStatistics { + avg: Float! + min: Float! + max: Float! +} + +type StatsSeries { + mean: [NullableFloat!] + min: [NullableFloat!] + max: [NullableFloat!] } type MetricFootprints { @@ -123,7 +138,7 @@ type NodeMetrics { type Query { clusters: [Cluster!]! # List of all clusters - tags: [JobTag!]! # List of all tags + tags: [Tag!]! # List of all tags job(id: ID!): Job jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]! @@ -138,23 +153,16 @@ type Query { } type Mutation { - createTag(type: String!, name: String!): JobTag! + createTag(type: String!, name: String!): Tag! deleteTag(id: ID!): ID! 
- addTagsToJob(job: ID!, tagIds: [ID!]!): [JobTag!]! - removeTagsFromJob(job: ID!, tagIds: [ID!]!): [JobTag!]! + addTagsToJob(job: ID!, tagIds: [ID!]!): [Tag!]! + removeTagsFromJob(job: ID!, tagIds: [ID!]!): [Tag!]! updateConfiguration(name: String!, value: String!): String } -type IntRangeOutput { - from: Int! - to: Int! -} - -type TimeRangeOutput { - from: Time! - to: Time! -} +type IntRangeOutput { from: Int!, to: Int! } +type TimeRangeOutput { from: Time!, to: Time! } type FilterRanges { duration: IntRangeOutput! @@ -171,7 +179,7 @@ input JobFilter { duration: IntRange numNodes: IntRange startTime: TimeRange - jobState: [JobState!] + state: [JobState!] flopsAnyAvg: FloatRange memBwAvg: FloatRange loadAvg: FloatRange @@ -195,20 +203,9 @@ input StringInput { endsWith: String } -input IntRange { - from: Int! - to: Int! -} - -input FloatRange { - from: Float! - to: Float! -} - -input TimeRange { - from: Time - to: Time -} +input IntRange { from: Int!, to: Int! } +input FloatRange { from: Float!, to: Float! } +input TimeRange { from: Time, to: Time } type JobResultList { items: [Job!]! @@ -236,7 +233,3 @@ input PageRequest { itemsPerPage: Int! page: Int! } - -scalar Time -scalar NullableFloat -scalar JobMetricScope diff --git a/graph/schema.resolvers.go b/graph/schema.resolvers.go index b16e296..d8cd186 100644 --- a/graph/schema.resolvers.go +++ b/graph/schema.resolvers.go @@ -19,36 +19,35 @@ import ( sq "github.com/Masterminds/squirrel" ) -func (r *acceleratorResolver) ID(ctx context.Context, obj *schema.Accelerator) (string, error) { - panic(fmt.Errorf("not implemented")) -} - -func (r *jobResolver) Tags(ctx context.Context, obj *model.Job) ([]*model.JobTag, error) { +func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) { query := sq. Select("tag.id", "tag.tag_type", "tag.tag_name"). From("tag"). Join("jobtag ON jobtag.tag_id = tag.id"). 
Where("jobtag.job_id = ?", obj.ID) - rows, err := query.RunWith(r.DB).Query() + sql, args, err := query.ToSql() if err != nil { return nil, err } - defer rows.Close() - tags := make([]*model.JobTag, 0) - for rows.Next() { - var tag model.JobTag - if err := rows.Scan(&tag.ID, &tag.TagType, &tag.TagName); err != nil { - return nil, err - } - tags = append(tags, &tag) + tags := make([]*schema.Tag, 0) + if err := r.DB.Select(&tags, sql, args...); err != nil { + return nil, err } return tags, nil } -func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*model.JobTag, error) { +func (r *jobResolver) Resources(ctx context.Context, obj *schema.Job) ([]*model.JobResource, error) { + panic(fmt.Errorf("not implemented")) +} + +func (r *jobMetricResolver) StatisticsSeries(ctx context.Context, obj *schema.JobMetric) ([]*schema.StatsSeries, error) { + panic(fmt.Errorf("not implemented")) +} + +func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) { res, err := r.DB.Exec("INSERT INTO tag (tag_type, tag_name) VALUES ($1, $2)", typeArg, name) if err != nil { return nil, err @@ -59,7 +58,7 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s return nil, err } - return &model.JobTag{ID: strconv.FormatInt(id, 10), TagType: typeArg, TagName: name}, nil + return &schema.Tag{ID: id, Type: typeArg, Name: name}, nil } func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) { @@ -67,7 +66,7 @@ func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, er panic(fmt.Errorf("not implemented")) } -func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*model.JobTag, error) { +func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) { jid, err := strconv.Atoi(job) if err != nil { return nil, err @@ -84,7 +83,9 @@ func (r *mutationResolver) 
AddTagsToJob(ctx context.Context, job string, tagIds } } - tags, err := r.Job().Tags(ctx, &model.Job{ID: job}) + dummyJob := schema.Job{} + dummyJob.ID = int64(jid) + tags, err := r.Job().Tags(ctx, &dummyJob) if err != nil { return nil, err } @@ -97,7 +98,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds return tags, metricdata.UpdateTags(jobObj, tags) } -func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*model.JobTag, error) { +func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) { jid, err := strconv.Atoi(job) if err != nil { return nil, err @@ -114,7 +115,9 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta } } - tags, err := r.Job().Tags(ctx, &model.Job{ID: job}) + dummyJob := schema.Job{} + dummyJob.ID = int64(jid) + tags, err := r.Job().Tags(ctx, &dummyJob) if err != nil { return nil, err } @@ -139,29 +142,28 @@ func (r *queryResolver) Clusters(ctx context.Context) ([]*model.Cluster, error) return config.Clusters, nil } -func (r *queryResolver) Tags(ctx context.Context) ([]*model.JobTag, error) { - rows, err := sq.Select("id", "tag_type", "tag_name").From("tag").RunWith(r.DB).Query() +func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) { + sql, args, err := sq.Select("id", "tag_type", "tag_name").From("tag").ToSql() if err != nil { return nil, err } - defer rows.Close() - tags := make([]*model.JobTag, 0) - for rows.Next() { - var tag model.JobTag - if err := rows.Scan(&tag.ID, &tag.TagType, &tag.TagName); err != nil { - return nil, err - } - tags = append(tags, &tag) + tags := make([]*schema.Tag, 0) + if err := r.DB.Select(&tags, sql, args...); err != nil { + return nil, err } - return tags, nil } -func (r *queryResolver) Job(ctx context.Context, id string) (*model.Job, error) { - query := sq.Select(JobTableCols...).From("job").Where("job.id = ?", id) +func (r 
*queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) { + query := sq.Select(schema.JobColumns...).From("job").Where("job.id = ?", id) query = securityCheck(ctx, query) - return ScanJob(query.RunWith(r.DB).QueryRow()) + sql, args, err := query.ToSql() + if err != nil { + return nil, err + } + + return schema.ScanJob(r.DB.QueryRowx(sql, args...)) } func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) { @@ -178,8 +180,12 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str res := []*model.JobMetricWithName{} for name, md := range data { res = append(res, &model.JobMetricWithName{ - Name: name, - Metric: md, + Name: name, + Node: md["node"], + Socket: md["socket"], + MemoryDomain: md["memoryDomain"], + Core: md["core"], + Hwthread: md["hwthread"], }) } @@ -237,19 +243,19 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ return res, nil } -// Accelerator returns generated.AcceleratorResolver implementation. -func (r *Resolver) Accelerator() generated.AcceleratorResolver { return &acceleratorResolver{r} } - // Job returns generated.JobResolver implementation. func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} } +// JobMetric returns generated.JobMetricResolver implementation. +func (r *Resolver) JobMetric() generated.JobMetricResolver { return &jobMetricResolver{r} } + // Mutation returns generated.MutationResolver implementation. func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} } // Query returns generated.QueryResolver implementation. 
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } -type acceleratorResolver struct{ *Resolver } type jobResolver struct{ *Resolver } +type jobMetricResolver struct{ *Resolver } type mutationResolver struct{ *Resolver } type queryResolver struct{ *Resolver } diff --git a/graph/stats.go b/graph/stats.go index fa0d66e..2ad5aea 100644 --- a/graph/stats.go +++ b/graph/stats.go @@ -3,6 +3,7 @@ package graph import ( "context" "database/sql" + "errors" "fmt" "math" @@ -16,9 +17,9 @@ import ( // GraphQL validation should make sure that no unkown values can be specified. var groupBy2column = map[model.Aggregate]string{ - model.AggregateUser: "job.user_id", - model.AggregateProject: "job.project_id", - model.AggregateCluster: "job.cluster_id", + model.AggregateUser: "job.user", + model.AggregateProject: "job.project", + model.AggregateCluster: "job.cluster", } // Helper function for the jobsStatistics GraphQL query placed here so that schema.resolvers.go is not too full. @@ -28,53 +29,59 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF // `socketsPerNode` and `coresPerSocket` can differ from cluster to cluster, so we need to explicitly loop over those. 
for _, cluster := range config.Clusters { - corehoursCol := fmt.Sprintf("SUM(job.duration * job.num_nodes * %d * %d) / 3600", cluster.SocketsPerNode, cluster.CoresPerSocket) - var query sq.SelectBuilder - if groupBy == nil { - query = sq.Select( - "''", - "COUNT(job.id)", - "SUM(job.duration) / 3600", - corehoursCol, - ).From("job").Where("job.cluster_id = ?", cluster.ClusterID) - } else { - col := groupBy2column[*groupBy] - query = sq.Select( - col, - "COUNT(job.id)", - "SUM(job.duration) / 3600", - corehoursCol, - ).From("job").Where("job.cluster_id = ?", cluster.ClusterID).GroupBy(col) - } + for _, partition := range cluster.Partitions { + corehoursCol := fmt.Sprintf("SUM(job.duration * job.num_nodes * %d * %d) / 3600", partition.SocketsPerNode, partition.CoresPerSocket) + var query sq.SelectBuilder + if groupBy == nil { + query = sq.Select( + "''", + "COUNT(job.id)", + "SUM(job.duration) / 3600", + corehoursCol, + ).From("job") + } else { + col := groupBy2column[*groupBy] + query = sq.Select( + col, + "COUNT(job.id)", + "SUM(job.duration) / 3600", + corehoursCol, + ).From("job").GroupBy(col) + } - query = securityCheck(ctx, query) - for _, f := range filter { - query = buildWhereClause(f, query) - } + query = query. + Where("job.cluster = ?", cluster.Name). 
+ Where("job.partition = ?", partition.Name) - rows, err := query.RunWith(r.DB).Query() - if err != nil { - return nil, err - } + query = securityCheck(ctx, query) + for _, f := range filter { + query = buildWhereClause(f, query) + } - for rows.Next() { - var id sql.NullString - var jobs, walltime, corehours sql.NullInt64 - if err := rows.Scan(&id, &jobs, &walltime, &corehours); err != nil { + rows, err := query.RunWith(r.DB).Query() + if err != nil { return nil, err } - if id.Valid { - if s, ok := stats[id.String]; ok { - s.TotalJobs += int(jobs.Int64) - s.TotalWalltime += int(walltime.Int64) - s.TotalCoreHours += int(corehours.Int64) - } else { - stats[id.String] = &model.JobsStatistics{ - ID: id.String, - TotalJobs: int(jobs.Int64), - TotalWalltime: int(walltime.Int64), - TotalCoreHours: int(corehours.Int64), + for rows.Next() { + var id sql.NullString + var jobs, walltime, corehours sql.NullInt64 + if err := rows.Scan(&id, &jobs, &walltime, &corehours); err != nil { + return nil, err + } + + if id.Valid { + if s, ok := stats[id.String]; ok { + s.TotalJobs += int(jobs.Int64) + s.TotalWalltime += int(walltime.Int64) + s.TotalCoreHours += int(corehours.Int64) + } else { + stats[id.String] = &model.JobsStatistics{ + ID: id.String, + TotalJobs: int(jobs.Int64), + TotalWalltime: int(walltime.Int64), + TotalCoreHours: int(corehours.Int64), + } } } } @@ -204,9 +211,16 @@ func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilte return nil, err } - flops, membw := jobdata["flops_any"], jobdata["mem_bw"] - if flops == nil && membw == nil { - return nil, fmt.Errorf("'flops_any' or 'mem_bw' missing for job %s", job.ID) + flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"] + if flops_ == nil && membw_ == nil { + return nil, fmt.Errorf("'flops_any' or 'mem_bw' missing for job %d", job.ID) + } + + flops, ok1 := flops_["node"] + membw, ok2 := membw_["node"] + if !ok1 || !ok2 { + // TODO/FIXME: + return nil, errors.New("todo: rooflineHeatmap() query 
not implemented for where flops_any or mem_bw not available at 'node' level") } for n := 0; n < len(flops.Series); n++ { diff --git a/init-db.go b/init-db.go index 496714a..541c3d3 100644 --- a/init-db.go +++ b/init-db.go @@ -2,7 +2,6 @@ package main import ( "bufio" - "database/sql" "encoding/json" "fmt" "log" @@ -23,7 +22,7 @@ const JOBS_DB_SCHEMA string = ` id INTEGER PRIMARY KEY AUTOINCREMENT, -- Not needed in sqlite job_id BIGINT NOT NULL, cluster VARCHAR(255) NOT NULL, - start_time BITINT NOT NULL, + start_time TIMESTAMP NOT NULL, user VARCHAR(255) NOT NULL, project VARCHAR(255) NOT NULL, @@ -80,25 +79,20 @@ func initDB(db *sqlx.DB, archive string) error { return err } - insertstmt, err := db.Prepare(`INSERT INTO job ( - job_id, cluster, start_time, - user, project, partition, array_job_id, duration, job_state, meta_data, resources, - num_nodes, num_hwthreads, num_acc, smt, exclusive, monitoring_status, - flops_any_avg, mem_bw_avg - ) VALUES ( - ?, ?, ?, - ?, ?, ?, ?, ?, ?, ?, ?, - ?, ?, ?, ?, ?, ?, - ?, ? 
- );`) if err != nil { return err } - tx, err := db.Begin() + tx, err := db.Beginx() if err != nil { return err } + + stmt, err := tx.PrepareNamed(schema.JobInsertStmt) + if err != nil { + return err + } + i := 0 tags := make(map[string]int64) handleDirectory := func(filename string) error { @@ -110,16 +104,16 @@ func initDB(db *sqlx.DB, archive string) error { } } - tx, err = db.Begin() + tx, err = db.Beginx() if err != nil { return err } - insertstmt = tx.Stmt(insertstmt) + stmt = tx.NamedStmt(stmt) fmt.Printf("%d jobs inserted...\r", i) } - err := loadJob(tx, insertstmt, tags, filename) + err := loadJob(tx, stmt, tags, filename) if err == nil { i += 1 } @@ -151,14 +145,14 @@ func initDB(db *sqlx.DB, archive string) error { return err } - for _, startTiemDir := range startTimeDirs { - if startTiemDir.Type().IsRegular() && startTiemDir.Name() == "meta.json" { + for _, startTimeDir := range startTimeDirs { + if startTimeDir.Type().IsRegular() && startTimeDir.Name() == "meta.json" { if err := handleDirectory(dirpath); err != nil { log.Printf("in %s: %s\n", dirpath, err.Error()) } - } else if startTiemDir.IsDir() { - if err := handleDirectory(filepath.Join(dirpath, startTiemDir.Name())); err != nil { - log.Printf("in %s: %s\n", filepath.Join(dirpath, startTiemDir.Name()), err.Error()) + } else if startTimeDir.IsDir() { + if err := handleDirectory(filepath.Join(dirpath, startTimeDir.Name())); err != nil { + log.Printf("in %s: %s\n", filepath.Join(dirpath, startTimeDir.Name()), err.Error()) } } } @@ -184,34 +178,28 @@ func initDB(db *sqlx.DB, archive string) error { // Read the `meta.json` file at `path` and insert it to the database using the prepared // insert statement `stmt`. `tags` maps all existing tags to their database ID. 
-func loadJob(tx *sql.Tx, stmt *sql.Stmt, tags map[string]int64, path string) error { +func loadJob(tx *sqlx.Tx, stmt *sqlx.NamedStmt, tags map[string]int64, path string) error { f, err := os.Open(filepath.Join(path, "meta.json")) if err != nil { return err } defer f.Close() - var job schema.JobMeta = schema.JobMeta{ - Exclusive: 1, - } + var job schema.JobMeta = schema.JobMeta{BaseJob: schema.JobDefaults} if err := json.NewDecoder(bufio.NewReader(f)).Decode(&job); err != nil { return err } // TODO: Other metrics... - flopsAnyAvg := loadJobStat(&job, "flops_any") - memBwAvg := loadJobStat(&job, "mem_bw") + job.FlopsAnyAvg = loadJobStat(&job, "flops_any") + job.MemBwAvg = loadJobStat(&job, "mem_bw") - resources, err := json.Marshal(job.Resources) + job.RawResources, err = json.Marshal(job.Resources) if err != nil { return err } - res, err := stmt.Exec( - job.JobId, job.Cluster, job.StartTime, - job.User, job.Project, job.Partition, job.ArrayJobId, job.Duration, job.JobState, job.MetaData, string(resources), - job.NumNodes, job.NumHWThreads, job.NumAcc, job.SMT, job.Exclusive, job.MonitoringStatus, - flopsAnyAvg, memBwAvg) + res, err := stmt.Exec(job) if err != nil { return err } @@ -244,12 +232,10 @@ func loadJob(tx *sql.Tx, stmt *sql.Stmt, tags map[string]int64, path string) err return nil } -func loadJobStat(job *schema.JobMeta, metric string) sql.NullFloat64 { - val := sql.NullFloat64{Valid: false} +func loadJobStat(job *schema.JobMeta, metric string) float64 { if stats, ok := job.Statistics[metric]; ok { - val.Valid = true - val.Float64 = stats.Avg + return stats.Avg } - return val + return 0.0 } diff --git a/metricdata/archive.go b/metricdata/archive.go index dca84fd..def080a 100644 --- a/metricdata/archive.go +++ b/metricdata/archive.go @@ -13,13 +13,12 @@ import ( "strconv" "github.com/ClusterCockpit/cc-jobarchive/config" - "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/schema" ) // For a given job, return the 
path of the `data.json`/`meta.json` file. // TODO: Implement Issue ClusterCockpit/ClusterCockpit#97 -func getPath(job *model.Job, file string, checkLegacy bool) (string, error) { +func getPath(job *schema.Job, file string, checkLegacy bool) (string, error) { lvl1, lvl2 := fmt.Sprintf("%d", job.JobID/1000), fmt.Sprintf("%03d", job.JobID%1000) if !checkLegacy { return filepath.Join(JobArchivePath, job.Cluster, lvl1, lvl2, strconv.FormatInt(job.StartTime.Unix(), 10), file), nil @@ -34,7 +33,7 @@ func getPath(job *model.Job, file string, checkLegacy bool) (string, error) { } // Assuming job is completed/archived, return the jobs metric data. -func loadFromArchive(job *model.Job) (schema.JobData, error) { +func loadFromArchive(job *schema.Job) (schema.JobData, error) { filename, err := getPath(job, "data.json", true) if err != nil { return nil, err @@ -56,8 +55,8 @@ func loadFromArchive(job *model.Job) (schema.JobData, error) { // If the job is archived, find its `meta.json` file and override the tags list // in that JSON file. If the job is not archived, nothing is done. 
-func UpdateTags(job *model.Job, tags []*model.JobTag) error { - if job.State == model.JobStateRunning { +func UpdateTags(job *schema.Job, tags []*schema.Tag) error { + if job.State == schema.JobStateRunning { return nil } @@ -74,23 +73,19 @@ func UpdateTags(job *model.Job, tags []*model.JobTag) error { return err } - var metaFile schema.JobMeta + var metaFile schema.JobMeta = schema.JobMeta{ + BaseJob: schema.JobDefaults, + } if err := json.NewDecoder(f).Decode(&metaFile); err != nil { return err } f.Close() - metaFile.Tags = make([]struct { - Name string "json:\"Name\"" - Type string "json:\"Type\"" - }, 0) + metaFile.Tags = make([]*schema.Tag, 0) for _, tag := range tags { - metaFile.Tags = append(metaFile.Tags, struct { - Name string "json:\"Name\"" - Type string "json:\"Type\"" - }{ - Name: tag.TagName, - Type: tag.TagType, + metaFile.Tags = append(metaFile.Tags, &schema.Tag{ + Name: tag.Name, + Type: tag.Type, }) } @@ -103,7 +98,7 @@ func UpdateTags(job *model.Job, tags []*model.JobTag) error { } // Helper to metricdata.LoadAverages(). 
-func loadAveragesFromArchive(job *model.Job, metrics []string, data [][]schema.Float) error { +func loadAveragesFromArchive(job *schema.Job, metrics []string, data [][]schema.Float) error { filename, err := getPath(job, "meta.json", true) if err != nil { return err @@ -131,8 +126,8 @@ func loadAveragesFromArchive(job *model.Job, metrics []string, data [][]schema.F } // Writes a running job to the job-archive -func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { - if job.State != model.JobStateRunning { +func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { + if job.State != schema.JobStateRunning { return nil, errors.New("cannot archive job that is not running") } @@ -146,51 +141,27 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { return nil, err } - tags := []struct { - Name string `json:"Name"` - Type string `json:"Type"` - }{} - for _, tag := range job.Tags { - tags = append(tags, struct { - Name string `json:"Name"` - Type string `json:"Type"` - }{ - Name: tag.TagName, - Type: tag.TagType, - }) - } - - metaData := &schema.JobMeta{ - JobId: int64(job.JobID), - User: job.User, - Project: job.Project, - Cluster: job.Cluster, - NumNodes: job.NumNodes, - NumHWThreads: job.NumHWThreads, - NumAcc: job.NumAcc, - Exclusive: int8(job.Exclusive), - MonitoringStatus: int8(job.MonitoringStatus), - SMT: int8(job.Smt), - Partition: job.Partition, - ArrayJobId: job.ArrayJobID, - JobState: string(job.State), - StartTime: job.StartTime.Unix(), - Duration: int64(job.Duration), - Resources: job.Resources, - MetaData: "", // TODO/FIXME: Handle `meta_data`! 
- Tags: tags, - Statistics: make(map[string]*schema.JobMetaStatistics), + jobMeta := &schema.JobMeta{ + BaseJob: job.BaseJob, + StartTime: job.StartTime.Unix(), + Statistics: make(map[string]schema.JobStatistics), } for metric, data := range jobData { avg, min, max := 0.0, math.MaxFloat32, -math.MaxFloat32 - for _, nodedata := range data.Series { - avg += nodedata.Statistics.Avg - min = math.Min(min, nodedata.Statistics.Min) - max = math.Max(max, nodedata.Statistics.Max) + nodeData, ok := data["node"] + if !ok { + // TODO/FIXME: Calc average for non-node metrics as well! + continue } - metaData.Statistics[metric] = &schema.JobMetaStatistics{ + for _, series := range nodeData.Series { + avg += series.Statistics.Avg + min = math.Min(min, series.Statistics.Min) + max = math.Max(max, series.Statistics.Max) + } + + jobMeta.Statistics[metric] = schema.JobStatistics{ Unit: config.GetMetricConfig(job.Cluster, metric).Unit, Avg: avg / float64(job.NumNodes), Min: min, @@ -202,7 +173,7 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { // only return the JobMeta structure as the // statistics in there are needed. 
if !useArchive { - return metaData, nil + return jobMeta, nil } dirPath, err := getPath(job, "", false) @@ -220,7 +191,7 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { } defer f.Close() writer := bufio.NewWriter(f) - if err := json.NewEncoder(writer).Encode(metaData); err != nil { + if err := json.NewEncoder(writer).Encode(jobMeta); err != nil { return nil, err } if err := writer.Flush(); err != nil { @@ -239,5 +210,5 @@ func ArchiveJob(job *model.Job, ctx context.Context) (*schema.JobMeta, error) { return nil, err } - return metaData, f.Close() + return jobMeta, f.Close() } diff --git a/metricdata/cc-metric-store.go b/metricdata/cc-metric-store.go index 5bcd31a..2602f3b 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -12,7 +12,6 @@ import ( "time" "github.com/ClusterCockpit/cc-jobarchive/config" - "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/schema" ) @@ -57,7 +56,7 @@ func (ccms *CCMetricStore) Init(url string) error { return nil } -func (ccms *CCMetricStore) doRequest(job *model.Job, suffix string, metrics []string, ctx context.Context) (*http.Response, error) { +func (ccms *CCMetricStore) doRequest(job *schema.Job, suffix string, metrics []string, ctx context.Context) (*http.Response, error) { from, to := job.StartTime.Unix(), job.StartTime.Add(time.Duration(job.Duration)*time.Second).Unix() reqBody := ApiRequestBody{} reqBody.Metrics = metrics @@ -85,7 +84,7 @@ func (ccms *CCMetricStore) doRequest(job *model.Job, suffix string, metrics []st return ccms.client.Do(req) } -func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { +func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, ctx context.Context) (schema.JobData, error) { res, err := ccms.doRequest(job, "timeseries?with-stats=true", metrics, ctx) if err != nil { return nil, err @@ -103,8 +102,9 @@ func (ccms 
*CCMetricStore) LoadData(job *model.Job, metrics []string, ctx contex Scope: "node", // TODO: FIXME: Whatever... Unit: mc.Unit, Timestep: mc.Timestep, - Series: make([]*schema.MetricSeries, 0, len(job.Resources)), + Series: make([]schema.Series, 0, len(job.Resources)), } + for i, node := range job.Resources { if node.Accelerators != nil || node.HWThreads != nil { // TODO/FIXME: @@ -120,7 +120,7 @@ func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx contex return nil, fmt.Errorf("no data for node '%s' and metric '%s'", node.Hostname, metric) } - metricData.Series = append(metricData.Series, &schema.MetricSeries{ + metricData.Series = append(metricData.Series, schema.Series{ Hostname: node.Hostname, Data: data.Data, Statistics: &schema.MetricStatistics{ @@ -130,13 +130,13 @@ func (ccms *CCMetricStore) LoadData(job *model.Job, metrics []string, ctx contex }, }) } - jobData[metric] = metricData + jobData[metric] = map[string]*schema.JobMetric{"node": metricData} } return jobData, nil } -func (ccms *CCMetricStore) LoadStats(job *model.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { +func (ccms *CCMetricStore) LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { res, err := ccms.doRequest(job, "stats", metrics, ctx) if err != nil { return nil, err diff --git a/metricdata/influxdb-v2.go b/metricdata/influxdb-v2.go index 184be79..759ef91 100644 --- a/metricdata/influxdb-v2.go +++ b/metricdata/influxdb-v2.go @@ -1,5 +1,6 @@ package metricdata +/* import ( "context" "errors" @@ -175,3 +176,4 @@ func (idb *InfluxDBv2DataRepository) LoadStats(job *model.Job, metrics []string, func (idb *InfluxDBv2DataRepository) LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx context.Context) (map[string]map[string][]schema.Float, error) { return nil, nil } +*/ diff --git a/metricdata/metricdata.go 
b/metricdata/metricdata.go index b0c6e82..d066015 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/ClusterCockpit/cc-jobarchive/config" - "github.com/ClusterCockpit/cc-jobarchive/graph/model" "github.com/ClusterCockpit/cc-jobarchive/schema" ) @@ -15,10 +14,10 @@ type MetricDataRepository interface { Init(url string) error // Return the JobData for the given job, only with the requested metrics. - LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) + LoadData(job *schema.Job, metrics []string, ctx context.Context) (schema.JobData, error) // Return a map of metrics to a map of nodes to the metric statistics of the job. - LoadStats(job *model.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) + LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) // Return a map of nodes to a map of metrics to the data for the requested time. 
LoadNodeData(clusterId string, metrics, nodes []string, from, to int64, ctx context.Context) (map[string]map[string][]schema.Float, error) @@ -41,15 +40,15 @@ func Init(jobArchivePath string, disableArchive bool) error { if err := ccms.Init(cluster.MetricDataRepository.Url); err != nil { return err } - metricDataRepos[cluster.ClusterID] = ccms - case "influxdb-v2": - idb := &InfluxDBv2DataRepository{} - if err := idb.Init(cluster.MetricDataRepository.Url); err != nil { - return err - } - metricDataRepos[cluster.ClusterID] = idb + metricDataRepos[cluster.Name] = ccms + // case "influxdb-v2": + // idb := &InfluxDBv2DataRepository{} + // if err := idb.Init(cluster.MetricDataRepository.Url); err != nil { + // return err + // } + // metricDataRepos[cluster.Name] = idb default: - return fmt.Errorf("unkown metric data repository '%s' for cluster '%s'", cluster.MetricDataRepository.Kind, cluster.ClusterID) + return fmt.Errorf("unkown metric data repository '%s' for cluster '%s'", cluster.MetricDataRepository.Kind, cluster.Name) } } } @@ -57,8 +56,8 @@ func Init(jobArchivePath string, disableArchive bool) error { } // Fetches the metric data for a job. -func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.JobData, error) { - if job.State == model.JobStateRunning || !useArchive { +func LoadData(job *schema.Job, metrics []string, ctx context.Context) (schema.JobData, error) { + if job.State == schema.JobStateRunning || !useArchive { repo, ok := metricDataRepos[job.Cluster] if !ok { return nil, fmt.Errorf("no metric data repository configured for '%s'", job.Cluster) @@ -85,8 +84,8 @@ func LoadData(job *model.Job, metrics []string, ctx context.Context) (schema.Job } // Used for the jobsFootprint GraphQL-Query. TODO: Rename/Generalize. 
-func LoadAverages(job *model.Job, metrics []string, data [][]schema.Float, ctx context.Context) error { - if job.State != model.JobStateRunning && useArchive { +func LoadAverages(job *schema.Job, metrics []string, data [][]schema.Float, ctx context.Context) error { + if job.State != schema.JobStateRunning && useArchive { return loadAveragesFromArchive(job, metrics, data) } diff --git a/schema/job.go b/schema/job.go new file mode 100644 index 0000000..ae6bd40 --- /dev/null +++ b/schema/job.go @@ -0,0 +1,153 @@ +package schema + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +type BaseJob struct { + ID int64 `json:"id" db:"id"` + JobID int64 `json:"jobId" db:"job_id"` + User string `json:"user" db:"user"` + Project string `json:"project" db:"project"` + Cluster string `json:"cluster" db:"cluster"` + Partition string `json:"partition" db:"partition"` + ArrayJobId int32 `json:"arrayJobId" db:"array_job_id"` + NumNodes int32 `json:"numNodes" db:"num_nodes"` + NumHWThreads int32 `json:"numHwthreads" db:"num_hwthreads"` + NumAcc int32 `json:"numAcc" db:"num_acc"` + Exclusive int32 `json:"exclusive" db:"exclusive"` + MonitoringStatus int32 `json:"monitoringStatus" db:"monitoring_status"` + SMT int32 `json:"smt" db:"smt"` + State JobState `json:"jobState" db:"job_state"` + Duration int32 `json:"duration" db:"duration"` + Tags []*Tag `json:"tags"` + RawResources []byte `json:"-" db:"resources"` + Resources []Resource `json:"resources"` + MetaData interface{} `json:"metaData" db:"meta_data"` + + MemUsedMax float64 `json:"-" db:"mem_used_max"` + FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` + MemBwAvg float64 `json:"-" db:"mem_bw_avg"` + LoadAvg float64 `json:"-" db:"load_avg"` + NetBwAvg float64 `json:"-" db:"net_bw_avg"` + NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` + FileBwAvg float64 `json:"-" db:"file_bw_avg"` + FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` +} + +type JobMeta struct { + BaseJob + StartTime int64 
`json:"startTime" db:"start_time"` + Statistics map[string]JobStatistics `json:"statistics,omitempty"` +} + +var JobDefaults BaseJob = BaseJob{ + Exclusive: 1, + MonitoringStatus: 1, + MetaData: "", +} + +var JobColumns []string = []string{ + "id", "job_id", "user", "project", "cluster", "partition", "array_job_id", "num_nodes", + "num_hwthreads", "num_acc", "exclusive", "monitoring_status", "smt", "job_state", + "duration", "resources", "meta_data", +} + +const JobInsertStmt string = `INSERT INTO job ( + job_id, user, project, cluster, partition, array_job_id, num_nodes, num_hwthreads, num_acc, + exclusive, monitoring_status, smt, job_state, start_time, duration, resources, meta_data, + mem_used_max, flops_any_avg, mem_bw_avg, load_avg, net_bw_avg, net_data_vol_total, file_bw_avg, file_data_vol_total +) VALUES ( + :job_id, :user, :project, :cluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, + :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :resources, :meta_data, + :mem_used_max, :flops_any_avg, :mem_bw_avg, :load_avg, :net_bw_avg, :net_data_vol_total, :file_bw_avg, :file_data_vol_total +);` + +type Job struct { + BaseJob + StartTime time.Time `json:"startTime" db:"start_time"` +} + +type Scannable interface { + StructScan(dest interface{}) error +} + +// Helper function for scanning jobs with the `jobTableCols` columns selected. 
+func ScanJob(row Scannable) (*Job, error) { + job := &Job{BaseJob: JobDefaults} + if err := row.StructScan(&job); err != nil { + return nil, err + } + + if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil { + return nil, err + } + + if job.Duration == 0 && job.State == JobStateRunning { + job.Duration = int32(time.Since(job.StartTime).Seconds()) + } + + return job, nil +} + +type JobStatistics struct { + Unit string `json:"unit"` + Avg float64 `json:"avg"` + Min float64 `json:"min"` + Max float64 `json:"max"` +} + +type Tag struct { + ID int64 `json:"id" db:"id"` + Type string `json:"type" db:"tag_type"` + Name string `json:"name" db:"tag_name"` +} + +type Resource struct { + Hostname string `json:"hostname"` + HWThreads []int `json:"hwthreads,omitempty"` + Accelerators []int `json:"accelerators,omitempty"` + Configuration string `json:"configuration,omitempty"` +} + +type JobState string + +const ( + JobStateRunning JobState = "running" + JobStateCompleted JobState = "completed" + JobStateFailed JobState = "failed" + JobStateCanceled JobState = "canceled" + JobStateStopped JobState = "stopped" + JobStateTimeout JobState = "timeout" +) + +func (e *JobState) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = JobState(str) + if !e.Valid() { + return errors.New("invalid job state") + } + + return nil +} + +func (e JobState) MarshalGQL(w io.Writer) { + fmt.Fprintf(w, "\"%s\"", e) +} + +func (e JobState) Valid() bool { + return e == JobStateRunning || + e == JobStateCompleted || + e == JobStateFailed || + e == JobStateCanceled || + e == JobStateStopped || + e == JobStateTimeout +} diff --git a/schema/metrics.go b/schema/metrics.go index 181083e..0186750 100644 --- a/schema/metrics.go +++ b/schema/metrics.go @@ -5,14 +5,34 @@ import ( "io" ) -// Format of `data.json` files. 
-type JobData map[string]*JobMetric +type JobData map[string]map[string]*JobMetric type JobMetric struct { - Unit string `json:"Unit"` - Scope MetricScope `json:"Scope"` - Timestep int `json:"Timestep"` - Series []*MetricSeries `json:"Series"` + Unit string `json:"unit"` + Scope MetricScope `json:"scope"` + Timestep int `json:"timestep"` + Series []Series `json:"series"` + StatsSeries *StatsSeries `json:"statisticsSeries,omitempty"` +} + +type Series struct { + Hostname string `json:"hostname"` + Id *int `json:"id,omitempty"` + Statistics *MetricStatistics `json:"statistics"` + Data []Float `json:"data"` +} + +type MetricStatistics struct { + Avg float64 `json:"avg"` + Min float64 `json:"min"` + Max float64 `json:"max"` +} + +type StatsSeries struct { + Mean []Float `json:"mean,omitempty"` + Min []Float `json:"min,omitempty"` + Max []Float `json:"max,omitempty"` + Percentiles map[int][]Float `json:"percentiles,omitempty"` } type MetricScope string @@ -39,61 +59,3 @@ func (e *MetricScope) UnmarshalGQL(v interface{}) error { func (e MetricScope) MarshalGQL(w io.Writer) { fmt.Fprintf(w, "\"%s\"", e) } - -type MetricStatistics struct { - Avg float64 `json:"Avg"` - Min float64 `json:"Min"` - Max float64 `json:"Max"` -} - -type MetricSeries struct { - Hostname string `json:"Hostname"` - Id int `json:"Id"` - Statistics *MetricStatistics `json:"Statistics"` - Data []Float `json:"Data"` -} - -type JobMetaStatistics struct { - Unit string `json:"Unit"` - Avg float64 `json:"Avg"` - Min float64 `json:"Min"` - Max float64 `json:"Max"` -} - -type Accelerator struct { - ID int `json:"Id"` - Type string `json:"Type"` - Model string `json:"Model"` -} - -type JobResource struct { - Hostname string `json:"Hostname"` - HWThreads []int `json:"HWThreads,omitempty"` - Accelerators []Accelerator `json:"Accelerators,omitempty"` -} - -// Format of `meta.json` files. 
-type JobMeta struct { - JobId int64 `json:"JobId"` - User string `json:"User"` - Project string `json:"Project"` - Cluster string `json:"Cluster"` - NumNodes int `json:"NumNodes"` - NumHWThreads int `json:"NumHWThreads"` - NumAcc int `json:"NumAcc"` - Exclusive int8 `json:"Exclusive"` - MonitoringStatus int8 `json:"MonitoringStatus"` - SMT int8 `json:"SMT"` - Partition string `json:"Partition"` - ArrayJobId int `json:"ArrayJobId"` - JobState string `json:"JobState"` - StartTime int64 `json:"StartTime"` - Duration int64 `json:"Duration"` - Resources []*JobResource `json:"Resources"` - MetaData string `json:"MetaData"` - Tags []struct { - Name string `json:"Name"` - Type string `json:"Type"` - } `json:"Tags"` - Statistics map[string]*JobMetaStatistics `json:"Statistics"` -} diff --git a/server.go b/server.go index 991436a..827b929 100644 --- a/server.go +++ b/server.go @@ -176,9 +176,8 @@ func main() { resolver := &graph.Resolver{DB: db} graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) graphQLPlayground := playground.Handler("GraphQL playground", "/query") - restApi := &api.RestApi{ + api := &api.RestApi{ DB: db, - Resolver: resolver, AsyncArchiving: programConfig.AsyncArchiving, } @@ -235,7 +234,7 @@ func main() { }) monitoringRoutes(secured, resolver) - restApi.MountRoutes(secured) + api.MountRoutes(secured) r.PathPrefix("/").Handler(http.FileServer(http.Dir(programConfig.StaticFiles))) handler := handlers.CORS( diff --git a/templates/home.html b/templates/home.html index 6b23b2f..b357374 100644 --- a/templates/home.html +++ b/templates/home.html @@ -35,7 +35,7 @@ - + @@ -44,10 +44,10 @@ {{range .Infos.clusters}} - - - - + + + + {{end}} From a6e8d5b484ba77b3ea3d64f8c91f9b9a57559643 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Mon, 20 Dec 2021 10:48:58 +0100 Subject: [PATCH 17/25] Slight GraphQL-API changes --- api/openapi.yaml | 171 +++++++++++++++++++ api/rest.go | 2 +- 
graph/generated/generated.go | 313 +++++++++++------------------------ graph/model/models_gen.go | 16 +- graph/schema.graphqls | 19 +-- graph/schema.resolvers.go | 21 ++- 6 files changed, 294 insertions(+), 248 deletions(-) create mode 100644 api/openapi.yaml diff --git a/api/openapi.yaml b/api/openapi.yaml new file mode 100644 index 0000000..38e7f0a --- /dev/null +++ b/api/openapi.yaml @@ -0,0 +1,171 @@ +# +# ClusterCockpit's API spec can be exported via: +# docker exec -it cc-php php bin/console api:openapi:export --yaml +# +# This spec is written by hand and hopefully up to date with the API. +# + +openapi: 3.0.3 +info: + title: 'ClusterCockpit REST API' + description: 'API for batch job control' + version: 0.0.2 +servers: + - url: / + description: '' +paths: + '/api/jobs/{id}': + get: + operationId: 'getJob' + summary: 'Get job resource' + parameters: + - name: id + in: path + required: true + schema: { type: integer } + description: 'Database ID (Resource Identifier)' + responses: + 200: + description: 'Job resource' + content: + 'application/json': + schema: + $ref: '#/components/schemas/Job' + 404: + description: 'Resource not found' + '/api/jobs/tag_job/{id}': + post: + operationId: 'tagJob' + summary: 'Add a tag to a job' + parameters: + - name: id + in: path + required: true + schema: { type: integer } + description: 'Job ID' + requestBody: + description: 'Array of tags to add' + required: true + content: + 'application/json': + schema: + type: array + items: + $ref: '#/components/schemas/Tag' + responses: + 200: + description: 'Job resource' + content: + 'application/json': + schema: + $ref: '#/components/schemas/Job' + 404: + description: 'Job or tag does not exist' + 400: + description: 'Bad request' + '/api/jobs/start_job/': + post: + operationId: 'startJob' + summary: 'Add a newly started job' + requestBody: + required: true + content: + 'application/json': + schema: + $ref: '#/components/schemas/Job' + responses: + 201: + description: 'Job 
successfully' + content: + 'application/json': + schema: + type: object + properties: + id: + type: integer + description: 'The database ID assigned to this job' + 400: + description: 'Bad request' + 422: + description: 'The combination of jobId, clusterId and startTime does already exist' + '/api/jobs/stop_job/': + post: + operationId: stopJobViaJobID + summary: 'Mark a job as stopped. Which job to stop is specified by the request body.' + requestBody: + required: true + content: + 'application/json': + schema: + type: object + required: [jobId, cluster, startTime, stopTime] + properties: + jobId: { type: integer } + cluster: { type: string } + startTime: { type: integer } + stopTime: { type: integer } + responses: + 200: + description: 'Job resource' + content: + 'application/json': + schema: + $ref: '#/components/schemas/Job' + 400: + description: 'Bad request' + 404: + description: 'Resource not found' + '/api/jobs/stop_job/{id}': + post: + operationId: 'stopJobViaDBID' + summary: 'Mark a job as stopped.' 
+ parameters: + - name: id + in: path + required: true + schema: { type: integer } + description: 'Database ID (Resource Identifier)' + requestBody: + required: true + content: + 'application/json': + schema: + type: object + required: [stopTime] + properties: + stopTime: { type: integer } + responses: + 200: + description: 'Job resource' + content: + 'application/json': + schema: + $ref: '#/components/schemas/Job' + 400: + description: 'Bad request' + 404: + description: 'Resource not found' +components: + schemas: + Tag: + description: 'A job tag' + type: object + properties: + id: + type: string + description: 'Database ID' + type: + type: string + description: 'Tag type' + name: + type: string + description: 'Tag name' + Job: + $ref: https://raw.githubusercontent.com/ClusterCockpit/cc-specifications/master/schema/json/job-meta.schema.json + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT +security: + - bearerAuth: [] # Applies `bearerAuth` globally \ No newline at end of file diff --git a/api/rest.go b/api/rest.go index 8a64b3a..64b284c 100644 --- a/api/rest.go +++ b/api/rest.go @@ -39,7 +39,7 @@ type StopJobApiRequest struct { // JobId, ClusterId and StartTime are optional. // They are only used if no database id was provided. 
JobId *string `json:"jobId"` - Cluster *string `json:"clusterId"` + Cluster *string `json:"cluster"` StartTime *int64 `json:"startTime"` // Payload diff --git a/graph/generated/generated.go b/graph/generated/generated.go index 3134b6d..ba23a9e 100644 --- a/graph/generated/generated.go +++ b/graph/generated/generated.go @@ -106,12 +106,8 @@ type ComplexityRoot struct { } JobMetricWithName struct { - Core func(childComplexity int) int - Hwthread func(childComplexity int) int - MemoryDomain func(childComplexity int) int - Name func(childComplexity int) int - Node func(childComplexity int) int - Socket func(childComplexity int) int + Metric func(childComplexity int) int + Name func(childComplexity int) int } JobResource struct { @@ -193,7 +189,7 @@ type ComplexityRoot struct { Query struct { Clusters func(childComplexity int) int Job func(childComplexity int, id string) int - JobMetrics func(childComplexity int, id string, metrics []string) int + JobMetrics func(childComplexity int, id string, metrics []string, scopes []schema.MetricScope) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int JobsStatistics func(childComplexity int, filter []*model.JobFilter, groupBy *model.Aggregate) int @@ -254,7 +250,7 @@ type QueryResolver interface { Clusters(ctx context.Context) ([]*model.Cluster, error) Tags(ctx context.Context) ([]*schema.Tag, error) Job(ctx context.Context, id string) (*schema.Job, error) - JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) + JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.MetricFootprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order 
*model.OrderByInput) (*model.JobResultList, error) JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) @@ -536,26 +532,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetric.Unit(childComplexity), true - case "JobMetricWithName.core": - if e.complexity.JobMetricWithName.Core == nil { + case "JobMetricWithName.metric": + if e.complexity.JobMetricWithName.Metric == nil { break } - return e.complexity.JobMetricWithName.Core(childComplexity), true - - case "JobMetricWithName.hwthread": - if e.complexity.JobMetricWithName.Hwthread == nil { - break - } - - return e.complexity.JobMetricWithName.Hwthread(childComplexity), true - - case "JobMetricWithName.memoryDomain": - if e.complexity.JobMetricWithName.MemoryDomain == nil { - break - } - - return e.complexity.JobMetricWithName.MemoryDomain(childComplexity), true + return e.complexity.JobMetricWithName.Metric(childComplexity), true case "JobMetricWithName.name": if e.complexity.JobMetricWithName.Name == nil { @@ -564,20 +546,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetricWithName.Name(childComplexity), true - case "JobMetricWithName.node": - if e.complexity.JobMetricWithName.Node == nil { - break - } - - return e.complexity.JobMetricWithName.Node(childComplexity), true - - case "JobMetricWithName.socket": - if e.complexity.JobMetricWithName.Socket == nil { - break - } - - return e.complexity.JobMetricWithName.Socket(childComplexity), true - case "JobResource.accelerators": if e.complexity.JobResource.Accelerators == nil { break @@ -683,14 +651,14 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.TotalWalltime(childComplexity), true - case "MetricConfig.Alert": + case "MetricConfig.alert": if e.complexity.MetricConfig.Alert == nil { break } return 
e.complexity.MetricConfig.Alert(childComplexity), true - case "MetricConfig.Caution": + case "MetricConfig.caution": if e.complexity.MetricConfig.Caution == nil { break } @@ -704,14 +672,14 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricConfig.Name(childComplexity), true - case "MetricConfig.Normal": + case "MetricConfig.normal": if e.complexity.MetricConfig.Normal == nil { break } return e.complexity.MetricConfig.Normal(childComplexity), true - case "MetricConfig.Peak": + case "MetricConfig.peak": if e.complexity.MetricConfig.Peak == nil { break } @@ -954,7 +922,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string)), true + return e.complexity.Query.JobMetrics(childComplexity, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)), true case "Query.jobs": if e.complexity.Query.Jobs == nil { @@ -1278,10 +1246,10 @@ type MetricConfig { unit: String! scope: String! timestep: Int! - Peak: Float! - Normal: Float! - Caution: Float! - Alert: Float! + peak: Float! + normal: Float! + caution: Float! + alert: Float! } type Tag { @@ -1298,13 +1266,8 @@ type JobResource { } type JobMetricWithName { - name: String! - - node: JobMetric - socket: JobMetric - memoryDomain: JobMetric - core: JobMetric - hwthread: JobMetric + name: String! + metric: JobMetric! } type JobMetric { @@ -1356,7 +1319,7 @@ type Query { tags: [Tag!]! # List of all tags job(id: ID!): Job - jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]! + jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): [MetricFootprints]! jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! 
@@ -1603,6 +1566,15 @@ func (ec *executionContext) field_Query_jobMetrics_args(ctx context.Context, raw } } args["metrics"] = arg1 + var arg2 []schema.MetricScope + if tmp, ok := rawArgs["scopes"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("scopes")) + arg2, err = ec.unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScopeᚄ(ctx, tmp) + if err != nil { + return nil, err + } + } + args["scopes"] = arg2 return args, nil } @@ -3187,7 +3159,7 @@ func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field g return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) _JobMetricWithName_node(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { +func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -3205,146 +3177,21 @@ func (ec *executionContext) _JobMetricWithName_node(ctx context.Context, field g ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return obj.Node, nil + return obj.Metric, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - return graphql.Null - } - res := resTmp.(*schema.JobMetric) - fc.Result = res - return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricWithName_socket(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not 
be null") } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricWithName", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Socket, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { return graphql.Null } res := resTmp.(*schema.JobMetric) fc.Result = res - return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricWithName_memoryDomain(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricWithName", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.MemoryDomain, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*schema.JobMetric) - fc.Result = res - return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricWithName_core(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricWithName", - Field: field, - Args: nil, - IsMethod: false, - 
IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Core, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*schema.JobMetric) - fc.Result = res - return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobMetricWithName_hwthread(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobMetricWithName", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Hwthread, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*schema.JobMetric) - fc.Result = res - return ec.marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) + return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) } func (ec *executionContext) _JobResource_hostname(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { @@ -3994,7 +3841,7 @@ func (ec *executionContext) _MetricConfig_timestep(ctx context.Context, field gr return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_Peak(ctx context.Context, field graphql.CollectedField, obj 
*model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_peak(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -4029,7 +3876,7 @@ func (ec *executionContext) _MetricConfig_Peak(ctx context.Context, field graphq return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_Normal(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_normal(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -4064,7 +3911,7 @@ func (ec *executionContext) _MetricConfig_Normal(ctx context.Context, field grap return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_Caution(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_caution(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -4099,7 +3946,7 @@ func (ec *executionContext) _MetricConfig_Caution(ctx context.Context, field gra return ec.marshalNFloat2float64(ctx, field.Selections, res) } -func (ec *executionContext) _MetricConfig_Alert(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { +func (ec *executionContext) _MetricConfig_alert(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) @@ -5105,7 +4952,7 @@ func (ec *executionContext) _Query_jobMetrics(ctx 
context.Context, field graphql fc.Args = args resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobMetrics(rctx, args["id"].(string), args["metrics"].([]string)) + return ec.resolvers.Query().JobMetrics(rctx, args["id"].(string), args["metrics"].([]string), args["scopes"].([]schema.MetricScope)) }) if err != nil { ec.Error(ctx, err) @@ -7778,16 +7625,11 @@ func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.Sele if out.Values[i] == graphql.Null { invalids++ } - case "node": - out.Values[i] = ec._JobMetricWithName_node(ctx, field, obj) - case "socket": - out.Values[i] = ec._JobMetricWithName_socket(ctx, field, obj) - case "memoryDomain": - out.Values[i] = ec._JobMetricWithName_memoryDomain(ctx, field, obj) - case "core": - out.Values[i] = ec._JobMetricWithName_core(ctx, field, obj) - case "hwthread": - out.Values[i] = ec._JobMetricWithName_hwthread(ctx, field, obj) + case "metric": + out.Values[i] = ec._JobMetricWithName_metric(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7953,23 +7795,23 @@ func (ec *executionContext) _MetricConfig(ctx context.Context, sel ast.Selection if out.Values[i] == graphql.Null { invalids++ } - case "Peak": - out.Values[i] = ec._MetricConfig_Peak(ctx, field, obj) + case "peak": + out.Values[i] = ec._MetricConfig_peak(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "Normal": - out.Values[i] = ec._MetricConfig_Normal(ctx, field, obj) + case "normal": + out.Values[i] = ec._MetricConfig_normal(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "Caution": - out.Values[i] = ec._MetricConfig_Caution(ctx, field, obj) + case "caution": + out.Values[i] = ec._MetricConfig_caution(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } - case "Alert": - 
out.Values[i] = ec._MetricConfig_Alert(ctx, field, obj) + case "alert": + out.Values[i] = ec._MetricConfig_alert(ctx, field, obj) if out.Values[i] == graphql.Null { invalids++ } @@ -9219,6 +9061,16 @@ func (ec *executionContext) unmarshalNJobFilter2ᚖgithubᚗcomᚋClusterCockpit return &res, graphql.ErrorOnPath(ctx, err) } +func (ec *executionContext) marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._JobMetric(ctx, sel, v) +} + func (ec *executionContext) marshalNJobMetricWithName2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobMetricWithNameᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobMetricWithName) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -10330,13 +10182,6 @@ func (ec *executionContext) unmarshalOJobFilter2ᚕᚖgithubᚗcomᚋClusterCock return res, nil } -func (ec *executionContext) marshalOJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx context.Context, sel ast.SelectionSet, v *schema.JobMetric) graphql.Marshaler { - if v == nil { - return graphql.Null - } - return ec._JobMetric(ctx, sel, v) -} - func (ec *executionContext) unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobStateᚄ(ctx context.Context, v interface{}) ([]schema.JobState, error) { if v == nil { return nil, nil @@ -10380,6 +10225,42 @@ func (ec *executionContext) marshalOMetricFootprints2ᚖgithubᚗcomᚋClusterCo return ec._MetricFootprints(ctx, sel, v) } +func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScopeᚄ(ctx context.Context, v interface{}) ([]schema.MetricScope, error) { + if v == nil { + return nil, nil + } + var vSlice []interface{} + if v != nil { + if tmp1, ok := 
v.([]interface{}); ok { + vSlice = tmp1 + } else { + vSlice = []interface{}{v} + } + } + var err error + res := make([]schema.MetricScope, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScopeᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.MetricScope) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, sel, v[i]) + } + + return ret +} + func (ec *executionContext) marshalOMetricStatistics2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricStatistics(ctx context.Context, sel ast.SelectionSet, v *schema.MetricStatistics) graphql.Marshaler { if v == nil { return graphql.Null diff --git a/graph/model/models_gen.go b/graph/model/models_gen.go index ccfd8e2..1db1574 100644 --- a/graph/model/models_gen.go +++ b/graph/model/models_gen.go @@ -60,12 +60,8 @@ type JobFilter struct { } type JobMetricWithName struct { - Name string `json:"name"` - Node *schema.JobMetric `json:"node"` - Socket *schema.JobMetric `json:"socket"` - MemoryDomain *schema.JobMetric `json:"memoryDomain"` - Core *schema.JobMetric `json:"core"` - Hwthread *schema.JobMetric `json:"hwthread"` + Name string `json:"name"` + Metric *schema.JobMetric `json:"metric"` } type JobResource struct { @@ -97,10 +93,10 @@ type MetricConfig struct { Unit string `json:"unit"` Scope string `json:"scope"` Timestep int `json:"timestep"` - Peak float64 `json:"Peak"` - Normal float64 `json:"Normal"` - Caution float64 `json:"Caution"` - Alert float64 `json:"Alert"` + Peak float64 `json:"peak"` + Normal float64 
`json:"normal"` + Caution float64 `json:"caution"` + Alert float64 `json:"alert"` } type MetricFootprints struct { diff --git a/graph/schema.graphqls b/graph/schema.graphqls index 0a162f8..e86d71a 100644 --- a/graph/schema.graphqls +++ b/graph/schema.graphqls @@ -63,10 +63,10 @@ type MetricConfig { unit: String! scope: String! timestep: Int! - Peak: Float! - Normal: Float! - Caution: Float! - Alert: Float! + peak: Float! + normal: Float! + caution: Float! + alert: Float! } type Tag { @@ -83,13 +83,8 @@ type JobResource { } type JobMetricWithName { - name: String! - - node: JobMetric - socket: JobMetric - memoryDomain: JobMetric - core: JobMetric - hwthread: JobMetric + name: String! + metric: JobMetric! } type JobMetric { @@ -141,7 +136,7 @@ type Query { tags: [Tag!]! # List of all tags job(id: ID!): Job - jobMetrics(id: ID!, metrics: [String!]): [JobMetricWithName!]! + jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobMetricWithName!]! jobsFootprints(filter: [JobFilter!], metrics: [String!]!): [MetricFootprints]! jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! 
diff --git a/graph/schema.resolvers.go b/graph/schema.resolvers.go index d8cd186..43ae37c 100644 --- a/graph/schema.resolvers.go +++ b/graph/schema.resolvers.go @@ -166,12 +166,13 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) return schema.ScanJob(r.DB.QueryRowx(sql, args...)) } -func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) { +func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) { job, err := r.Query().Job(ctx, id) if err != nil { return nil, err } + // TODO: FIXME: Do something with `scopes` data, err := metricdata.LoadData(job, metrics, ctx) if err != nil { return nil, err @@ -179,14 +180,16 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str res := []*model.JobMetricWithName{} for name, md := range data { - res = append(res, &model.JobMetricWithName{ - Name: name, - Node: md["node"], - Socket: md["socket"], - MemoryDomain: md["memoryDomain"], - Core: md["core"], - Hwthread: md["hwthread"], - }) + for scope, metric := range md { + if metric.Scope != schema.MetricScope(scope) { + panic("WTF?") + } + + res = append(res, &model.JobMetricWithName{ + Name: name, + Metric: metric, + }) + } } return res, err From 30a436e27e0a1f5e4a2b443196c684429343bde6 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Mon, 20 Dec 2021 10:49:46 +0100 Subject: [PATCH 18/25] use token from cluster.json --- .env | 7 ------- metricdata/cc-metric-store.go | 12 +++--------- metricdata/metricdata.go | 4 ++-- server.go | 14 ++++++-------- 4 files changed, 11 insertions(+), 26 deletions(-) diff --git a/.env b/.env index 48076cd..0301d4b 100644 --- a/.env +++ b/.env @@ -1,11 +1,4 @@ - -export CCMETRICSTORE_URL="http://localhost:8081" -export 
CCMETRICSTORE_JWT="eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw" - -export INFLUXDB_V2_TOKEN="egLfcf7fx0FESqFYU3RpAAbj" - export JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0=" export JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q==" export SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629" - export LDAP_ADMIN_PASSWORD="mashup" diff --git a/metricdata/cc-metric-store.go b/metricdata/cc-metric-store.go index 2602f3b..603c82f 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -6,9 +6,7 @@ import ( "encoding/json" "errors" "fmt" - "log" "net/http" - "os" "time" "github.com/ClusterCockpit/cc-jobarchive/config" @@ -46,13 +44,9 @@ type ApiStatsData struct { Max schema.Float `json:"max"` } -func (ccms *CCMetricStore) Init(url string) error { - ccms.url = url // os.Getenv("CCMETRICSTORE_URL") - ccms.jwt = os.Getenv("CCMETRICSTORE_JWT") - if ccms.jwt == "" { - log.Println("warning: environment variable 'CCMETRICSTORE_JWT' not set") - } - +func (ccms *CCMetricStore) Init(url, token string) error { + ccms.url = url + ccms.jwt = token return nil } diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index d066015..18f3aac 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -11,7 +11,7 @@ import ( type MetricDataRepository interface { // Initialize this MetricDataRepository. One instance of // this interface will only ever be responsible for one cluster. - Init(url string) error + Init(url, token string) error // Return the JobData for the given job, only with the requested metrics. 
LoadData(job *schema.Job, metrics []string, ctx context.Context) (schema.JobData, error) @@ -37,7 +37,7 @@ func Init(jobArchivePath string, disableArchive bool) error { switch cluster.MetricDataRepository.Kind { case "cc-metric-store": ccms := &CCMetricStore{} - if err := ccms.Init(cluster.MetricDataRepository.Url); err != nil { + if err := ccms.Init(cluster.MetricDataRepository.Url, cluster.MetricDataRepository.Token); err != nil { return err } metricDataRepos[cluster.Name] = ccms diff --git a/server.go b/server.go index 827b929..18f4ccc 100644 --- a/server.go +++ b/server.go @@ -16,6 +16,7 @@ import ( "github.com/ClusterCockpit/cc-jobarchive/graph" "github.com/ClusterCockpit/cc-jobarchive/graph/generated" "github.com/ClusterCockpit/cc-jobarchive/metricdata" + "github.com/ClusterCockpit/cc-jobarchive/schema" "github.com/ClusterCockpit/cc-jobarchive/templates" "github.com/gorilla/handlers" "github.com/gorilla/mux" @@ -264,19 +265,16 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { filterPresets := map[string]interface{}{} query := r.URL.Query() if query.Get("tag") != "" { - filterPresets["tagId"] = query.Get("tag") + filterPresets["tag"] = query.Get("tag") } if query.Get("cluster") != "" { - filterPresets["clusterId"] = query.Get("cluster") + filterPresets["cluster"] = query.Get("cluster") } if query.Get("project") != "" { - filterPresets["projectId"] = query.Get("project") + filterPresets["project"] = query.Get("project") } - if query.Get("running") == "true" { - filterPresets["isRunning"] = true - } - if query.Get("running") == "false" { - filterPresets["isRunning"] = false + if query.Get("state") != "" && schema.JobState(query.Get("state")).Valid() { + filterPresets["state"] = query.Get("state") } if query.Get("from") != "" && query.Get("to") != "" { filterPresets["startTime"] = map[string]string{ From 9d87e8874c7fc92c50484ac2b6712a05ea4a1d86 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Fri, 7 Jan 2022 09:39:00 +0100 Subject: [PATCH 
19/25] slight change to job structure --- api/rest.go | 67 +++++++++++++++++++++++++++++++++++++++++++---- init-db.go | 13 ++++++--- schema/job.go | 46 ++++++++++++++++++-------------- schema/metrics.go | 43 +++++++++++++++++++++--------- server.go | 57 +++++++++++++++++++++++++--------------- 5 files changed, 163 insertions(+), 63 deletions(-) diff --git a/api/rest.go b/api/rest.go index 64b284c..dedb540 100644 --- a/api/rest.go +++ b/api/rest.go @@ -4,8 +4,12 @@ import ( "context" "encoding/json" "fmt" + "io" "log" "net/http" + "os" + "path/filepath" + "time" "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph" @@ -17,9 +21,10 @@ import ( ) type RestApi struct { - DB *sqlx.DB - Resolver *graph.Resolver - AsyncArchiving bool + DB *sqlx.DB + Resolver *graph.Resolver + AsyncArchiving bool + MachineStateDir string } func (api *RestApi) MountRoutes(r *mux.Router) { @@ -29,6 +34,9 @@ func (api *RestApi) MountRoutes(r *mux.Router) { r.HandleFunc("/api/jobs/{id}", api.getJob).Methods(http.MethodGet) r.HandleFunc("/api/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) + + r.HandleFunc("/api/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet) + r.HandleFunc("/api/machine_state/{cluster}/{host}", api.putMachineState).Methods(http.MethodPut, http.MethodPost) } type StartJobApiRespone struct { @@ -150,12 +158,17 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { return } - req.RawResources, err = json.Marshal(req.Resources) + job := schema.Job{ + BaseJob: req.BaseJob, + StartTime: time.Unix(req.StartTime, 0), + } + + job.RawResources, err = json.Marshal(req.Resources) if err != nil { log.Fatal(err) } - res, err := api.DB.NamedExec(schema.JobInsertStmt, req) + res, err := api.DB.NamedExec(schema.JobInsertStmt, job) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return @@ -278,3 +291,47 @@ func (api *RestApi) stopJob(rw 
http.ResponseWriter, r *http.Request) { } } } + +func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) { + if api.MachineStateDir == "" { + http.Error(rw, "not enabled", http.StatusNotFound) + return + } + + vars := mux.Vars(r) + cluster := vars["cluster"] + host := vars["host"] + dir := filepath.Join(api.MachineStateDir, cluster) + if err := os.MkdirAll(dir, 0755); err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + filename := filepath.Join(dir, fmt.Sprintf("%s.json", host)) + f, err := os.Create(filename) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + defer f.Close() + + if _, err := io.Copy(f, r.Body); err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + rw.WriteHeader(http.StatusCreated) +} + +func (api *RestApi) getMachineState(rw http.ResponseWriter, r *http.Request) { + if api.MachineStateDir == "" { + http.Error(rw, "not enabled", http.StatusNotFound) + return + } + + vars := mux.Vars(r) + filename := filepath.Join(api.MachineStateDir, vars["cluster"], fmt.Sprintf("%s.json", vars["host"])) + + // Sets the content-type and 'Last-Modified' Header and so on automatically + http.ServeFile(rw, r, filename) +} diff --git a/init-db.go b/init-db.go index 541c3d3..0c94fe4 100644 --- a/init-db.go +++ b/init-db.go @@ -185,14 +185,19 @@ func loadJob(tx *sqlx.Tx, stmt *sqlx.NamedStmt, tags map[string]int64, path stri } defer f.Close() - var job schema.JobMeta = schema.JobMeta{BaseJob: schema.JobDefaults} - if err := json.NewDecoder(bufio.NewReader(f)).Decode(&job); err != nil { + var jobMeta schema.JobMeta = schema.JobMeta{BaseJob: schema.JobDefaults} + if err := json.NewDecoder(bufio.NewReader(f)).Decode(&jobMeta); err != nil { return err } + job := schema.Job{ + BaseJob: jobMeta.BaseJob, + StartTime: time.Unix(jobMeta.StartTime, 0), + } + // TODO: Other metrics... 
- job.FlopsAnyAvg = loadJobStat(&job, "flops_any") - job.MemBwAvg = loadJobStat(&job, "mem_bw") + job.FlopsAnyAvg = loadJobStat(&jobMeta, "flops_any") + job.MemBwAvg = loadJobStat(&jobMeta, "mem_bw") job.RawResources, err = json.Marshal(job.Resources) if err != nil { diff --git a/schema/job.go b/schema/job.go index ae6bd40..d09fd67 100644 --- a/schema/job.go +++ b/schema/job.go @@ -8,6 +8,8 @@ import ( "time" ) +// Common subset of Job and JobMeta. Use one of those, not +// this type directly. type BaseJob struct { ID int64 `json:"id" db:"id"` JobID int64 `json:"jobId" db:"job_id"` @@ -25,23 +27,32 @@ type BaseJob struct { State JobState `json:"jobState" db:"job_state"` Duration int32 `json:"duration" db:"duration"` Tags []*Tag `json:"tags"` - RawResources []byte `json:"-" db:"resources"` - Resources []Resource `json:"resources"` + Resources []*Resource `json:"resources"` MetaData interface{} `json:"metaData" db:"meta_data"` - - MemUsedMax float64 `json:"-" db:"mem_used_max"` - FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` - MemBwAvg float64 `json:"-" db:"mem_bw_avg"` - LoadAvg float64 `json:"-" db:"load_avg"` - NetBwAvg float64 `json:"-" db:"net_bw_avg"` - NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` - FileBwAvg float64 `json:"-" db:"file_bw_avg"` - FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` } +// This type is used as the GraphQL interface and using sqlx as a table row. 
+type Job struct { + BaseJob + RawResources []byte `json:"-" db:"resources"` + StartTime time.Time `json:"startTime" db:"start_time"` + MemUsedMax float64 `json:"-" db:"mem_used_max"` + FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` + MemBwAvg float64 `json:"-" db:"mem_bw_avg"` + LoadAvg float64 `json:"-" db:"load_avg"` + NetBwAvg float64 `json:"-" db:"net_bw_avg"` + NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` + FileBwAvg float64 `json:"-" db:"file_bw_avg"` + FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` +} + +// When reading from the database or sending data via GraphQL, the start time can be in the much more +// convenient time.Time type. In the `meta.json` files, the start time is encoded as a unix epoch timestamp. +// This is why there is this struct, which contains all fields from the regular job struct, but "overwrites" +// the StartTime field with one of type int64. type JobMeta struct { BaseJob - StartTime int64 `json:"startTime" db:"start_time"` + StartTime int64 `json:"startTime"` Statistics map[string]JobStatistics `json:"statistics,omitempty"` } @@ -52,9 +63,9 @@ var JobDefaults BaseJob = BaseJob{ } var JobColumns []string = []string{ - "id", "job_id", "user", "project", "cluster", "partition", "array_job_id", "num_nodes", - "num_hwthreads", "num_acc", "exclusive", "monitoring_status", "smt", "job_state", - "duration", "resources", "meta_data", + "job.id", "job.job_id", "job.user", "job.project", "job.cluster", "job.start_time", "job.partition", "job.array_job_id", "job.num_nodes", + "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state", + "job.duration", "job.resources", "job.meta_data", } const JobInsertStmt string = `INSERT INTO job ( @@ -67,11 +78,6 @@ const JobInsertStmt string = `INSERT INTO job ( :mem_used_max, :flops_any_avg, :mem_bw_avg, :load_avg, :net_bw_avg, :net_data_vol_total, :file_bw_avg, :file_data_vol_total );` -type Job struct { - BaseJob - StartTime 
time.Time `json:"startTime" db:"start_time"` -} - type Scannable interface { StructScan(dest interface{}) error } diff --git a/schema/metrics.go b/schema/metrics.go index 0186750..384f65d 100644 --- a/schema/metrics.go +++ b/schema/metrics.go @@ -5,14 +5,14 @@ import ( "io" ) -type JobData map[string]map[string]*JobMetric +type JobData map[string]map[MetricScope]*JobMetric type JobMetric struct { - Unit string `json:"unit"` - Scope MetricScope `json:"scope"` - Timestep int `json:"timestep"` - Series []Series `json:"series"` - StatsSeries *StatsSeries `json:"statisticsSeries,omitempty"` + Unit string `json:"unit"` + Scope MetricScope `json:"scope"` + Timestep int `json:"timestep"` + Series []Series `json:"series"` + StatisticsSeries *StatsSeries `json:"statisticsSeries"` } type Series struct { @@ -29,20 +29,37 @@ type MetricStatistics struct { } type StatsSeries struct { - Mean []Float `json:"mean,omitempty"` - Min []Float `json:"min,omitempty"` - Max []Float `json:"max,omitempty"` + Mean []Float `json:"mean"` + Min []Float `json:"min"` + Max []Float `json:"max"` Percentiles map[int][]Float `json:"percentiles,omitempty"` } type MetricScope string const ( - MetricScopeNode MetricScope = "node" - MetricScopeSocket MetricScope = "socket" - MetricScopeCpu MetricScope = "cpu" + MetricScopeNode MetricScope = "node" + MetricScopeSocket MetricScope = "socket" + MetricScopeCpu MetricScope = "cpu" + MetricScopeHWThread MetricScope = "hwthread" ) +var metricScopeGranularity map[MetricScope]int = map[MetricScope]int{ + MetricScopeNode: 1, + MetricScopeSocket: 2, + MetricScopeCpu: 3, + MetricScopeHWThread: 4, +} + +func (e *MetricScope) MaxGranularity(other MetricScope) MetricScope { + a := metricScopeGranularity[*e] + b := metricScopeGranularity[other] + if a < b { + return *e + } + return other +} + func (e *MetricScope) UnmarshalGQL(v interface{}) error { str, ok := v.(string) if !ok { @@ -50,7 +67,7 @@ func (e *MetricScope) UnmarshalGQL(v interface{}) error { } *e = 
MetricScope(str) - if *e != "node" && *e != "socket" && *e != "cpu" { + if _, ok := metricScopeGranularity[*e]; !ok { return fmt.Errorf("%s is not a valid MetricScope", str) } return nil diff --git a/server.go b/server.go index 18f4ccc..9f76380 100644 --- a/server.go +++ b/server.go @@ -6,7 +6,9 @@ import ( "fmt" "log" "net/http" + "net/url" "os" + "strconv" "github.com/99designs/gqlgen/graphql/handler" "github.com/99designs/gqlgen/graphql/playground" @@ -60,6 +62,9 @@ type ProgramConfig struct { // If overwriten, at least all the options in the defaults below must // be provided! Most options here can be overwritten by the user. UiDefaults map[string]interface{} `json:"ui-defaults"` + + // Where to store MachineState files + MachineStateDir string `json:"machine-state-dir"` } var programConfig ProgramConfig = ProgramConfig{ @@ -95,6 +100,7 @@ var programConfig ProgramConfig = ProgramConfig{ "plot_view_showRoofline": true, "plot_view_showStatTable": true, }, + MachineStateDir: "./var/machine-state", } func main() { @@ -178,8 +184,10 @@ func main() { graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) graphQLPlayground := playground.Handler("GraphQL playground", "/query") api := &api.RestApi{ - DB: db, - AsyncArchiving: programConfig.AsyncArchiving, + DB: db, + AsyncArchiving: programConfig.AsyncArchiving, + Resolver: resolver, + MachineStateDir: programConfig.MachineStateDir, } handleGetLogin := func(rw http.ResponseWriter, r *http.Request) { @@ -255,18 +263,9 @@ func main() { } func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { - router.HandleFunc("/monitoring/jobs/", func(rw http.ResponseWriter, r *http.Request) { - conf, err := config.GetUIConfig(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - + buildFilterPresets := func(query url.Values) map[string]interface{} { filterPresets := map[string]interface{}{} - query := r.URL.Query() - 
if query.Get("tag") != "" { - filterPresets["tag"] = query.Get("tag") - } + if query.Get("cluster") != "" { filterPresets["cluster"] = query.Get("cluster") } @@ -276,17 +275,32 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { if query.Get("state") != "" && schema.JobState(query.Get("state")).Valid() { filterPresets["state"] = query.Get("state") } - if query.Get("from") != "" && query.Get("to") != "" { - filterPresets["startTime"] = map[string]string{ - "from": query.Get("from"), - "to": query.Get("to"), + if rawtags, ok := query["tag"]; ok { + tags := make([]int, len(rawtags)) + for i, tid := range rawtags { + var err error + tags[i], err = strconv.Atoi(tid) + if err != nil { + tags[i] = -1 + } } + filterPresets["tags"] = tags + } + + return filterPresets + } + + router.HandleFunc("/monitoring/jobs/", func(rw http.ResponseWriter, r *http.Request) { + conf, err := config.GetUIConfig(r) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return } templates.Render(rw, r, "monitoring/jobs/", &templates.Page{ Title: "Jobs - ClusterCockpit", Config: conf, - FilterPresets: filterPresets, + FilterPresets: buildFilterPresets(r.URL.Query()), }) }) @@ -340,9 +354,10 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { // is disabled or the user does not exist but has started jobs. 
templates.Render(rw, r, "monitoring/user/", &templates.Page{ - Title: fmt.Sprintf("User %s - ClusterCockpit", id), - Config: conf, - Infos: map[string]interface{}{"userId": id}, + Title: fmt.Sprintf("User %s - ClusterCockpit", id), + Config: conf, + Infos: map[string]interface{}{"username": id}, + FilterPresets: buildFilterPresets(r.URL.Query()), }) }) From 3f88e512f0ebb4c6908a34efb035452c84e4b385 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Fri, 7 Jan 2022 09:44:34 +0100 Subject: [PATCH 20/25] fixes in the GraphQL schema --- graph/generated/generated.go | 635 +++++++++++++++-------------------- graph/model/models_gen.go | 23 +- graph/resolver.go | 22 +- graph/schema.graphqls | 16 +- graph/schema.resolvers.go | 15 +- graph/stats.go | 2 +- 6 files changed, 307 insertions(+), 406 deletions(-) diff --git a/graph/generated/generated.go b/graph/generated/generated.go index ba23a9e..04ccb0e 100644 --- a/graph/generated/generated.go +++ b/graph/generated/generated.go @@ -38,7 +38,6 @@ type Config struct { type ResolverRoot interface { Job() JobResolver - JobMetric() JobMetricResolver Mutation() MutationResolver Query() QueryResolver } @@ -110,13 +109,6 @@ type ComplexityRoot struct { Name func(childComplexity int) int } - JobResource struct { - Accelerators func(childComplexity int) int - Configuration func(childComplexity int) int - Hostname func(childComplexity int) int - Hwthreads func(childComplexity int) int - } - JobResultList struct { Count func(childComplexity int) int Items func(childComplexity int) int @@ -198,6 +190,13 @@ type ComplexityRoot struct { Tags func(childComplexity int) int } + Resource struct { + Accelerators func(childComplexity int) int + Configuration func(childComplexity int) int + HWThreads func(childComplexity int) int + Hostname func(childComplexity int) int + } + Series struct { Data func(childComplexity int) int Hostname func(childComplexity int) int @@ -234,10 +233,6 @@ type ComplexityRoot struct { type JobResolver interface { Tags(ctx 
context.Context, obj *schema.Job) ([]*schema.Tag, error) - Resources(ctx context.Context, obj *schema.Job) ([]*model.JobResource, error) -} -type JobMetricResolver interface { - StatisticsSeries(ctx context.Context, obj *schema.JobMetric) ([]*schema.StatsSeries, error) } type MutationResolver interface { CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) @@ -546,34 +541,6 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobMetricWithName.Name(childComplexity), true - case "JobResource.accelerators": - if e.complexity.JobResource.Accelerators == nil { - break - } - - return e.complexity.JobResource.Accelerators(childComplexity), true - - case "JobResource.configuration": - if e.complexity.JobResource.Configuration == nil { - break - } - - return e.complexity.JobResource.Configuration(childComplexity), true - - case "JobResource.hostname": - if e.complexity.JobResource.Hostname == nil { - break - } - - return e.complexity.JobResource.Hostname(childComplexity), true - - case "JobResource.hwthreads": - if e.complexity.JobResource.Hwthreads == nil { - break - } - - return e.complexity.JobResource.Hwthreads(childComplexity), true - case "JobResultList.count": if e.complexity.JobResultList.Count == nil { break @@ -991,6 +958,34 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Query.Tags(childComplexity), true + case "Resource.accelerators": + if e.complexity.Resource.Accelerators == nil { + break + } + + return e.complexity.Resource.Accelerators(childComplexity), true + + case "Resource.configuration": + if e.complexity.Resource.Configuration == nil { + break + } + + return e.complexity.Resource.Configuration(childComplexity), true + + case "Resource.hwthreads": + if e.complexity.Resource.HWThreads == nil { + break + } + + return e.complexity.Resource.HWThreads(childComplexity), true + + case "Resource.hostname": + if 
e.complexity.Resource.Hostname == nil { + break + } + + return e.complexity.Resource.Hostname(childComplexity), true + case "Series.data": if e.complexity.Series.Data == nil { break @@ -1204,7 +1199,7 @@ type Job { monitoringStatus: Int! state: JobState! tags: [Tag!]! - resources: [JobResource!]! + resources: [Resource!]! } type Cluster { @@ -1244,7 +1239,7 @@ type Accelerator { type MetricConfig { name: String! unit: String! - scope: String! + scope: MetricScope! timestep: Int! peak: Float! normal: Float! @@ -1258,7 +1253,7 @@ type Tag { name: String! } -type JobResource { +type Resource { hostname: String! hwthreads: [Int!] accelerators: [Int!] @@ -1274,8 +1269,8 @@ type JobMetric { unit: String! scope: MetricScope! timestep: Int! - series: [Series!]! - statisticsSeries: [StatsSeries!] + series: [Series!] + statisticsSeries: StatsSeries } type Series { @@ -1292,9 +1287,9 @@ type MetricStatistics { } type StatsSeries { - mean: [NullableFloat!] - min: [NullableFloat!] - max: [NullableFloat!] + mean: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! 
} type MetricFootprints { @@ -2928,14 +2923,14 @@ func (ec *executionContext) _Job_resources(ctx context.Context, field graphql.Co Object: "Job", Field: field, Args: nil, - IsMethod: true, - IsResolver: true, + IsMethod: false, + IsResolver: false, } ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Job().Resources(rctx, obj) + return obj.Resources, nil }) if err != nil { ec.Error(ctx, err) @@ -2947,9 +2942,9 @@ func (ec *executionContext) _Job_resources(ctx context.Context, field graphql.Co } return graphql.Null } - res := resTmp.([]*model.JobResource) + res := resTmp.([]*schema.Resource) fc.Result = res - return ec.marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResourceᚄ(ctx, field.Selections, res) + return ec.marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐResourceᚄ(ctx, field.Selections, res) } func (ec *executionContext) _JobMetric_unit(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { @@ -3082,14 +3077,11 @@ func (ec *executionContext) _JobMetric_series(ctx context.Context, field graphql return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } res := resTmp.([]schema.Series) fc.Result = res - return ec.marshalNSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeriesᚄ(ctx, field.Selections, res) + return ec.marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeriesᚄ(ctx, field.Selections, res) } func (ec *executionContext) _JobMetric_statisticsSeries(ctx context.Context, field graphql.CollectedField, obj *schema.JobMetric) (ret graphql.Marshaler) { @@ -3103,14 +3095,14 @@ func (ec *executionContext) _JobMetric_statisticsSeries(ctx context.Context, fie Object: "JobMetric", Field: field, Args: nil, - 
IsMethod: true, - IsResolver: true, + IsMethod: false, + IsResolver: false, } ctx = graphql.WithFieldContext(ctx, fc) resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.JobMetric().StatisticsSeries(rctx, obj) + return obj.StatisticsSeries, nil }) if err != nil { ec.Error(ctx, err) @@ -3119,9 +3111,9 @@ func (ec *executionContext) _JobMetric_statisticsSeries(ctx context.Context, fie if resTmp == nil { return graphql.Null } - res := resTmp.([]*schema.StatsSeries) + res := resTmp.(*schema.StatsSeries) fc.Result = res - return ec.marshalOStatsSeries2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeriesᚄ(ctx, field.Selections, res) + return ec.marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeries(ctx, field.Selections, res) } func (ec *executionContext) _JobMetricWithName_name(ctx context.Context, field graphql.CollectedField, obj *model.JobMetricWithName) (ret graphql.Marshaler) { @@ -3194,137 +3186,6 @@ func (ec *executionContext) _JobMetricWithName_metric(ctx context.Context, field return ec.marshalNJobMetric2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐJobMetric(ctx, field.Selections, res) } -func (ec *executionContext) _JobResource_hostname(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobResource", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Hostname, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if 
!graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - res := resTmp.(string) - fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobResource_hwthreads(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobResource", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Hwthreads, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.([]int) - fc.Result = res - return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobResource_accelerators(ctx context.Context, field graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobResource", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Accelerators, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.([]int) - fc.Result = res - return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) -} - -func (ec *executionContext) _JobResource_configuration(ctx context.Context, field 
graphql.CollectedField, obj *model.JobResource) (ret graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - fc := &graphql.FieldContext{ - Object: "JobResource", - Field: field, - Args: nil, - IsMethod: false, - IsResolver: false, - } - - ctx = graphql.WithFieldContext(ctx, fc) - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { - ctx = rctx // use context from middleware stack in children - return obj.Configuration, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - return graphql.Null - } - res := resTmp.(*string) - fc.Result = res - return ec.marshalOString2ᚖstring(ctx, field.Selections, res) -} - func (ec *executionContext) _JobResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -3801,9 +3662,9 @@ func (ec *executionContext) _MetricConfig_scope(ctx context.Context, field graph } return graphql.Null } - res := resTmp.(string) + res := resTmp.(schema.MetricScope) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐMetricScope(ctx, field.Selections, res) } func (ec *executionContext) _MetricConfig_timestep(ctx context.Context, field graphql.CollectedField, obj *model.MetricConfig) (ret graphql.Marshaler) { @@ -5250,6 +5111,137 @@ func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.C return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res) } +func (ec *executionContext) _Resource_hostname(ctx context.Context, field graphql.CollectedField, obj *schema.Resource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + 
}() + fc := &graphql.FieldContext{ + Object: "Resource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Hostname, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) _Resource_hwthreads(ctx context.Context, field graphql.CollectedField, obj *schema.Resource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Resource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HWThreads, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]int) + fc.Result = res + return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Resource_accelerators(ctx context.Context, field graphql.CollectedField, obj *schema.Resource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Resource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx 
context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Accelerators, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]int) + fc.Result = res + return ec.marshalOInt2ᚕintᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) _Resource_configuration(ctx context.Context, field graphql.CollectedField, obj *schema.Resource) (ret graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + fc := &graphql.FieldContext{ + Object: "Resource", + Field: field, + Args: nil, + IsMethod: false, + IsResolver: false, + } + + ctx = graphql.WithFieldContext(ctx, fc) + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Configuration, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalOString2string(ctx, field.Selections, res) +} + func (ec *executionContext) _Series_hostname(ctx context.Context, field graphql.CollectedField, obj *schema.Series) (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { @@ -5409,11 +5401,14 @@ func (ec *executionContext) _StatsSeries_mean(ctx context.Context, field graphql return graphql.Null } if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } return graphql.Null } res := resTmp.([]schema.Float) fc.Result = res - return ec.marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) + return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } func (ec *executionContext) _StatsSeries_min(ctx context.Context, field 
graphql.CollectedField, obj *schema.StatsSeries) (ret graphql.Marshaler) { @@ -5441,11 +5436,14 @@ func (ec *executionContext) _StatsSeries_min(ctx context.Context, field graphql. return graphql.Null } if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } return graphql.Null } res := resTmp.([]schema.Float) fc.Result = res - return ec.marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) + return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } func (ec *executionContext) _StatsSeries_max(ctx context.Context, field graphql.CollectedField, obj *schema.StatsSeries) (ret graphql.Marshaler) { @@ -5473,11 +5471,14 @@ func (ec *executionContext) _StatsSeries_max(ctx context.Context, field graphql. return graphql.Null } if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } return graphql.Null } res := resTmp.([]schema.Float) fc.Result = res - return ec.marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) + return ec.marshalNNullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx, field.Selections, res) } func (ec *executionContext) _Tag_id(ctx context.Context, field graphql.CollectedField, obj *schema.Tag) (ret graphql.Marshaler) { @@ -7532,19 +7533,10 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj return res }) case "resources": - field := field - out.Concurrently(i, func() (res graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - } - }() - res = ec._Job_resources(ctx, field, obj) - if res == graphql.Null { - atomic.AddUint32(&invalids, 1) - } - return res - }) + out.Values[i] = ec._Job_resources(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&invalids, 1) + } default: panic("unknown field 
" + strconv.Quote(field.Name)) } @@ -7570,34 +7562,22 @@ func (ec *executionContext) _JobMetric(ctx context.Context, sel ast.SelectionSet case "unit": out.Values[i] = ec._JobMetric_unit(ctx, field, obj) if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + invalids++ } case "scope": out.Values[i] = ec._JobMetric_scope(ctx, field, obj) if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + invalids++ } case "timestep": out.Values[i] = ec._JobMetric_timestep(ctx, field, obj) if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) + invalids++ } case "series": out.Values[i] = ec._JobMetric_series(ctx, field, obj) - if out.Values[i] == graphql.Null { - atomic.AddUint32(&invalids, 1) - } case "statisticsSeries": - field := field - out.Concurrently(i, func() (res graphql.Marshaler) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - } - }() - res = ec._JobMetric_statisticsSeries(ctx, field, obj) - return res - }) + out.Values[i] = ec._JobMetric_statisticsSeries(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7641,39 +7621,6 @@ func (ec *executionContext) _JobMetricWithName(ctx context.Context, sel ast.Sele return out } -var jobResourceImplementors = []string{"JobResource"} - -func (ec *executionContext) _JobResource(ctx context.Context, sel ast.SelectionSet, obj *model.JobResource) graphql.Marshaler { - fields := graphql.CollectFields(ec.OperationContext, sel, jobResourceImplementors) - - out := graphql.NewFieldSet(fields) - var invalids uint32 - for i, field := range fields { - switch field.Name { - case "__typename": - out.Values[i] = graphql.MarshalString("JobResource") - case "hostname": - out.Values[i] = ec._JobResource_hostname(ctx, field, obj) - if out.Values[i] == graphql.Null { - invalids++ - } - case "hwthreads": - out.Values[i] = ec._JobResource_hwthreads(ctx, field, obj) - case "accelerators": - out.Values[i] = ec._JobResource_accelerators(ctx, 
field, obj) - case "configuration": - out.Values[i] = ec._JobResource_configuration(ctx, field, obj) - default: - panic("unknown field " + strconv.Quote(field.Name)) - } - } - out.Dispatch() - if invalids > 0 { - return graphql.Null - } - return out -} - var jobResultListImplementors = []string{"JobResultList"} func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.SelectionSet, obj *model.JobResultList) graphql.Marshaler { @@ -8227,6 +8174,39 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr return out } +var resourceImplementors = []string{"Resource"} + +func (ec *executionContext) _Resource(ctx context.Context, sel ast.SelectionSet, obj *schema.Resource) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, resourceImplementors) + + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Resource") + case "hostname": + out.Values[i] = ec._Resource_hostname(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } + case "hwthreads": + out.Values[i] = ec._Resource_hwthreads(ctx, field, obj) + case "accelerators": + out.Values[i] = ec._Resource_accelerators(ctx, field, obj) + case "configuration": + out.Values[i] = ec._Resource_configuration(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out +} + var seriesImplementors = []string{"Series"} func (ec *executionContext) _Series(ctx context.Context, sel ast.SelectionSet, obj *schema.Series) graphql.Marshaler { @@ -8276,10 +8256,19 @@ func (ec *executionContext) _StatsSeries(ctx context.Context, sel ast.SelectionS out.Values[i] = graphql.MarshalString("StatsSeries") case "mean": out.Values[i] = ec._StatsSeries_mean(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } case "min": 
out.Values[i] = ec._StatsSeries_min(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } case "max": out.Values[i] = ec._StatsSeries_max(ctx, field, obj) + if out.Values[i] == graphql.Null { + invalids++ + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -9118,53 +9107,6 @@ func (ec *executionContext) marshalNJobMetricWithName2ᚖgithubᚗcomᚋClusterC return ec._JobMetricWithName(ctx, sel, v) } -func (ec *executionContext) marshalNJobResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.JobResource) graphql.Marshaler { - ret := make(graphql.Array, len(v)) - var wg sync.WaitGroup - isLen1 := len(v) == 1 - if !isLen1 { - wg.Add(len(v)) - } - for i := range v { - i := i - fc := &graphql.FieldContext{ - Index: &i, - Result: &v[i], - } - ctx := graphql.WithFieldContext(ctx, fc) - f := func(i int) { - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil - } - }() - if !isLen1 { - defer wg.Done() - } - ret[i] = ec.marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResource(ctx, sel, v[i]) - } - if isLen1 { - f(i) - } else { - go f(i) - } - - } - wg.Wait() - return ret -} - -func (ec *executionContext) marshalNJobResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResource(ctx context.Context, sel ast.SelectionSet, v *model.JobResource) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - return ec._JobResource(ctx, sel, v) -} - func (ec *executionContext) marshalNJobResultList2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐJobResultList(ctx context.Context, sel ast.SelectionSet, v model.JobResultList) graphql.Marshaler { return ec._JobResultList(ctx, sel, &v) } @@ -9511,11 +9453,7 @@ func (ec *executionContext) marshalNPartition2ᚖgithubᚗcomᚋClusterCockpit return 
ec._Partition(ctx, sel, v) } -func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler { - return ec._Series(ctx, sel, &v) -} - -func (ec *executionContext) marshalNSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Series) graphql.Marshaler { +func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.Resource) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup isLen1 := len(v) == 1 @@ -9539,7 +9477,7 @@ func (ec *executionContext) marshalNSeries2ᚕgithubᚗcomᚋClusterCockpitᚋcc if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeries(ctx, sel, v[i]) + ret[i] = ec.marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐResource(ctx, sel, v[i]) } if isLen1 { f(i) @@ -9552,6 +9490,20 @@ func (ec *executionContext) marshalNSeries2ᚕgithubᚗcomᚋClusterCockpitᚋcc return ret } +func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐResource(ctx context.Context, sel ast.SelectionSet, v *schema.Resource) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + return ec._Resource(ctx, sel, v) +} + +func (ec *executionContext) marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeries(ctx context.Context, sel ast.SelectionSet, v schema.Series) graphql.Marshaler { + return ec._Series(ctx, sel, &v) +} + func (ec *executionContext) unmarshalNSortDirectionEnum2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐSortDirectionEnum(ctx context.Context, v interface{}) (model.SortDirectionEnum, error) { var res model.SortDirectionEnum err := 
res.UnmarshalGQL(v) @@ -9562,16 +9514,6 @@ func (ec *executionContext) marshalNSortDirectionEnum2githubᚗcomᚋClusterCock return v } -func (ec *executionContext) marshalNStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler { - if v == nil { - if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null - } - return ec._StatsSeries(ctx, sel, v) -} - func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) { res, err := graphql.UnmarshalString(v) return res, graphql.ErrorOnPath(ctx, err) @@ -10268,42 +10210,6 @@ func (ec *executionContext) marshalOMetricStatistics2ᚖgithubᚗcomᚋClusterCo return ec._MetricStatistics(ctx, sel, v) } -func (ec *executionContext) unmarshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx context.Context, v interface{}) ([]schema.Float, error) { - if v == nil { - return nil, nil - } - var vSlice []interface{} - if v != nil { - if tmp1, ok := v.([]interface{}); ok { - vSlice = tmp1 - } else { - vSlice = []interface{}{v} - } - } - var err error - res := make([]schema.Float, len(vSlice)) - for i := range vSlice { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) - res[i], err = ec.unmarshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloat(ctx, vSlice[i]) - if err != nil { - return nil, err - } - } - return res, nil -} - -func (ec *executionContext) marshalONullableFloat2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloatᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Float) graphql.Marshaler { - if v == nil { - return graphql.Null - } - ret := make(graphql.Array, len(v)) - for i := range v { - ret[i] = ec.marshalNNullableFloat2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐFloat(ctx, sel, v[i]) - } - - return ret -} - func (ec *executionContext) 
unmarshalOOrderByInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋgraphᚋmodelᚐOrderByInput(ctx context.Context, v interface{}) (*model.OrderByInput, error) { if v == nil { return nil, nil @@ -10320,7 +10226,7 @@ func (ec *executionContext) unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockp return &res, graphql.ErrorOnPath(ctx, err) } -func (ec *executionContext) marshalOStatsSeries2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []*schema.StatsSeries) graphql.Marshaler { +func (ec *executionContext) marshalOSeries2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeriesᚄ(ctx context.Context, sel ast.SelectionSet, v []schema.Series) graphql.Marshaler { if v == nil { return graphql.Null } @@ -10347,7 +10253,7 @@ func (ec *executionContext) marshalOStatsSeries2ᚕᚖgithubᚗcomᚋClusterCock if !isLen1 { defer wg.Done() } - ret[i] = ec.marshalNStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeries(ctx, sel, v[i]) + ret[i] = ec.marshalNSeries2githubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐSeries(ctx, sel, v[i]) } if isLen1 { f(i) @@ -10360,6 +10266,13 @@ func (ec *executionContext) marshalOStatsSeries2ᚕᚖgithubᚗcomᚋClusterCock return ret } +func (ec *executionContext) marshalOStatsSeries2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑjobarchiveᚋschemaᚐStatsSeries(ctx context.Context, sel ast.SelectionSet, v *schema.StatsSeries) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._StatsSeries(ctx, sel, v) +} + func (ec *executionContext) unmarshalOString2string(ctx context.Context, v interface{}) (string, error) { res, err := graphql.UnmarshalString(v) return res, graphql.ErrorOnPath(ctx, err) diff --git a/graph/model/models_gen.go b/graph/model/models_gen.go index 1db1574..dfc23fc 100644 --- a/graph/model/models_gen.go +++ b/graph/model/models_gen.go @@ -64,13 +64,6 @@ type JobMetricWithName struct { Metric *schema.JobMetric `json:"metric"` } -type JobResource struct { - Hostname string `json:"hostname"` 
- Hwthreads []int `json:"hwthreads"` - Accelerators []int `json:"accelerators"` - Configuration *string `json:"configuration"` -} - type JobResultList struct { Items []*schema.Job `json:"items"` Offset *int `json:"offset"` @@ -89,14 +82,14 @@ type JobsStatistics struct { } type MetricConfig struct { - Name string `json:"name"` - Unit string `json:"unit"` - Scope string `json:"scope"` - Timestep int `json:"timestep"` - Peak float64 `json:"peak"` - Normal float64 `json:"normal"` - Caution float64 `json:"caution"` - Alert float64 `json:"alert"` + Name string `json:"name"` + Unit string `json:"unit"` + Scope schema.MetricScope `json:"scope"` + Timestep int `json:"timestep"` + Peak float64 `json:"peak"` + Normal float64 `json:"normal"` + Caution float64 `json:"caution"` + Alert float64 `json:"alert"` } type MetricFootprints struct { diff --git a/graph/resolver.go b/graph/resolver.go index 2804bd9..ee90752 100644 --- a/graph/resolver.go +++ b/graph/resolver.go @@ -98,7 +98,7 @@ func securityCheck(ctx context.Context, query sq.SelectBuilder) sq.SelectBuilder // Build a sq.SelectBuilder out of a schema.JobFilter. 
func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.SelectBuilder { if filter.Tags != nil { - query = query.Join("jobtag ON jobtag.job_id = job.id").Where("jobtag.tag_id IN ?", filter.Tags) + query = query.Join("jobtag ON jobtag.job_id = job.id").Where(sq.Eq{"jobtag.tag_id": filter.Tags}) } if filter.JobID != nil { query = buildStringCondition("job.job_id", filter.JobID, query) @@ -119,7 +119,12 @@ func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select query = buildIntCondition("job.duration", filter.Duration, query) } if filter.State != nil { - query = query.Where("job.job_state IN ?", filter.State) + states := make([]string, len(filter.State)) + for i, val := range filter.State { + states[i] = string(val) + } + + query = query.Where(sq.Eq{"job.job_state": states}) } if filter.NumNodes != nil { query = buildIntCondition("job.num_nodes", filter.NumNodes, query) @@ -164,20 +169,23 @@ func buildStringCondition(field string, cond *model.StringInput, query sq.Select return query.Where(field+" = ?", *cond.Eq) } if cond.StartsWith != nil { - return query.Where(field+"LIKE ?", fmt.Sprint(*cond.StartsWith, "%")) + return query.Where(field+" LIKE ?", fmt.Sprint(*cond.StartsWith, "%")) } if cond.EndsWith != nil { - return query.Where(field+"LIKE ?", fmt.Sprint("%", *cond.StartsWith)) + return query.Where(field+" LIKE ?", fmt.Sprint("%", *cond.EndsWith)) } if cond.Contains != nil { - return query.Where(field+"LIKE ?", fmt.Sprint("%", *cond.StartsWith, "%")) + return query.Where(field+" LIKE ?", fmt.Sprint("%", *cond.Contains, "%")) } return query } +var matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)") +var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") + func toSnakeCase(str string) string { - matchFirstCap := regexp.MustCompile("(.)([A-Z][a-z]+)") - matchAllCap := regexp.MustCompile("([a-z0-9])([A-Z])") + str = strings.ReplaceAll(str, "'", "") + str = strings.ReplaceAll(str, "\\", "") snake := 
matchFirstCap.ReplaceAllString(str, "${1}_${2}") snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}") return strings.ToLower(snake) diff --git a/graph/schema.graphqls b/graph/schema.graphqls index e86d71a..26a8821 100644 --- a/graph/schema.graphqls +++ b/graph/schema.graphqls @@ -21,7 +21,7 @@ type Job { monitoringStatus: Int! state: JobState! tags: [Tag!]! - resources: [JobResource!]! + resources: [Resource!]! } type Cluster { @@ -61,7 +61,7 @@ type Accelerator { type MetricConfig { name: String! unit: String! - scope: String! + scope: MetricScope! timestep: Int! peak: Float! normal: Float! @@ -75,7 +75,7 @@ type Tag { name: String! } -type JobResource { +type Resource { hostname: String! hwthreads: [Int!] accelerators: [Int!] @@ -91,8 +91,8 @@ type JobMetric { unit: String! scope: MetricScope! timestep: Int! - series: [Series!]! - statisticsSeries: [StatsSeries!] + series: [Series!] + statisticsSeries: StatsSeries } type Series { @@ -109,9 +109,9 @@ type MetricStatistics { } type StatsSeries { - mean: [NullableFloat!] - min: [NullableFloat!] - max: [NullableFloat!] + mean: [NullableFloat!]! + min: [NullableFloat!]! + max: [NullableFloat!]! 
} type MetricFootprints { diff --git a/graph/schema.resolvers.go b/graph/schema.resolvers.go index 43ae37c..20ba974 100644 --- a/graph/schema.resolvers.go +++ b/graph/schema.resolvers.go @@ -39,14 +39,6 @@ func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, return tags, nil } -func (r *jobResolver) Resources(ctx context.Context, obj *schema.Job) ([]*model.JobResource, error) { - panic(fmt.Errorf("not implemented")) -} - -func (r *jobMetricResolver) StatisticsSeries(ctx context.Context, obj *schema.JobMetric) ([]*schema.StatsSeries, error) { - panic(fmt.Errorf("not implemented")) -} - func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) { res, err := r.DB.Exec("INSERT INTO tag (tag_type, tag_name) VALUES ($1, $2)", typeArg, name) if err != nil { @@ -172,8 +164,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str return nil, err } - // TODO: FIXME: Do something with `scopes` - data, err := metricdata.LoadData(job, metrics, ctx) + data, err := metricdata.LoadData(job, metrics, scopes, ctx) if err != nil { return nil, err } @@ -249,9 +240,6 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [ // Job returns generated.JobResolver implementation. func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} } -// JobMetric returns generated.JobMetricResolver implementation. -func (r *Resolver) JobMetric() generated.JobMetricResolver { return &jobMetricResolver{r} } - // Mutation returns generated.MutationResolver implementation. 
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} } @@ -259,6 +247,5 @@ func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResol func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } type jobResolver struct{ *Resolver } -type jobMetricResolver struct{ *Resolver } type mutationResolver struct{ *Resolver } type queryResolver struct{ *Resolver } diff --git a/graph/stats.go b/graph/stats.go index 2ad5aea..2bb0505 100644 --- a/graph/stats.go +++ b/graph/stats.go @@ -206,7 +206,7 @@ func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilte } for _, job := range jobs { - jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, ctx) + jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx) if err != nil { return nil, err } From e581bfc70f50550d6f938150baad8bc86aa31091 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Fri, 7 Jan 2022 09:47:41 +0100 Subject: [PATCH 21/25] start work on supporting metrics with a scope of hwthread --- config/config.go | 13 +++ metricdata/archive.go | 57 +++++++++- metricdata/cc-metric-store.go | 202 +++++++++++++++++++++++++++------- metricdata/metricdata.go | 6 +- 4 files changed, 235 insertions(+), 43 deletions(-) diff --git a/config/config.go b/config/config.go index e4011ac..e35caaa 100644 --- a/config/config.go +++ b/config/config.go @@ -156,6 +156,19 @@ func GetClusterConfig(cluster string) *model.Cluster { return nil } +func GetPartition(cluster, partition string) *model.Partition { + for _, c := range Clusters { + if c.Name == cluster { + for _, p := range c.Partitions { + if p.Name == partition { + return p + } + } + } + } + return nil +} + func GetMetricConfig(cluster, metric string) *model.MetricConfig { for _, c := range Clusters { if c.Name == cluster { diff --git a/metricdata/archive.go b/metricdata/archive.go index def080a..4ef6e6c 100644 
--- a/metricdata/archive.go +++ b/metricdata/archive.go @@ -136,11 +136,18 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { for _, mc := range metricConfigs { allMetrics = append(allMetrics, mc.Name) } - jobData, err := LoadData(job, allMetrics, ctx) + + // TODO: Use more granular resolution on non-exclusive jobs? + scopes := []schema.MetricScope{schema.MetricScopeNode} + jobData, err := LoadData(job, allMetrics, scopes, ctx) if err != nil { return nil, err } + if err := calcStatisticsSeries(job, jobData); err != nil { + return nil, err + } + jobMeta := &schema.JobMeta{ BaseJob: job.BaseJob, StartTime: job.StartTime.Unix(), @@ -212,3 +219,51 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { return jobMeta, f.Close() } + +// Add statisticsSeries fields +func calcStatisticsSeries(job *schema.Job, jobData schema.JobData) error { + for _, scopes := range jobData { + for _, jobMetric := range scopes { + if jobMetric.StatisticsSeries != nil { + continue + } + + if len(jobMetric.Series) < 5 { + continue + } + + n := 0 + for _, series := range jobMetric.Series { + if len(series.Data) > n { + n = len(series.Data) + } + } + + mean, min, max := make([]schema.Float, n), make([]schema.Float, n), make([]schema.Float, n) + for i := 0; i < n; i++ { + sum, smin, smax := schema.Float(0.), math.MaxFloat32, -math.MaxFloat32 + for _, series := range jobMetric.Series { + if len(series.Data) >= i { + sum, smin, smax = schema.NaN, math.NaN(), math.NaN() + break + } + x := series.Data[i] + sum += x + smin = math.Min(smin, float64(x)) + smax = math.Max(smax, float64(x)) + } + sum /= schema.Float(len(jobMetric.Series)) + mean[i] = sum + min[i] = schema.Float(smin) + max[i] = schema.Float(smax) + } + + jobMetric.StatisticsSeries.Mean = mean + jobMetric.StatisticsSeries.Min = min + jobMetric.StatisticsSeries.Max = max + jobMetric.Series = nil + } + } + + return nil +} diff --git a/metricdata/cc-metric-store.go 
b/metricdata/cc-metric-store.go index 603c82f..62451c9 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -1,12 +1,14 @@ package metricdata import ( + "bufio" "bytes" "context" "encoding/json" "errors" "fmt" "net/http" + "strconv" "time" "github.com/ClusterCockpit/cc-jobarchive/config" @@ -29,9 +31,9 @@ type ApiMetricData struct { From int64 `json:"from"` To int64 `json:"to"` Data []schema.Float `json:"data"` - Avg *float64 `json:"avg"` - Min *float64 `json:"min"` - Max *float64 `json:"max"` + Avg schema.Float `json:"avg"` + Min schema.Float `json:"min"` + Max schema.Float `json:"max"` } type ApiStatsData struct { @@ -78,53 +80,175 @@ func (ccms *CCMetricStore) doRequest(job *schema.Job, suffix string, metrics []s return ccms.client.Do(req) } -func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, ctx context.Context) (schema.JobData, error) { - res, err := ccms.doRequest(job, "timeseries?with-stats=true", metrics, ctx) - if err != nil { +func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) { + + type ApiQuery struct { + Metric string `json:"metric"` + Hostname string `json:"hostname"` + Type *string `json:"type,omitempty"` + TypeIds []string `json:"type-ids,omitempty"` + SubType *string `json:"subtype,omitempty"` + SubTypeIds []string `json:"subtype-ids,omitempty"` + } + + type ApiQueryRequest struct { + Cluster string `json:"cluster"` + From int64 `json:"from"` + To int64 `json:"to"` + Queries []ApiQuery `json:"queries"` + } + + type ApiQueryResponse struct { + ApiMetricData + Query *ApiQuery `json:"query"` + } + + reqBody := ApiQueryRequest{ + Cluster: job.Cluster, + From: job.StartTime.Unix(), + To: job.StartTime.Add(time.Duration(job.Duration)).Unix(), + Queries: make([]ApiQuery, 0), + } + + if len(scopes) != 1 { + return nil, errors.New("todo: support more than one scope in a query") + } + + topology := 
config.GetPartition(job.Cluster, job.Partition).Topology + scopeForMetric := map[string]schema.MetricScope{} + for _, metric := range metrics { + mc := config.GetMetricConfig(job.Cluster, metric) + nativeScope, requestedScope := mc.Scope, scopes[0] + + // case 1: A metric is requested at node scope with a native scope of node as well + // case 2: A metric is requested at node scope and node is exclusive + if (nativeScope == requestedScope && nativeScope == schema.MetricScopeNode) || + (job.Exclusive == 1 && requestedScope == schema.MetricScopeNode) { + nodes := map[string]bool{} + for _, resource := range job.Resources { + nodes[resource.Hostname] = true + } + + for node := range nodes { + reqBody.Queries = append(reqBody.Queries, ApiQuery{ + Metric: metric, + Hostname: node, + }) + } + + scopeForMetric[metric] = schema.MetricScopeNode + continue + } + + // case: Read a metric at hwthread scope with native scope hwthread + if nativeScope == requestedScope && nativeScope == schema.MetricScopeHWThread && job.NumNodes == 1 { + hwthreads := job.Resources[0].HWThreads + if hwthreads == nil { + hwthreads = topology.Node + } + + t := "cpu" // TODO/FIXME: inconsistency between cc-metric-collector and ClusterCockpit + for _, hwthread := range hwthreads { + reqBody.Queries = append(reqBody.Queries, ApiQuery{ + Metric: metric, + Hostname: job.Resources[0].Hostname, + Type: &t, + TypeIds: []string{strconv.Itoa(hwthread)}, + }) + } + + scopeForMetric[metric] = schema.MetricScopeHWThread + continue + } + + // case: A metric is requested at node scope, has a hwthread scope and node is not exclusive and runs on a single node + if requestedScope == schema.MetricScopeNode && nativeScope == schema.MetricScopeHWThread && job.Exclusive != 1 && job.NumNodes == 1 { + hwthreads := job.Resources[0].HWThreads + if hwthreads == nil { + hwthreads = topology.Node + } + + t := "cpu" // TODO/FIXME: inconsistency between cc-metric-collector and ClusterCockpit + ids := make([]string, 0, 
len(hwthreads)) + for _, hwthread := range hwthreads { + ids = append(ids, strconv.Itoa(hwthread)) + } + + reqBody.Queries = append(reqBody.Queries, ApiQuery{ + Metric: metric, + Hostname: job.Resources[0].Hostname, + Type: &t, + TypeIds: ids, + }) + scopeForMetric[metric] = schema.MetricScopeNode + continue + } + + // TODO: Job teilt sich knoten und metric native scope ist kleiner als node + panic("todo") + } + + buf := &bytes.Buffer{} + if err := json.NewEncoder(buf).Encode(reqBody); err != nil { return nil, err } - resdata := make([]map[string]ApiMetricData, 0, len(job.Resources)) - if err := json.NewDecoder(res.Body).Decode(&resdata); err != nil { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, ccms.url+"/api/query", buf) + if err != nil { + return nil, err + } + if ccms.jwt != "" { + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ccms.jwt)) + } + res, err := ccms.client.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("cc-metric-store replied with: %s", res.Status) + } + + var resBody []ApiQueryResponse + if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { return nil, err } var jobData schema.JobData = make(schema.JobData) - for _, metric := range metrics { + for _, res := range resBody { + metric := res.Query.Metric + if res.Error != nil { + return nil, fmt.Errorf("cc-metric-store error while fetching %s: %s", metric, *res.Error) + } + mc := config.GetMetricConfig(job.Cluster, metric) - metricData := &schema.JobMetric{ - Scope: "node", // TODO: FIXME: Whatever... 
- Unit: mc.Unit, - Timestep: mc.Timestep, - Series: make([]schema.Series, 0, len(job.Resources)), + scope := scopeForMetric[metric] + jobMetric, ok := jobData[metric][scope] + if !ok { + jobMetric = &schema.JobMetric{ + Unit: mc.Unit, + Scope: scope, + Timestep: mc.Timestep, + Series: make([]schema.Series, 0), + } + jobData[metric][scope] = jobMetric } - for i, node := range job.Resources { - if node.Accelerators != nil || node.HWThreads != nil { - // TODO/FIXME: - return nil, errors.New("todo: cc-metric-store resources: Accelerator/HWThreads") - } - - data := resdata[i][metric] - if data.Error != nil { - return nil, errors.New(*data.Error) - } - - if data.Avg == nil || data.Min == nil || data.Max == nil { - return nil, fmt.Errorf("no data for node '%s' and metric '%s'", node.Hostname, metric) - } - - metricData.Series = append(metricData.Series, schema.Series{ - Hostname: node.Hostname, - Data: data.Data, - Statistics: &schema.MetricStatistics{ - Avg: *data.Avg, - Min: *data.Min, - Max: *data.Max, - }, - }) + id := (*int)(nil) + if res.Query.Type != nil { + id = new(int) + *id, _ = strconv.Atoi(res.Query.TypeIds[0]) } - jobData[metric] = map[string]*schema.JobMetric{"node": metricData} + + jobMetric.Series = append(jobMetric.Series, schema.Series{ + Hostname: res.Query.Hostname, + Id: id, + Statistics: &schema.MetricStatistics{ + Avg: float64(res.Avg), + Min: float64(res.Min), + Max: float64(res.Max), + }, + Data: res.Data, + }) } return jobData, nil diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index 18f3aac..875b242 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -14,7 +14,7 @@ type MetricDataRepository interface { Init(url, token string) error // Return the JobData for the given job, only with the requested metrics. 
- LoadData(job *schema.Job, metrics []string, ctx context.Context) (schema.JobData, error) + LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) // Return a map of metrics to a map of nodes to the metric statistics of the job. LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) @@ -56,14 +56,14 @@ func Init(jobArchivePath string, disableArchive bool) error { } // Fetches the metric data for a job. -func LoadData(job *schema.Job, metrics []string, ctx context.Context) (schema.JobData, error) { +func LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) { if job.State == schema.JobStateRunning || !useArchive { repo, ok := metricDataRepos[job.Cluster] if !ok { return nil, fmt.Errorf("no metric data repository configured for '%s'", job.Cluster) } - return repo.LoadData(job, metrics, ctx) + return repo.LoadData(job, metrics, scopes, ctx) } data, err := loadFromArchive(job) From 1c6ab3d062443a6a8fd273689d51c73de602beec Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Fri, 7 Jan 2022 09:54:12 +0100 Subject: [PATCH 22/25] update go.mod and go.sum --- README.md | 6 ++++-- go.mod | 3 ++- go.sum | 59 ------------------------------------------------------- 3 files changed, 6 insertions(+), 62 deletions(-) diff --git a/README.md b/README.md index 8a836e5..792655f 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,8 @@ __*DOES NOT WORK WITH CURRENT FRONTEND*__ [![Build](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml) +Create your job-archive accoring to [this specification](https://github.com/ClusterCockpit/cc-specifications). At least one cluster with a valid `cluster.json` file is required. Having no jobs in the job-archive at all is fine. 
You may use the sample job-archive available for download [in cc-docker/develop](https://github.com/ClusterCockpit/cc-docker/tree/develop). + ### Run server ```sh @@ -29,7 +31,7 @@ touch ./var/job.db # This will first initialize the job.db database by traversing all # `meta.json` files in the job-archive. After that, a HTTP server on # the port 8080 will be running. The `--init-db` is only needed the first time. -./cc-jobarchive --init-db +./cc-jobarchive --init-db --add-user :admin: # Show other options: ./cc-jobarchive --help @@ -37,7 +39,7 @@ touch ./var/job.db ### Configuration -A config file in the JSON format can be provided using `--config` to override the defaults. Loop at the beginning of `server.go` for the defaults and consequently the format of the configuration file. +A config file in the JSON format can be provided using `--config` to override the defaults. Look at the beginning of `server.go` for the defaults and consequently the format of the configuration file. ### Update GraphQL schema diff --git a/go.mod b/go.mod index e2740b7..27ba1a1 100644 --- a/go.mod +++ b/go.mod @@ -10,9 +10,10 @@ require ( github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/sessions v1.2.1 - github.com/influxdata/influxdb-client-go/v2 v2.6.0 github.com/jmoiron/sqlx v1.3.1 github.com/mattn/go-sqlite3 v1.14.6 + github.com/stretchr/testify v1.5.1 // indirect github.com/vektah/gqlparser/v2 v2.1.0 golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 + gopkg.in/yaml.v2 v2.3.0 // indirect ) diff --git a/go.sum b/go.sum index 41d6210..56bb41b 100644 --- a/go.sum +++ b/go.sum @@ -12,40 +12,27 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein 
v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= -github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/trifles v0.0.0-20190318185328-a8d75aae118c h1:TUuUh0Xgj97tLMNtWtNvI9mIV6isjEb9lBMNv+77IGM= github.com/dgryski/trifles v0.0.0-20190318185328-a8d75aae118c/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-chi/chi v3.3.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-ldap/ldap/v3 v3.4.1 
h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= -github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f h1:9oNbS1z4rVpbnkHBdPZU4jo9bSmrLpII768arSyMFgk= github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.6.1 h1:KOwqsTYZdeuMacU7CxjMNYEKeBvLbxW+psodrbcEa3A= github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -57,10 +44,6 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
-github.com/influxdata/influxdb-client-go/v2 v2.6.0 h1:bIOaGTgvvv1Na2hG+nIvqyv7PK2UiU2WrJN1ck1ykyM= -github.com/influxdata/influxdb-client-go/v2 v2.6.0/go.mod h1:Y/0W1+TZir7ypoQZYd2IrnVOKB3Tq6oegAQeSVN/+EU= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE= github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -68,8 +51,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= @@ -77,16 +58,9 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= @@ -94,89 +68,56 @@ github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047 h1:zCoDWFD5 github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= github.com/vektah/gqlparser/v2 v2.1.0 h1:uiKJ+T5HMGGQM2kRKQ8Pxw8+Zq9qhhZhz/lieYvCMns= github.com/vektah/gqlparser/v2 v2.1.0/go.mod h1:SyUiHgLATUR8BiYURfTirrTcGpcE+4XkV2se04Px1Ms= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589 h1:rjUrONFu4kLchcZTfp3/96bR8bW8dIa8uz3cR5n0cgM= golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 
v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From b7432fca5fb25b74f7af95e76df3b698da49f9ce Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Mon, 10 Jan 2022 16:13:40 +0100 Subject: [PATCH 23/25] continue working on non-node scoped metrics --- init-db.go | 14 +++++++++++--- metricdata/archive.go | 8 ++++---- metricdata/cc-metric-store.go | 31 +++++++++++++++++++++++++++++-- metricdata/metricdata.go | 8 +++++++- schema/job.go | 19 +++++-------------- 5 files changed, 56 insertions(+), 24 deletions(-) diff --git a/init-db.go b/init-db.go index 0c94fe4..502c5f5 100644 --- a/init-db.go +++ b/init-db.go @@ -36,8 +36,8 @@ const JOBS_DB_SCHEMA string = ` num_nodes INT NOT NULL, num_hwthreads INT NOT NULL, num_acc INT NOT NULL, - smt TINYINT CHECK(smt IN (0, 1 )) NOT NULL DEFAULT 1, - exclusive TINYINT CHECK(exclusive IN (0, 1, 2)) NOT NULL DEFAULT 1, + smt TINYINT CHECK(smt IN (0, 1 )) NOT NULL DEFAULT 1, + exclusive TINYINT CHECK(exclusive IN (0, 1, 2)) NOT NULL DEFAULT 1, monitoring_status TINYINT CHECK(monitoring_status IN (0, 1 )) NOT NULL DEFAULT 1, mem_used_max REAL NOT NULL DEFAULT 0.0, @@ -88,7 +88,15 @@ func initDB(db *sqlx.DB, archive string) error { return err } - stmt, err := tx.PrepareNamed(schema.JobInsertStmt) + stmt, err := tx.PrepareNamed(`INSERT INTO job ( + job_id, user, project, cluster, partition, array_job_id, num_nodes, num_hwthreads, num_acc, + exclusive, monitoring_status, smt, job_state, start_time, duration, resources, meta_data, + mem_used_max, flops_any_avg, mem_bw_avg, load_avg, net_bw_avg, net_data_vol_total, file_bw_avg, file_data_vol_total + ) VALUES ( + :job_id, :user, :project, :cluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, + :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :resources, :meta_data, + :mem_used_max, 
:flops_any_avg, :mem_bw_avg, :load_avg, :net_bw_avg, :net_data_vol_total, :file_bw_avg, :file_data_vol_total + );`) if err != nil { return err } diff --git a/metricdata/archive.go b/metricdata/archive.go index 4ef6e6c..53f87b1 100644 --- a/metricdata/archive.go +++ b/metricdata/archive.go @@ -243,7 +243,7 @@ func calcStatisticsSeries(job *schema.Job, jobData schema.JobData) error { for i := 0; i < n; i++ { sum, smin, smax := schema.Float(0.), math.MaxFloat32, -math.MaxFloat32 for _, series := range jobMetric.Series { - if len(series.Data) >= i { + if i >= len(series.Data) { sum, smin, smax = schema.NaN, math.NaN(), math.NaN() break } @@ -258,9 +258,9 @@ func calcStatisticsSeries(job *schema.Job, jobData schema.JobData) error { max[i] = schema.Float(smax) } - jobMetric.StatisticsSeries.Mean = mean - jobMetric.StatisticsSeries.Min = min - jobMetric.StatisticsSeries.Max = max + jobMetric.StatisticsSeries = &schema.StatsSeries{ + Min: min, Mean: mean, Max: max, + } jobMetric.Series = nil } } diff --git a/metricdata/cc-metric-store.go b/metricdata/cc-metric-store.go index 62451c9..28a0069 100644 --- a/metricdata/cc-metric-store.go +++ b/metricdata/cc-metric-store.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "log" "net/http" "strconv" "time" @@ -81,6 +82,7 @@ func (ccms *CCMetricStore) doRequest(job *schema.Job, suffix string, metrics []s } func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) { + // log.Printf("job: %#v", job) type ApiQuery struct { Metric string `json:"metric"` @@ -106,7 +108,7 @@ func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes [] reqBody := ApiQueryRequest{ Cluster: job.Cluster, From: job.StartTime.Unix(), - To: job.StartTime.Add(time.Duration(job.Duration)).Unix(), + To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(), Queries: make([]ApiQuery, 0), } @@ -118,12 +120,20 @@ func (ccms 
*CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes [] scopeForMetric := map[string]schema.MetricScope{} for _, metric := range metrics { mc := config.GetMetricConfig(job.Cluster, metric) + if mc == nil { + // return nil, fmt.Errorf("metric '%s' is not specified for cluster '%s'", metric, job.Cluster) + log.Printf("metric '%s' is not specified for cluster '%s'", metric, job.Cluster) + continue + } + nativeScope, requestedScope := mc.Scope, scopes[0] // case 1: A metric is requested at node scope with a native scope of node as well // case 2: A metric is requested at node scope and node is exclusive + // case 3: A metric has native scope node if (nativeScope == requestedScope && nativeScope == schema.MetricScopeNode) || - (job.Exclusive == 1 && requestedScope == schema.MetricScopeNode) { + (job.Exclusive == 1 && requestedScope == schema.MetricScopeNode) || + (nativeScope == schema.MetricScopeNode) { nodes := map[string]bool{} for _, resource := range job.Resources { nodes[resource.Hostname] = true @@ -188,6 +198,8 @@ func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes [] panic("todo") } + // log.Printf("query: %#v", reqBody) + buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(reqBody); err != nil { return nil, err @@ -213,9 +225,16 @@ func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes [] return nil, err } + // log.Printf("response: %#v", resBody) + var jobData schema.JobData = make(schema.JobData) for _, res := range resBody { + metric := res.Query.Metric + if _, ok := jobData[metric]; !ok { + jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric) + } + if res.Error != nil { return nil, fmt.Errorf("cc-metric-store error while fetching %s: %s", metric, *res.Error) } @@ -239,6 +258,14 @@ func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes [] *id, _ = strconv.Atoi(res.Query.TypeIds[0]) } + if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { + // 
TODO: use schema.Float instead of float64? + // This is done because regular float64 can not be JSONed when NaN. + res.Avg = schema.Float(0) + res.Min = schema.Float(0) + res.Max = schema.Float(0) + } + jobMetric.Series = append(jobMetric.Series, schema.Series{ Hostname: res.Query.Hostname, Id: id, diff --git a/metricdata/metricdata.go b/metricdata/metricdata.go index 875b242..25f4925 100644 --- a/metricdata/metricdata.go +++ b/metricdata/metricdata.go @@ -63,7 +63,13 @@ func LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ct return nil, fmt.Errorf("no metric data repository configured for '%s'", job.Cluster) } - return repo.LoadData(job, metrics, scopes, ctx) + data, err := repo.LoadData(job, metrics, scopes, ctx) + if err != nil { + return nil, err + } + + calcStatisticsSeries(job, data) + return data, nil } data, err := loadFromArchive(job) diff --git a/schema/job.go b/schema/job.go index d09fd67..8781776 100644 --- a/schema/job.go +++ b/schema/job.go @@ -11,7 +11,6 @@ import ( // Common subset of Job and JobMeta. Use one of those, not // this type directly. type BaseJob struct { - ID int64 `json:"id" db:"id"` JobID int64 `json:"jobId" db:"job_id"` User string `json:"user" db:"user"` Project string `json:"project" db:"project"` @@ -27,14 +26,15 @@ type BaseJob struct { State JobState `json:"jobState" db:"job_state"` Duration int32 `json:"duration" db:"duration"` Tags []*Tag `json:"tags"` + RawResources []byte `json:"-" db:"resources"` Resources []*Resource `json:"resources"` MetaData interface{} `json:"metaData" db:"meta_data"` } // This type is used as the GraphQL interface and using sqlx as a table row. 
type Job struct { + ID int64 `json:"id" db:"id"` BaseJob - RawResources []byte `json:"-" db:"resources"` StartTime time.Time `json:"startTime" db:"start_time"` MemUsedMax float64 `json:"-" db:"mem_used_max"` FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` @@ -52,7 +52,7 @@ type Job struct { // the StartTime field with one of type int64. type JobMeta struct { BaseJob - StartTime int64 `json:"startTime"` + StartTime int64 `json:"startTime" db:"start_time"` Statistics map[string]JobStatistics `json:"statistics,omitempty"` } @@ -68,16 +68,6 @@ var JobColumns []string = []string{ "job.duration", "job.resources", "job.meta_data", } -const JobInsertStmt string = `INSERT INTO job ( - job_id, user, project, cluster, partition, array_job_id, num_nodes, num_hwthreads, num_acc, - exclusive, monitoring_status, smt, job_state, start_time, duration, resources, meta_data, - mem_used_max, flops_any_avg, mem_bw_avg, load_avg, net_bw_avg, net_data_vol_total, file_bw_avg, file_data_vol_total -) VALUES ( - :job_id, :user, :project, :cluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, - :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :resources, :meta_data, - :mem_used_max, :flops_any_avg, :mem_bw_avg, :load_avg, :net_bw_avg, :net_data_vol_total, :file_bw_avg, :file_data_vol_total -);` - type Scannable interface { StructScan(dest interface{}) error } @@ -85,7 +75,7 @@ type Scannable interface { // Helper function for scanning jobs with the `jobTableCols` columns selected. 
func ScanJob(row Scannable) (*Job, error) { job := &Job{BaseJob: JobDefaults} - if err := row.StructScan(&job); err != nil { + if err := row.StructScan(job); err != nil { return nil, err } @@ -97,6 +87,7 @@ func ScanJob(row Scannable) (*Job, error) { job.Duration = int32(time.Since(job.StartTime).Seconds()) } + job.RawResources = nil return job, nil } From 290e9b89bf3b1d7c85dd5e79b1132bc5714b4340 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Mon, 10 Jan 2022 16:14:54 +0100 Subject: [PATCH 24/25] add cli option for generating a JWT; simplify templates --- api/rest.go | 33 +++++++++++--------- auth/auth.go | 38 ++++++++++++++--------- go.sum | 6 ++++ server.go | 57 +++++++++++++++++++++++++--------- templates/monitoring/jobs.html | 8 +---- templates/monitoring/user.html | 13 ++------ templates/templates.go | 37 +++++++++++++--------- 7 files changed, 117 insertions(+), 75 deletions(-) diff --git a/api/rest.go b/api/rest.go index dedb540..018d25a 100644 --- a/api/rest.go +++ b/api/rest.go @@ -9,7 +9,6 @@ import ( "net/http" "os" "path/filepath" - "time" "github.com/ClusterCockpit/cc-jobarchive/config" "github.com/ClusterCockpit/cc-jobarchive/graph" @@ -28,15 +27,18 @@ type RestApi struct { } func (api *RestApi) MountRoutes(r *mux.Router) { - r.HandleFunc("/api/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut) - r.HandleFunc("/api/jobs/stop_job/", api.stopJob).Methods(http.MethodPost, http.MethodPut) - r.HandleFunc("/api/jobs/stop_job/{id}", api.stopJob).Methods(http.MethodPost, http.MethodPut) + r = r.PathPrefix("/api").Subrouter() + r.StrictSlash(true) - r.HandleFunc("/api/jobs/{id}", api.getJob).Methods(http.MethodGet) - r.HandleFunc("/api/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) + r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut) + r.HandleFunc("/jobs/stop_job/", api.stopJob).Methods(http.MethodPost, http.MethodPut) + r.HandleFunc("/jobs/stop_job/{id}", 
api.stopJob).Methods(http.MethodPost, http.MethodPut) - r.HandleFunc("/api/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet) - r.HandleFunc("/api/machine_state/{cluster}/{host}", api.putMachineState).Methods(http.MethodPut, http.MethodPost) + r.HandleFunc("/jobs/{id}", api.getJob).Methods(http.MethodGet) + r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) + + r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet) + r.HandleFunc("/machine_state/{cluster}/{host}", api.putMachineState).Methods(http.MethodPut, http.MethodPost) } type StartJobApiRespone struct { @@ -158,17 +160,18 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { return } - job := schema.Job{ - BaseJob: req.BaseJob, - StartTime: time.Unix(req.StartTime, 0), - } - - job.RawResources, err = json.Marshal(req.Resources) + req.RawResources, err = json.Marshal(req.Resources) if err != nil { log.Fatal(err) } - res, err := api.DB.NamedExec(schema.JobInsertStmt, job) + res, err := api.DB.NamedExec(`INSERT INTO job ( + job_id, user, project, cluster, partition, array_job_id, num_nodes, num_hwthreads, num_acc, + exclusive, monitoring_status, smt, job_state, start_time, duration, resources, meta_data + ) VALUES ( + :job_id, :user, :project, :cluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc, + :exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :resources, :meta_data + );`, req) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) return diff --git a/auth/auth.go b/auth/auth.go index d10ac3b..e463fd0 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -23,12 +23,13 @@ import ( ) type User struct { - Username string - Password string - Name string - IsAdmin bool - ViaLdap bool - Email string + Username string + Password string + Name string + IsAdmin bool + IsAPIUser bool + ViaLdap bool + Email string } type ContextKey 
string @@ -110,6 +111,9 @@ func AddUserToDB(db *sqlx.DB, arg string) error { if parts[1] == "admin" { roles = "[\"ROLE_ADMIN\"]" } + if parts[1] == "api" { + roles = "[\"ROLE_API\"]" + } _, err = sq.Insert("user").Columns("username", "password", "roles").Values(parts[0], string(password), roles).RunWith(db).Exec() if err != nil { @@ -124,7 +128,7 @@ func DelUserFromDB(db *sqlx.DB, username string) error { return err } -func fetchUserFromDB(db *sqlx.DB, username string) (*User, error) { +func FetchUserFromDB(db *sqlx.DB, username string) (*User, error) { user := &User{Username: username} var hashedPassword, name, rawRoles, email sql.NullString if err := sq.Select("password", "ldap", "name", "roles", "email").From("user"). @@ -141,8 +145,11 @@ func fetchUserFromDB(db *sqlx.DB, username string) (*User, error) { json.Unmarshal([]byte(rawRoles.String), &roles) } for _, role := range roles { - if role == "ROLE_ADMIN" { + switch role { + case "ROLE_ADMIN": user.IsAdmin = true + case "ROLE_API": + user.IsAPIUser = true } } @@ -154,7 +161,7 @@ func fetchUserFromDB(db *sqlx.DB, username string) (*User, error) { func Login(db *sqlx.DB) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { username, password := r.FormValue("username"), r.FormValue("password") - user, err := fetchUserFromDB(db, username) + user, err := FetchUserFromDB(db, username) if err == nil && user.ViaLdap && ldapAuthEnabled { err = loginViaLdap(user, password) } else if err == nil && !user.ViaLdap && user.Password != "" { @@ -168,7 +175,7 @@ func Login(db *sqlx.DB) http.Handler { if err != nil { log.Printf("login failed: %s\n", err.Error()) rw.WriteHeader(http.StatusUnauthorized) - templates.Render(rw, r, "login", &templates.Page{ + templates.Render(rw, r, "login.html", &templates.Page{ Title: "Login failed", Login: &templates.LoginPage{ Error: "Username or password incorrect", @@ -231,9 +238,11 @@ func authViaToken(r *http.Request) (*User, error) { claims := 
token.Claims.(jwt.MapClaims) sub, _ := claims["sub"].(string) isAdmin, _ := claims["is_admin"].(bool) + isAPIUser, _ := claims["is_api"].(bool) return &User{ - Username: sub, - IsAdmin: isAdmin, + Username: sub, + IsAdmin: isAdmin, + IsAPIUser: isAPIUser, }, nil } @@ -264,7 +273,7 @@ func Auth(next http.Handler) http.Handler { log.Printf("authentication failed: no session or jwt found\n") rw.WriteHeader(http.StatusUnauthorized) - templates.Render(rw, r, "login", &templates.Page{ + templates.Render(rw, r, "login.html", &templates.Page{ Title: "Authentication failed", Login: &templates.LoginPage{ Error: "No valid session or JWT provided", @@ -290,6 +299,7 @@ func ProvideJWT(user *User) (string, error) { tok := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{ "sub": user.Username, "is_admin": user.IsAdmin, + "is_api": user.IsAPIUser, }) return tok.SignedString(JwtPrivateKey) @@ -320,7 +330,7 @@ func Logout(rw http.ResponseWriter, r *http.Request) { } } - templates.Render(rw, r, "login", &templates.Page{ + templates.Render(rw, r, "login.html", &templates.Page{ Title: "Logout successful", Login: &templates.LoginPage{ Info: "Logout successful", diff --git a/go.sum b/go.sum index 56bb41b..88d0d20 100644 --- a/go.sum +++ b/go.sum @@ -12,6 +12,7 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -68,14 +69,17 @@ github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047 h1:zCoDWFD5 github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -84,6 +88,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= github.com/vektah/gqlparser/v2 v2.1.0 h1:uiKJ+T5HMGGQM2kRKQ8Pxw8+Zq9qhhZhz/lieYvCMns= @@ -112,6 +117,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589 h1:rjUrONFu4kLchcZTfp3/96bR8bW8dIa8uz3cR5n0cgM= golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/server.go b/server.go index 9f76380..00e5c96 100644 --- a/server.go +++ b/server.go @@ -106,13 +106,14 @@ var programConfig ProgramConfig = ProgramConfig{ func main() { var flagReinitDB, flagStopImmediately, flagSyncLDAP bool var flagConfigFile string - var flagNewUser, flagDelUser string + var flagNewUser, flagDelUser, flagGenJWT string flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize `job`, `tag`, and `jobtag` tables") flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the `user` 
table with ldap") flag.BoolVar(&flagStopImmediately, "no-server", false, "Do not start a server, stop right after initialization and argument handling") flag.StringVar(&flagConfigFile, "config", "", "Location of the config file for this server (overwrites the defaults)") - flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `:[admin]:`") + flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `:[admin|api]:`") flag.StringVar(&flagDelUser, "del-user", "", "Remove user by username") + flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by the username") flag.Parse() if flagConfigFile != "" { @@ -156,6 +157,24 @@ func main() { if flagSyncLDAP { auth.SyncWithLDAP(db) } + + if flagGenJWT != "" { + user, err := auth.FetchUserFromDB(db, flagGenJWT) + if err != nil { + log.Fatal(err) + } + + if !user.IsAPIUser { + log.Println("warning: that user does not have the API role") + } + + jwt, err := auth.ProvideJWT(user) + if err != nil { + log.Fatal(err) + } + + fmt.Printf("JWT for '%s': %s\n", user.Username, jwt) + } } else if flagNewUser != "" || flagDelUser != "" { log.Fatalln("arguments --add-user and --del-user can only be used if authentication is enabled") } @@ -182,6 +201,18 @@ func main() { resolver := &graph.Resolver{DB: db} graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) + + // graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error { + // switch e := err.(type) { + // case string: + // return fmt.Errorf("panic: %s", e) + // case error: + // return fmt.Errorf("panic caused by: %w", e) + // } + + // return errors.New("internal server error (panic)") + // }) + graphQLPlayground := playground.Handler("GraphQL playground", "/query") api := &api.RestApi{ DB: db, @@ -191,7 +222,7 @@ func main() { } handleGetLogin := func(rw http.ResponseWriter, r *http.Request) { - templates.Render(rw, r, "login", 
&templates.Page{ + templates.Render(rw, r, "login.html", &templates.Page{ Title: "Login", Login: &templates.LoginPage{}, }) @@ -199,7 +230,7 @@ func main() { r := mux.NewRouter() r.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - templates.Render(rw, r, "404", &templates.Page{ + templates.Render(rw, r, "404.html", &templates.Page{ Title: "Not found", }) }) @@ -215,8 +246,6 @@ func main() { } secured.Handle("/query", graphQLEndpoint) - secured.HandleFunc("/config.json", config.ServeConfig).Methods(http.MethodGet) - secured.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { conf, err := config.GetUIConfig(r) if err != nil { @@ -235,7 +264,7 @@ func main() { infos["admin"] = user.IsAdmin } - templates.Render(rw, r, "home", &templates.Page{ + templates.Render(rw, r, "home.html", &templates.Page{ Title: "ClusterCockpit", Config: conf, Infos: infos, @@ -297,7 +326,7 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { return } - templates.Render(rw, r, "monitoring/jobs/", &templates.Page{ + templates.Render(rw, r, "monitoring/jobs.html", &templates.Page{ Title: "Jobs - ClusterCockpit", Config: conf, FilterPresets: buildFilterPresets(r.URL.Query()), @@ -318,7 +347,7 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { return } - templates.Render(rw, r, "monitoring/job/", &templates.Page{ + templates.Render(rw, r, "monitoring/job.html", &templates.Page{ Title: fmt.Sprintf("Job %d - ClusterCockpit", job.JobID), Config: conf, Infos: map[string]interface{}{ @@ -336,7 +365,7 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { return } - templates.Render(rw, r, "monitoring/users/", &templates.Page{ + templates.Render(rw, r, "monitoring/users.html", &templates.Page{ Title: "Users - ClusterCockpit", Config: conf, }) @@ -353,7 +382,7 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { // TODO: One could check if the user exists, but that would be 
unhelpful if authentication // is disabled or the user does not exist but has started jobs. - templates.Render(rw, r, "monitoring/user/", &templates.Page{ + templates.Render(rw, r, "monitoring/user.html", &templates.Page{ Title: fmt.Sprintf("User %s - ClusterCockpit", id), Config: conf, Infos: map[string]interface{}{"username": id}, @@ -374,7 +403,7 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { filterPresets["clusterId"] = query.Get("cluster") } - templates.Render(rw, r, "monitoring/analysis/", &templates.Page{ + templates.Render(rw, r, "monitoring/analysis.html", &templates.Page{ Title: "Analysis View - ClusterCockpit", Config: conf, FilterPresets: filterPresets, @@ -394,7 +423,7 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { filterPresets["clusterId"] = query.Get("cluster") } - templates.Render(rw, r, "monitoring/systems/", &templates.Page{ + templates.Render(rw, r, "monitoring/systems.html", &templates.Page{ Title: "System View - ClusterCockpit", Config: conf, FilterPresets: filterPresets, @@ -409,7 +438,7 @@ func monitoringRoutes(router *mux.Router, resolver *graph.Resolver) { } vars := mux.Vars(r) - templates.Render(rw, r, "monitoring/node/", &templates.Page{ + templates.Render(rw, r, "monitoring/node.html", &templates.Page{ Title: fmt.Sprintf("Node %s - ClusterCockpit", vars["nodeId"]), Config: conf, Infos: map[string]interface{}{ diff --git a/templates/monitoring/jobs.html b/templates/monitoring/jobs.html index 1d70968..9733678 100644 --- a/templates/monitoring/jobs.html +++ b/templates/monitoring/jobs.html @@ -8,13 +8,7 @@ {{define "javascript"}} {{end}} diff --git a/templates/monitoring/user.html b/templates/monitoring/user.html index ee16cdc..693ae61 100644 --- a/templates/monitoring/user.html +++ b/templates/monitoring/user.html @@ -7,16 +7,9 @@ {{end}} {{define "javascript"}} {{end}} diff --git a/templates/templates.go b/templates/templates.go index 327ef19..1ab66d7 100644 ---
a/templates/templates.go +++ b/templates/templates.go @@ -6,7 +6,9 @@ import ( "net/http" ) -var templates map[string]*template.Template +var templatesDir string +var debugMode bool = true +var templates map[string]*template.Template = map[string]*template.Template{} type Page struct { Title string @@ -22,27 +24,32 @@ type LoginPage struct { } func init() { - base := template.Must(template.ParseFiles("./templates/base.html")) - templates = map[string]*template.Template{ - "home": template.Must(template.Must(base.Clone()).ParseFiles("./templates/home.html")), - "404": template.Must(template.Must(base.Clone()).ParseFiles("./templates/404.html")), - "login": template.Must(template.Must(base.Clone()).ParseFiles("./templates/login.html")), - "monitoring/jobs/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/jobs.html")), - "monitoring/job/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/job.html")), - "monitoring/users/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/users.html")), - "monitoring/user/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/user.html")), - "monitoring/analysis/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/analysis.html")), - "monitoring/systems/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/systems.html")), - "monitoring/node/": template.Must(template.Must(base.Clone()).ParseFiles("./templates/monitoring/node.html")), + templatesDir = "./templates/" + base := template.Must(template.ParseFiles(templatesDir + "base.html")) + files := []string{ + "home.html", "404.html", "login.html", + "monitoring/jobs.html", "monitoring/job.html", + "monitoring/users.html", "monitoring/user.html", + "monitoring/analysis.html", + "monitoring/systems.html", + "monitoring/node.html", + } + + for _, file := range files { + templates[file] = 
template.Must(template.Must(base.Clone()).ParseFiles(templatesDir + file)) } } -func Render(rw http.ResponseWriter, r *http.Request, name string, page *Page) { - t, ok := templates[name] +func Render(rw http.ResponseWriter, r *http.Request, file string, page *Page) { + t, ok := templates[file] if !ok { panic("templates must be predefinied!") } + if debugMode { + t = template.Must(template.ParseFiles(templatesDir+"base.html", templatesDir+file)) + } + if err := t.Execute(rw, page); err != nil { log.Printf("template error: %s\n", err.Error()) } From 102dd855247f3844a84728766e06b15d0d8bfcc4 Mon Sep 17 00:00:00 2001 From: Lou Knauer Date: Mon, 10 Jan 2022 16:17:40 +0100 Subject: [PATCH 25/25] renamed submodule --- .gitmodules | 2 +- config/config.go | 15 --------------- graph/schema.graphqls | 6 +++--- utils/add-job.mjs | 40 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 19 deletions(-) create mode 100644 utils/add-job.mjs diff --git a/.gitmodules b/.gitmodules index 3beff03..08755e4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "frontend"] path = frontend - url = git@github.com:ClusterCockpit/cc-svelte-datatable.git + url = git@github.com:ClusterCockpit/cc-frontend.git diff --git a/config/config.go b/config/config.go index e35caaa..e13e101 100644 --- a/config/config.go +++ b/config/config.go @@ -132,21 +132,6 @@ func UpdateConfig(key, value string, ctx context.Context) error { return nil } -// http.HandlerFunc compatible function that serves the current configuration as JSON. -// TODO: Use templates and stuff instead of this... 
-func ServeConfig(rw http.ResponseWriter, r *http.Request) { - config, err := GetUIConfig(r) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - rw.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(rw).Encode(config); err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - } -} - func GetClusterConfig(cluster string) *model.Cluster { for _, c := range Clusters { if c.Name == cluster { diff --git a/graph/schema.graphqls b/graph/schema.graphqls index 26a8821..dc3456f 100644 --- a/graph/schema.graphqls +++ b/graph/schema.graphqls @@ -103,9 +103,9 @@ type Series { } type MetricStatistics { - avg: Float! - min: Float! - max: Float! + avg: NullableFloat! + min: NullableFloat! + max: NullableFloat! } type StatsSeries { diff --git a/utils/add-job.mjs b/utils/add-job.mjs new file mode 100644 index 0000000..dc14039 --- /dev/null +++ b/utils/add-job.mjs @@ -0,0 +1,40 @@ +import fetch from 'node-fetch' + +// Just for testing + +const job = { + jobId: 123, + user: 'lou', + project: 'testproj', + cluster: 'heidi', + partition: 'default', + arrayJobId: 0, + numNodes: 1, + numHwthreads: 8, + numAcc: 0, + exclusive: 1, + monitoringStatus: 1, + smt: 1, + jobState: 'running', + duration: 2*60*60, + tags: [], + resources: [ + { + hostname: 'heidi', + hwthreads: [0, 1, 2, 3, 4, 5, 6, 7] + } + ], + metaData: null, + startTime: 1641427200 +} + +fetch('http://localhost:8080/api/jobs/start_job/', { + method: 'POST', + body: JSON.stringify(job), + headers: { + 'Content-Type': 'application/json', + 'Authorization': 'Bearer eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJpc19hZG1pbiI6dHJ1ZSwiaXNfYXBpIjpmYWxzZSwic3ViIjoibG91In0.nY6dCgLSdm7zXz1xPkrb_3JnnUCgExXeXcrTlAAySs4p72VKJhmzzC1RxgkJE26l8tDYUilM-o-urzlaqK5aDA' + } + }) + .then(res => res.status == 200 ? res.json() : res.text()) + .then(res => console.log(res))
Name/IDName Jobs System View Analysis View
{{.ClusterID}}JobsSystem ViewAnalysis View{{.Name}}JobsSystem ViewAnalysis View