Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2025-07-23 04:51:39 +02:00)

Commit: Reformat and Refactor packages. Rebuild GraphQL.
@@ -146,7 +146,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
return
}
ufrom, uto := time.Unix(from, 0), time.Unix(to, 0)
filter.StartTime = &model.TimeRange{From: &ufrom, To: &uto}
filter.StartTime = &schema.TimeRange{From: &ufrom, To: &uto}
case "page":
x, err := strconv.Atoi(vals[0])
if err != nil {
@@ -75,7 +75,7 @@ type Authentication struct {
SessionMaxAge time.Duration

authenticators []Authenticator
LdapAuth *LdapAutnenticator
LdapAuth *LdapAuthenticator
JwtAuth *JWTAuthenticator
LocalAuth *LocalAuthenticator
}

@@ -125,7 +125,7 @@ func Init(db *sqlx.DB,
auth.authenticators = append(auth.authenticators, auth.JwtAuth)

if config, ok := configs["ldap"]; ok {
auth.LdapAuth = &LdapAutnenticator{}
auth.LdapAuth = &LdapAuthenticator{}
if err := auth.LdapAuth.Init(auth, config); err != nil {
return nil, err
}

@@ -135,7 +135,10 @@ func Init(db *sqlx.DB,
return auth, nil
}

func (auth *Authentication) AuthViaSession(rw http.ResponseWriter, r *http.Request) (*User, error) {
func (auth *Authentication) AuthViaSession(
rw http.ResponseWriter,
r *http.Request) (*User, error) {

session, err := auth.sessionStore.Get(r, "session")
if err != nil {
return nil, err

@@ -155,7 +158,10 @@ func (auth *Authentication) AuthViaSession(rw http.ResponseWriter, r *http.Reque
}

// Handle a POST request that should log the user in, starting a new session.
func (auth *Authentication) Login(onsuccess http.Handler, onfailure func(rw http.ResponseWriter, r *http.Request, loginErr error)) http.Handler {
func (auth *Authentication) Login(
onsuccess http.Handler,
onfailure func(rw http.ResponseWriter, r *http.Request, loginErr error)) http.Handler {

return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
var err error = errors.New("no authenticator applied")
username := r.FormValue("username")

@@ -211,7 +217,10 @@ func (auth *Authentication) Login(onsuccess http.Handler, onfailure func(rw http
// Authenticate the user and put a User object in the
// context of the request. If authentication fails,
// do not continue but send client to the login screen.
func (auth *Authentication) Auth(onsuccess http.Handler, onfailure func(rw http.ResponseWriter, r *http.Request, authErr error)) http.Handler {
func (auth *Authentication) Auth(
onsuccess http.Handler,
onfailure func(rw http.ResponseWriter, r *http.Request, authErr error)) http.Handler {

return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
for _, authenticator := range auth.authenticators {
user, err := authenticator.Auth(rw, r)

@@ -237,6 +246,7 @@ func (auth *Authentication) Auth(onsuccess http.Handler, onfailure func(rw http.

// Clears the session cookie
func (auth *Authentication) Logout(onsuccess http.Handler) http.Handler {

return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
session, err := auth.sessionStore.Get(r, "session")
if err != nil {
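
Editor's note: the authenticators touched by this commit all plug into the Authentication struct above, and Auth() simply iterates over auth.authenticators until one succeeds. The Authenticator interface itself is not part of this diff; judging from the var _ Authenticator assertions and the method sets shown below, it presumably looks roughly like this sketch (not the verbatim definition):

    // Sketch only: inferred from the Init/CanLogin/Login/Auth methods in this diff.
    type Authenticator interface {
        Init(auth *Authentication, conf interface{}) error
        CanLogin(user *User, rw http.ResponseWriter, r *http.Request) bool
        Login(user *User, rw http.ResponseWriter, r *http.Request) (*User, error)
        Auth(rw http.ResponseWriter, r *http.Request) (*User, error)
    }
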
@@ -16,15 +16,10 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/golang-jwt/jwt/v4"
)

type JWTAuthConfig struct {
// Specifies for how long a session or JWT shall be valid
// as a string parsable by time.ParseDuration().
MaxAge int64 `json:"max-age"`
}

type JWTAuthenticator struct {
auth *Authentication

@@ -33,14 +28,15 @@ type JWTAuthenticator struct {

loginTokenKey []byte // HS256 key

config *JWTAuthConfig
config *schema.JWTAuthConfig
}

var _ Authenticator = (*JWTAuthenticator)(nil)

func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {

ja.auth = auth
ja.config = conf.(*JWTAuthConfig)
ja.config = conf.(*schema.JWTAuthConfig)

pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY")
if pubKey == "" || privKey == "" {

@@ -69,11 +65,19 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
return nil
}

func (ja *JWTAuthenticator) CanLogin(user *User, rw http.ResponseWriter, r *http.Request) bool {
func (ja *JWTAuthenticator) CanLogin(
user *User,
rw http.ResponseWriter,
r *http.Request) bool {

return (user != nil && user.AuthSource == AuthViaToken) || r.Header.Get("Authorization") != "" || r.URL.Query().Get("login-token") != ""
}

func (ja *JWTAuthenticator) Login(user *User, rw http.ResponseWriter, r *http.Request) (*User, error) {
func (ja *JWTAuthenticator) Login(
user *User,
rw http.ResponseWriter,
r *http.Request) (*User, error) {

rawtoken := r.Header.Get("X-Auth-Token")
if rawtoken == "" {
rawtoken = r.Header.Get("Authorization")

@@ -135,7 +139,10 @@ func (ja *JWTAuthenticator) Login(user *User, rw http.ResponseWriter, r *http.Re
return user, nil
}

func (ja *JWTAuthenticator) Auth(rw http.ResponseWriter, r *http.Request) (*User, error) {
func (ja *JWTAuthenticator) Auth(
rw http.ResponseWriter,
r *http.Request) (*User, error) {

rawtoken := r.Header.Get("X-Auth-Token")
if rawtoken == "" {
rawtoken = r.Header.Get("Authorization")

@@ -183,6 +190,7 @@ func (ja *JWTAuthenticator) Auth(rw http.ResponseWriter, r *http.Request) (*User

// Generate a new JWT that can be used for authentication
func (ja *JWTAuthenticator) ProvideJWT(user *User) (string, error) {

if ja.privateKey == nil {
return "", errors.New("environment variable 'JWT_PRIVATE_KEY' not set")
}
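
Editor's note: as the hunks above show, the JWT authenticator reads the token from the X-Auth-Token header first and falls back to the Authorization header. A minimal client-side sketch (the URL is a placeholder, and how the Authorization value is parsed is not shown in this diff):

    package main

    import "net/http"

    func queryWithJWT(token string) (*http.Response, error) {
        // Placeholder endpoint; any authenticated cc-backend route works.
        req, err := http.NewRequest("GET", "https://ccbackend.example.com/api/jobs/", nil)
        if err != nil {
            return nil, err
        }
        // Auth() checks X-Auth-Token first, then the Authorization header.
        req.Header.Set("X-Auth-Token", token)
        return http.DefaultClient.Do(req)
    }
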
@@ -12,30 +12,24 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/go-ldap/ldap/v3"
)

type LdapConfig struct {
Url string `json:"url"`
UserBase string `json:"user_base"`
SearchDN string `json:"search_dn"`
UserBind string `json:"user_bind"`
UserFilter string `json:"user_filter"`
SyncInterval string `json:"sync_interval"` // Parsed using time.ParseDuration.
SyncDelOldUsers bool `json:"sync_del_old_users"`
}

type LdapAutnenticator struct {
type LdapAuthenticator struct {
auth *Authentication
config *LdapConfig
config *schema.LdapConfig
syncPassword string
}

var _ Authenticator = (*LdapAutnenticator)(nil)
var _ Authenticator = (*LdapAuthenticator)(nil)

func (la *LdapAuthenticator) Init(
auth *Authentication,
conf interface{}) error {

func (la *LdapAutnenticator) Init(auth *Authentication, conf interface{}) error {
la.auth = auth
la.config = conf.(*LdapConfig)
la.config = conf.(*schema.LdapConfig)

la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD")
if la.syncPassword == "" {

@@ -67,11 +61,19 @@ func (la *LdapAutnenticator) Init(auth *Authentication, conf interface{}) error
return nil
}

func (la *LdapAutnenticator) CanLogin(user *User, rw http.ResponseWriter, r *http.Request) bool {
func (la *LdapAuthenticator) CanLogin(
user *User,
rw http.ResponseWriter,
r *http.Request) bool {

return user != nil && user.AuthSource == AuthViaLDAP
}

func (la *LdapAutnenticator) Login(user *User, rw http.ResponseWriter, r *http.Request) (*User, error) {
func (la *LdapAuthenticator) Login(
user *User,
rw http.ResponseWriter,
r *http.Request) (*User, error) {

l, err := la.getLdapConnection(false)
if err != nil {
return nil, err

@@ -86,11 +88,15 @@ func (la *LdapAutnenticator) Login(user *User, rw http.ResponseWriter, r *http.R
return user, nil
}

func (la *LdapAutnenticator) Auth(rw http.ResponseWriter, r *http.Request) (*User, error) {
func (la *LdapAuthenticator) Auth(
rw http.ResponseWriter,
r *http.Request) (*User, error) {

return la.auth.AuthViaSession(rw, r)
}

func (la *LdapAutnenticator) Sync() error {
func (la *LdapAuthenticator) Sync() error {

const IN_DB int = 1
const IN_LDAP int = 2
const IN_BOTH int = 3

@@ -160,7 +166,8 @@ func (la *LdapAutnenticator) Sync() error {

// TODO: Add a connection pool or something like
// that so that connections can be reused/cached.
func (la *LdapAutnenticator) getLdapConnection(admin bool) (*ldap.Conn, error) {
func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) {

conn, err := ldap.DialURL(la.config.Url)
if err != nil {
return nil, err
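
Editor's note: Sync() classifies accounts with the IN_DB/IN_LDAP/IN_BOTH constants shown above. The body of the sync loop is not part of this hunk, but the flag scheme suggests a single map keyed by username; a rough, hedged sketch of that idea (function and variable names are invented for illustration):

    const (
        IN_DB   = 1
        IN_LDAP = 2
        IN_BOTH = 3
    )

    // classify merges the two user lists into the flag scheme used by Sync().
    // Users seen only in the DB are candidates for deletion when
    // sync_del_old_users is set; users seen only in LDAP get created.
    func classify(dbUsers, ldapUsers []string) map[string]int {
        users := map[string]int{}
        for _, u := range dbUsers {
            users[u] = IN_DB
        }
        for _, u := range ldapUsers {
            users[u] |= IN_LDAP // 1|2 == IN_BOTH for users present on both sides
        }
        return users
    }
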
@@ -17,16 +17,27 @@ type LocalAuthenticator struct {

var _ Authenticator = (*LocalAuthenticator)(nil)

func (la *LocalAuthenticator) Init(auth *Authentication, _ interface{}) error {
func (la *LocalAuthenticator) Init(
auth *Authentication,
_ interface{}) error {

la.auth = auth
return nil
}

func (la *LocalAuthenticator) CanLogin(user *User, rw http.ResponseWriter, r *http.Request) bool {
func (la *LocalAuthenticator) CanLogin(
user *User,
rw http.ResponseWriter,
r *http.Request) bool {

return user != nil && user.AuthSource == AuthViaLocalPassword
}

func (la *LocalAuthenticator) Login(user *User, rw http.ResponseWriter, r *http.Request) (*User, error) {
func (la *LocalAuthenticator) Login(
user *User,
rw http.ResponseWriter,
r *http.Request) (*User, error) {

if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(r.FormValue("password"))); e != nil {
return nil, fmt.Errorf("user '%s' provided the wrong password (%w)", user.Username, e)
}

@@ -34,6 +45,9 @@ func (la *LocalAuthenticator) Login(user *User, rw http.ResponseWriter, r *http.
return user, nil
}

func (la *LocalAuthenticator) Auth(rw http.ResponseWriter, r *http.Request) (*User, error) {
func (la *LocalAuthenticator) Auth(
rw http.ResponseWriter,
r *http.Request) (*User, error) {

return la.auth.AuthViaSession(rw, r)
}
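
Editor's note: Login verifies the stored hash with bcrypt.CompareHashAndPassword. The counterpart when a local user is created is bcrypt.GenerateFromPassword; a small sketch (how cc-backend actually creates local users is outside this diff):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/bcrypt"
    )

    func hashPassword(plain string) (string, error) {
        // bcrypt.DefaultCost is a reasonable starting point for the work factor.
        hashed, err := bcrypt.GenerateFromPassword([]byte(plain), bcrypt.DefaultCost)
        if err != nil {
            return "", err
        }
        return string(hashed), nil
    }

    func main() {
        h, _ := hashPassword("example-password")
        fmt.Println(h) // the value stored in user.Password
    }
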
@@ -19,6 +19,7 @@ import (
)

func (auth *Authentication) GetUser(username string) (*User, error) {

user := &User{Username: username}
var hashedPassword, name, rawRoles, email sql.NullString
if err := sq.Select("password", "ldap", "name", "roles", "email").From("user").

@@ -40,6 +41,7 @@ func (auth *Authentication) GetUser(username string) (*User, error) {
}

func (auth *Authentication) AddUser(user *User) error {

rolesJson, _ := json.Marshal(user.Roles)

cols := []string{"username", "roles"}

@@ -70,11 +72,13 @@ func (auth *Authentication) AddUser(user *User) error {
}

func (auth *Authentication) DelUser(username string) error {

_, err := auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username)
return err
}

func (auth *Authentication) ListUsers(specialsOnly bool) ([]*User, error) {

q := sq.Select("username", "name", "email", "roles").From("user")
if specialsOnly {
q = q.Where("(roles != '[\"user\"]' AND roles != '[]')")

@@ -106,7 +110,11 @@ func (auth *Authentication) ListUsers(specialsOnly bool) ([]*User, error) {
return users, nil
}

func (auth *Authentication) AddRole(ctx context.Context, username string, role string) error {
func (auth *Authentication) AddRole(
ctx context.Context,
username string,
role string) error {

user, err := auth.GetUser(username)
if err != nil {
return err

@@ -129,7 +137,11 @@ func (auth *Authentication) AddRole(ctx context.Context, username string, role s
return nil
}

func FetchUser(ctx context.Context, db *sqlx.DB, username string) (*model.User, error) {
func FetchUser(
ctx context.Context,
db *sqlx.DB,
username string) (*model.User, error) {

me := GetUser(ctx)
if me != nil && !me.HasRole(RoleAdmin) && me.Username != username {
return nil, errors.New("forbidden")
@@ -9,92 +9,10 @@ import (
"log"
"os"

"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

type LdapConfig struct {
Url string `json:"url"`
UserBase string `json:"user_base"`
SearchDN string `json:"search_dn"`
UserBind string `json:"user_bind"`
UserFilter string `json:"user_filter"`
SyncInterval string `json:"sync_interval"` // Parsed using time.ParseDuration.
SyncDelOldUsers bool `json:"sync_del_old_users"`
}

type JWTAuthConfig struct {
// Specifies for how long a session or JWT shall be valid
// as a string parsable by time.ParseDuration().
MaxAge int64 `json:"max-age"`
}

type Cluster struct {
Name string `json:"name"`
FilterRanges *model.FilterRanges `json:"filterRanges"`
MetricDataRepository json.RawMessage `json:"metricDataRepository"`
}

// Format of the configuration (file). See below for the defaults.
type ProgramConfig struct {
// Address where the http (or https) server will listen on (for example: 'localhost:80').
Addr string `json:"addr"`

// Drop root permissions once .env was read and the port was taken.
User string `json:"user"`
Group string `json:"group"`

// Disable authentication (for everything: API, Web-UI, ...)
DisableAuthentication bool `json:"disable-authentication"`

// If `embed-static-files` is true (default), the frontend files are directly
// embeded into the go binary and expected to be in web/frontend. Only if
// it is false the files in `static-files` are served instead.
EmbedStaticFiles bool `json:"embed-static-files"`
StaticFiles string `json:"static-files"`

// 'sqlite3' or 'mysql' (mysql will work for mariadb as well)
DBDriver string `json:"db-driver"`

// For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!).
DB string `json:"db"`

// Config for job archive
Archive json.RawMessage `json:"archive"`

// Keep all metric data in the metric data repositories,
// do not write to the job-archive.
DisableArchive bool `json:"disable-archive"`

// For LDAP Authentication and user synchronisation.
LdapConfig *LdapConfig `json:"ldap"`
JwtConfig *JWTAuthConfig `json:"jwts"`

// If 0 or empty, the session/token does not expire!
SessionMaxAge string `json:"session-max-age"`

// If both those options are not empty, use HTTPS using those certificates.
HttpsCertFile string `json:"https-cert-file"`
HttpsKeyFile string `json:"https-key-file"`

// If not the empty string and `addr` does not end in ":80",
// redirect every request incoming at port 80 to that url.
RedirectHttpTo string `json:"redirect-http-to"`

// If overwriten, at least all the options in the defaults below must
// be provided! Most options here can be overwritten by the user.
UiDefaults map[string]interface{} `json:"ui-defaults"`

// Where to store MachineState files
MachineStateDir string `json:"machine-state-dir"`

// If not zero, automatically mark jobs as stopped running X seconds longer than their walltime.
StopJobsExceedingWalltime int `json:"stop-jobs-exceeding-walltime"`

// Array of Clusters
Clusters []*Cluster `json:"clusters"`
}

var Keys ProgramConfig = ProgramConfig{
var Keys schema.ProgramConfig = schema.ProgramConfig{
Addr: ":8080",
DisableAuthentication: false,
EmbedStaticFiles: true,
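
Editor's note: with ProgramConfig, LdapConfig and JWTAuthConfig moved into pkg/schema, config.Keys becomes a schema.ProgramConfig pre-filled with the defaults above. Loading a config file then presumably amounts to unmarshalling the JSON over those defaults; a hedged sketch (the internal/config import path and the loader shape are assumptions, not shown in this diff):

    package main

    import (
        "encoding/json"
        "log"
        "os"

        "github.com/ClusterCockpit/cc-backend/internal/config"
    )

    func loadConfig(path string) {
        f, err := os.Open(path)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        // Fields present in the file override the defaults kept in config.Keys.
        if err := json.NewDecoder(f).Decode(&config.Keys); err != nil {
            log.Fatal(err)
        }
    }
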
File diff suppressed because it is too large.
@@ -3,124 +3,3 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package model

import (
"encoding/json"
"strconv"
)

type Cluster struct {
Name string `json:"name"`
MetricConfig []*MetricConfig `json:"metricConfig"`
FilterRanges *FilterRanges `json:"filterRanges"`
SubClusters []*SubCluster `json:"subClusters"`

// NOT part of the GraphQL API. This has to be a JSON object with a field `"kind"`.
// All other fields depend on that kind (e.g. "cc-metric-store", "influxdb-v2").
MetricDataRepository json.RawMessage `json:"metricDataRepository"`
}

// Return a list of socket IDs given a list of hwthread IDs.
// Even if just one hwthread is in that socket, add it to the list.
// If no hwthreads other than those in the argument list are assigned to
// one of the sockets in the first return value, return true as the second value.
// TODO: Optimize this, there must be a more efficient way/algorithm.
func (topo *Topology) GetSocketsFromHWThreads(hwthreads []int) (sockets []int, exclusive bool) {
socketsMap := map[int]int{}
for _, hwthread := range hwthreads {
for socket, hwthreadsInSocket := range topo.Socket {
for _, hwthreadInSocket := range hwthreadsInSocket {
if hwthread == hwthreadInSocket {
socketsMap[socket] += 1
}
}
}
}

exclusive = true
hwthreadsPerSocket := len(topo.Node) / len(topo.Socket)
sockets = make([]int, 0, len(socketsMap))
for socket, count := range socketsMap {
sockets = append(sockets, socket)
exclusive = exclusive && count == hwthreadsPerSocket
}

return sockets, exclusive
}

// Return a list of core IDs given a list of hwthread IDs.
// Even if just one hwthread is in that core, add it to the list.
// If no hwthreads other than those in the argument list are assigned to
// one of the cores in the first return value, return true as the second value.
// TODO: Optimize this, there must be a more efficient way/algorithm.
func (topo *Topology) GetCoresFromHWThreads(hwthreads []int) (cores []int, exclusive bool) {
coresMap := map[int]int{}
for _, hwthread := range hwthreads {
for core, hwthreadsInCore := range topo.Core {
for _, hwthreadInCore := range hwthreadsInCore {
if hwthread == hwthreadInCore {
coresMap[core] += 1
}
}
}
}

exclusive = true
hwthreadsPerCore := len(topo.Node) / len(topo.Core)
cores = make([]int, 0, len(coresMap))
for core, count := range coresMap {
cores = append(cores, core)
exclusive = exclusive && count == hwthreadsPerCore
}

return cores, exclusive
}

// Return a list of memory domain IDs given a list of hwthread IDs.
// Even if just one hwthread is in that memory domain, add it to the list.
// If no hwthreads other than those in the argument list are assigned to
// one of the memory domains in the first return value, return true as the second value.
// TODO: Optimize this, there must be a more efficient way/algorithm.
func (topo *Topology) GetMemoryDomainsFromHWThreads(hwthreads []int) (memDoms []int, exclusive bool) {
memDomsMap := map[int]int{}
for _, hwthread := range hwthreads {
for memDom, hwthreadsInmemDom := range topo.MemoryDomain {
for _, hwthreadInmemDom := range hwthreadsInmemDom {
if hwthread == hwthreadInmemDom {
memDomsMap[memDom] += 1
}
}
}
}

exclusive = true
hwthreadsPermemDom := len(topo.Node) / len(topo.MemoryDomain)
memDoms = make([]int, 0, len(memDomsMap))
for memDom, count := range memDomsMap {
memDoms = append(memDoms, memDom)
exclusive = exclusive && count == hwthreadsPermemDom
}

return memDoms, exclusive
}

func (topo *Topology) GetAcceleratorIDs() ([]int, error) {
accels := make([]int, 0)
for _, accel := range topo.Accelerators {
id, err := strconv.Atoi(accel.ID)
if err != nil {
return nil, err
}
accels = append(accels, id)
}
return accels, nil
}

func (topo *Topology) GetAcceleratorIndex(id string) (int, bool) {
for idx, accel := range topo.Accelerators {
if accel.ID == id {
return idx, true
}
}
return -1, false
}
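
Editor's note: the topology helpers removed here from the GraphQL model package appear to move to pkg/schema, in line with the new schema.Cluster/Topology usages elsewhere in this commit. A small worked example of the socket lookup, assuming the Topology fields and the GetSocketsFromHWThreads signature stay as shown above (both assumptions):

    package main

    import (
        "fmt"

        "github.com/ClusterCockpit/cc-backend/pkg/schema"
    )

    func main() {
        // Two sockets with four hwthreads each.
        topo := schema.Topology{
            Node:   []int{0, 1, 2, 3, 4, 5, 6, 7},
            Socket: [][]int{{0, 1, 2, 3}, {4, 5, 6, 7}},
        }

        // hwthreads 0-3 cover socket 0 completely, so exclusive is true.
        sockets, exclusive := topo.GetSocketsFromHWThreads([]int{0, 1, 2, 3})
        fmt.Println(sockets, exclusive) // [0] true

        // hwthread 5 only partially covers socket 1, so exclusive is false.
        sockets, exclusive = topo.GetSocketsFromHWThreads([]int{5})
        fmt.Println(sockets, exclusive) // [1] false
    }
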
@@ -11,26 +11,14 @@ import (
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

type Accelerator struct {
ID string `json:"id"`
Type string `json:"type"`
Model string `json:"model"`
}

type Count struct {
Name string `json:"name"`
Count int `json:"count"`
}

type FilterRanges struct {
Duration *IntRangeOutput `json:"duration"`
NumNodes *IntRangeOutput `json:"numNodes"`
StartTime *TimeRangeOutput `json:"startTime"`
}

type FloatRange struct {
From float64 `json:"from"`
To float64 `json:"to"`
From schema.Float `json:"from"`
To schema.Float `json:"to"`
}

type Footprints struct {

@@ -43,11 +31,6 @@ type HistoPoint struct {
Value int `json:"value"`
}

type IntRange struct {
From int `json:"from"`
To int `json:"to"`
}

type IntRangeOutput struct {
From int `json:"from"`
To int `json:"to"`

@@ -61,12 +44,12 @@ type JobFilter struct {
Project *StringInput `json:"project"`
Cluster *StringInput `json:"cluster"`
Partition *StringInput `json:"partition"`
Duration *IntRange `json:"duration"`
Duration *schema.IntRange `json:"duration"`
MinRunningFor *int `json:"minRunningFor"`
NumNodes *IntRange `json:"numNodes"`
NumAccelerators *IntRange `json:"numAccelerators"`
NumHWThreads *IntRange `json:"numHWThreads"`
StartTime *TimeRange `json:"startTime"`
NumNodes *schema.IntRange `json:"numNodes"`
NumAccelerators *schema.IntRange `json:"numAccelerators"`
NumHWThreads *schema.IntRange `json:"numHWThreads"`
StartTime *schema.TimeRange `json:"startTime"`
State []schema.JobState `json:"state"`
FlopsAnyAvg *FloatRange `json:"flopsAnyAvg"`
MemBwAvg *FloatRange `json:"memBwAvg"`

@@ -96,19 +79,6 @@ type JobsStatistics struct {
HistNumNodes []*HistoPoint `json:"histNumNodes"`
}

type MetricConfig struct {
Name string `json:"name"`
Unit string `json:"unit"`
Scope schema.MetricScope `json:"scope"`
Aggregation *string `json:"aggregation"`
Timestep int `json:"timestep"`
Peak *float64 `json:"peak"`
Normal *float64 `json:"normal"`
Caution *float64 `json:"caution"`
Alert *float64 `json:"alert"`
SubClusters []*SubClusterConfig `json:"subClusters"`
}

type MetricFootprints struct {
Metric string `json:"metric"`
Data []schema.Float `json:"data"`

@@ -137,47 +107,11 @@ type StringInput struct {
EndsWith *string `json:"endsWith"`
}

type SubCluster struct {
Name string `json:"name"`
Nodes string `json:"nodes"`
NumberOfNodes int `json:"numberOfNodes"`
ProcessorType string `json:"processorType"`
SocketsPerNode int `json:"socketsPerNode"`
CoresPerSocket int `json:"coresPerSocket"`
ThreadsPerCore int `json:"threadsPerCore"`
FlopRateScalar int `json:"flopRateScalar"`
FlopRateSimd int `json:"flopRateSimd"`
MemoryBandwidth int `json:"memoryBandwidth"`
Topology *Topology `json:"topology"`
}

type SubClusterConfig struct {
Name string `json:"name"`
Peak float64 `json:"peak"`
Normal float64 `json:"normal"`
Caution float64 `json:"caution"`
Alert float64 `json:"alert"`
}

type TimeRange struct {
From *time.Time `json:"from"`
To *time.Time `json:"to"`
}

type TimeRangeOutput struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
}

type Topology struct {
Node []int `json:"node"`
Socket [][]int `json:"socket"`
MemoryDomain [][]int `json:"memoryDomain"`
Die [][]int `json:"die"`
Core [][]int `json:"core"`
Accelerators []*Accelerator `json:"accelerators"`
}

type User struct {
Username string `json:"username"`
Name string `json:"name"`
@@ -1,7 +1,3 @@
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package graph

// This file will be automatically regenerated based on the schema, any resolver implementations

@@ -23,22 +19,82 @@ import (
"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func (r *clusterResolver) Partitions(ctx context.Context, obj *model.Cluster) ([]string, error) {
// Partitions is the resolver for the partitions field.
func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error) {
return r.Repo.Partitions(obj.Name)
}

// FilterRanges is the resolver for the filterRanges field.
func (r *clusterResolver) FilterRanges(ctx context.Context, obj *schema.Cluster) (*schema.FilterRanges, error) {
panic(fmt.Errorf("not implemented: FilterRanges - filterRanges"))
}

// Duration is the resolver for the duration field.
func (r *filterRangesResolver) Duration(ctx context.Context, obj *schema.FilterRanges) (*model.IntRangeOutput, error) {
panic(fmt.Errorf("not implemented: Duration - duration"))
}

// NumNodes is the resolver for the numNodes field.
func (r *filterRangesResolver) NumNodes(ctx context.Context, obj *schema.FilterRanges) (*model.IntRangeOutput, error) {
panic(fmt.Errorf("not implemented: NumNodes - numNodes"))
}

// StartTime is the resolver for the startTime field.
func (r *filterRangesResolver) StartTime(ctx context.Context, obj *schema.FilterRanges) (*model.TimeRangeOutput, error) {
panic(fmt.Errorf("not implemented: StartTime - startTime"))
}

// Tags is the resolver for the tags field.
func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
return r.Repo.GetTags(&obj.ID)
}

// MetaData is the resolver for the metaData field.
func (r *jobResolver) MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) {
return r.Repo.FetchMetadata(obj)
}

// UserData is the resolver for the userData field.
func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) {
return auth.FetchUser(ctx, r.DB, obj.User)
}

// Peak is the resolver for the peak field.
func (r *metricConfigResolver) Peak(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
panic(fmt.Errorf("not implemented: Peak - peak"))
}

// Normal is the resolver for the normal field.
func (r *metricConfigResolver) Normal(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
panic(fmt.Errorf("not implemented: Normal - normal"))
}

// Caution is the resolver for the caution field.
func (r *metricConfigResolver) Caution(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
panic(fmt.Errorf("not implemented: Caution - caution"))
}

// Alert is the resolver for the alert field.
func (r *metricConfigResolver) Alert(ctx context.Context, obj *schema.MetricConfig) (*schema.Float, error) {
panic(fmt.Errorf("not implemented: Alert - alert"))
}

// Avg is the resolver for the avg field.
func (r *metricStatisticsResolver) Avg(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
panic(fmt.Errorf("not implemented: Avg - avg"))
}

// Min is the resolver for the min field.
func (r *metricStatisticsResolver) Min(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
panic(fmt.Errorf("not implemented: Min - min"))
}

// Max is the resolver for the max field.
func (r *metricStatisticsResolver) Max(ctx context.Context, obj *schema.MetricStatistics) (schema.Float, error) {
panic(fmt.Errorf("not implemented: Max - max"))
}

// CreateTag is the resolver for the createTag field.
func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) {
id, err := r.Repo.CreateTag(typeArg, name)
if err != nil {

@@ -48,11 +104,12 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s
return &schema.Tag{ID: id, Type: typeArg, Name: name}, nil
}

// DeleteTag is the resolver for the deleteTag field.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
// The UI does not allow this currently anyways.
panic(fmt.Errorf("not implemented"))
panic(fmt.Errorf("not implemented: DeleteTag - deleteTag"))
}

// AddTagsToJob is the resolver for the addTagsToJob field.
func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
jid, err := strconv.ParseInt(job, 10, 64)
if err != nil {

@@ -74,6 +131,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
return tags, nil
}

// RemoveTagsFromJob is the resolver for the removeTagsFromJob field.
func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
jid, err := strconv.ParseInt(job, 10, 64)
if err != nil {

@@ -95,7 +153,9 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
return tags, nil
}

// UpdateConfiguration is the resolver for the updateConfiguration field.
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {

if err := repository.GetUserCfgRepo().UpdateConfig(name, value, ctx); err != nil {
return nil, err
}

@@ -103,18 +163,22 @@ func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string,
return nil, nil
}

func (r *queryResolver) Clusters(ctx context.Context) ([]*model.Cluster, error) {
// Clusters is the resolver for the clusters field.
func (r *queryResolver) Clusters(ctx context.Context) ([]*schema.Cluster, error) {
return archive.Clusters, nil
}

// Tags is the resolver for the tags field.
func (r *queryResolver) Tags(ctx context.Context) ([]*schema.Tag, error) {
return r.Repo.GetTags(nil)
}

// User is the resolver for the user field.
func (r *queryResolver) User(ctx context.Context, username string) (*model.User, error) {
return auth.FetchUser(ctx, r.DB, username)
}

// AllocatedNodes is the resolver for the allocatedNodes field.
func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
data, err := r.Repo.AllocatedNodes(cluster)
if err != nil {

@@ -132,6 +196,7 @@ func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*
return counts, nil
}

// Job is the resolver for the job field.
func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
numericId, err := strconv.ParseInt(id, 10, 64)
if err != nil {

@@ -143,13 +208,16 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error)
return nil, err
}

if user := auth.GetUser(ctx); user != nil && !user.HasRole(auth.RoleAdmin) && job.User != user.Username {
if user := auth.GetUser(ctx); user != nil &&
!user.HasRole(auth.RoleAdmin) &&
job.User != user.Username {
return nil, errors.New("you are not allowed to see this job")
}

return job, nil
}

// JobMetrics is the resolver for the jobMetrics field.
func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) {
job, err := r.Query().Job(ctx, id)
if err != nil {

@@ -178,10 +246,12 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
return res, err
}

// JobsFootprints is the resolver for the jobsFootprints field.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
return r.jobsFootprints(ctx, filter, metrics)
}

// Jobs is the resolver for the jobs field.
func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {
if page == nil {
page = &model.PageRequest{

@@ -203,10 +273,12 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
return &model.JobResultList{Items: jobs, Count: &count}, nil
}

// JobsStatistics is the resolver for the jobsStatistics field.
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
return r.jobsStatistics(ctx, filter, groupBy)
}

// JobsCount is the resolver for the jobsCount field.
func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, weight *model.Weights, limit *int) ([]*model.Count, error) {
counts, err := r.Repo.CountGroupedJobs(ctx, groupBy, filter, weight, limit)
if err != nil {

@@ -223,10 +295,12 @@ func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter
return res, nil
}

func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
// RooflineHeatmap is the resolver for the rooflineHeatmap field.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX schema.Float, minY schema.Float, maxX schema.Float, maxY schema.Float) ([][]schema.Float, error) {
return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)
}

// NodeMetrics is the resolver for the nodeMetrics field.
func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) {
user := auth.GetUser(ctx)
if user != nil && !user.HasRole(auth.RoleAdmin) {

@@ -267,19 +341,59 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [
return nodeMetrics, nil
}

// Peak is the resolver for the peak field.
func (r *subClusterConfigResolver) Peak(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
panic(fmt.Errorf("not implemented: Peak - peak"))
}

// Normal is the resolver for the normal field.
func (r *subClusterConfigResolver) Normal(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
panic(fmt.Errorf("not implemented: Normal - normal"))
}

// Caution is the resolver for the caution field.
func (r *subClusterConfigResolver) Caution(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
panic(fmt.Errorf("not implemented: Caution - caution"))
}

// Alert is the resolver for the alert field.
func (r *subClusterConfigResolver) Alert(ctx context.Context, obj *schema.SubClusterConfig) (schema.Float, error) {
panic(fmt.Errorf("not implemented: Alert - alert"))
}

// Cluster returns generated.ClusterResolver implementation.
func (r *Resolver) Cluster() generated.ClusterResolver { return &clusterResolver{r} }

// FilterRanges returns generated.FilterRangesResolver implementation.
func (r *Resolver) FilterRanges() generated.FilterRangesResolver { return &filterRangesResolver{r} }

// Job returns generated.JobResolver implementation.
func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }

// MetricConfig returns generated.MetricConfigResolver implementation.
func (r *Resolver) MetricConfig() generated.MetricConfigResolver { return &metricConfigResolver{r} }

// MetricStatistics returns generated.MetricStatisticsResolver implementation.
func (r *Resolver) MetricStatistics() generated.MetricStatisticsResolver {
return &metricStatisticsResolver{r}
}

// Mutation returns generated.MutationResolver implementation.
func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }

// Query returns generated.QueryResolver implementation.
func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }

// SubClusterConfig returns generated.SubClusterConfigResolver implementation.
func (r *Resolver) SubClusterConfig() generated.SubClusterConfigResolver {
return &subClusterConfigResolver{r}
}

type clusterResolver struct{ *Resolver }
type filterRangesResolver struct{ *Resolver }
type jobResolver struct{ *Resolver }
type metricConfigResolver struct{ *Resolver }
type metricStatisticsResolver struct{ *Resolver }
type mutationResolver struct{ *Resolver }
type queryResolver struct{ *Resolver }
type subClusterConfigResolver struct{ *Resolver }
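
Editor's note: the regenerated resolver set is handed to gqlgen's generated package when the HTTP server is built. A minimal sketch of that wiring using gqlgen's standard handler (the Repo and DB fields on Resolver are inferred from their use above, and the import paths and exact setup in the main program may differ):

    package main

    import (
        "net/http"

        "github.com/99designs/gqlgen/graphql/handler"

        "github.com/ClusterCockpit/cc-backend/internal/graph"
        "github.com/ClusterCockpit/cc-backend/internal/graph/generated"
    )

    func newGraphQLHandler(resolver *graph.Resolver) http.Handler {
        // generated.NewExecutableSchema binds the schema to this resolver set;
        // handler.NewDefaultServer serves it over HTTP.
        return handler.NewDefaultServer(
            generated.NewExecutableSchema(generated.Config{Resolvers: resolver}))
    }
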
@@ -200,7 +200,17 @@ func (r *queryResolver) jobsStatisticsHistogram(ctx context.Context, value strin
const MAX_JOBS_FOR_ANALYSIS = 500

// Helper function for the rooflineHeatmap GraphQL query placed here so that schema.resolvers.go is not too full.
func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
func (r *queryResolver) rooflineHeatmap(
ctx context.Context,
filter []*model.JobFilter,
rows int, cols int,
minXF schema.Float, minYF schema.Float, maxXF schema.Float, maxYF schema.Float) ([][]schema.Float, error) {

var minX, minY, maxX, maxY float64
minX = float64(minXF)
minY = float64(minYF)
maxX = float64(maxXF)
maxY = float64(maxYF)
jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
if err != nil {
return nil, err

@@ -211,9 +221,9 @@ func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilte

fcols, frows := float64(cols), float64(rows)
minX, minY, maxX, maxY = math.Log10(minX), math.Log10(minY), math.Log10(maxX), math.Log10(maxY)
tiles := make([][]float64, rows)
tiles := make([][]schema.Float, rows)
for i := range tiles {
tiles[i] = make([]float64, cols)
tiles[i] = make([]schema.Float, cols)
}

for _, job := range jobs {
@@ -75,6 +75,7 @@ type ApiMetricData struct {
}

func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {

var config CCMetricStoreConfig
if err := json.Unmarshal(rawConfig, &config); err != nil {
return err

@@ -117,7 +118,10 @@ func (ccms *CCMetricStore) toLocalName(metric string) string {
return metric
}

func (ccms *CCMetricStore) doRequest(ctx context.Context, body *ApiQueryRequest) (*ApiQueryResponse, error) {
func (ccms *CCMetricStore) doRequest(
ctx context.Context,
body *ApiQueryRequest) (*ApiQueryResponse, error) {

buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(body); err != nil {
return nil, err

@@ -148,7 +152,12 @@ func (ccms *CCMetricStore) doRequest(ctx context.Context, body *ApiQueryRequest)
return &resBody, nil
}

func (ccms *CCMetricStore) LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) {
func (ccms *CCMetricStore) LoadData(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.JobData, error) {

topology := archive.GetSubCluster(job.Cluster, job.SubCluster).Topology
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes)
if err != nil {

@@ -250,7 +259,11 @@ var (
acceleratorString = string(schema.MetricScopeAccelerator)
)

func (ccms *CCMetricStore) buildQueries(job *schema.Job, metrics []string, scopes []schema.MetricScope) ([]ApiQuery, []schema.MetricScope, error) {
func (ccms *CCMetricStore) buildQueries(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope) ([]ApiQuery, []schema.MetricScope, error) {

queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(job.Resources))
topology := archive.GetSubCluster(job.Cluster, job.SubCluster).Topology
assignedScope := []schema.MetricScope{}

@@ -478,7 +491,11 @@ func (ccms *CCMetricStore) buildQueries(job *schema.Job, metrics []string, scope
return queries, assignedScope, nil
}

func (ccms *CCMetricStore) LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
func (ccms *CCMetricStore) LoadStats(
job *schema.Job,
metrics []string,
ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {

queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode})
if err != nil {
return nil, err

@@ -528,7 +545,13 @@ func (ccms *CCMetricStore) LoadStats(job *schema.Job, metrics []string, ctx cont
}

// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known!
func (ccms *CCMetricStore) LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
func (ccms *CCMetricStore) LoadNodeData(
cluster string,
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {

req := ApiQueryRequest{
Cluster: cluster,
From: from.Unix(),

@@ -611,6 +634,7 @@ func (ccms *CCMetricStore) LoadNodeData(cluster string, metrics, nodes []string,
}

func intToStringSlice(is []int) []string {

ss := make([]string, len(is))
for i, x := range is {
ss[i] = strconv.Itoa(x)
@@ -55,7 +55,11 @@ func (idb *InfluxDBv2DataRepository) epochToTime(epoch int64) time.Time {
return time.Unix(epoch, 0)
}

func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) {
func (idb *InfluxDBv2DataRepository) LoadData(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.JobData, error) {

measurementsConds := make([]string, 0, len(metrics))
for _, m := range metrics {

@@ -234,7 +238,10 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string,
return jobData, nil
}

func (idb *InfluxDBv2DataRepository) LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
func (idb *InfluxDBv2DataRepository) LoadStats(
job *schema.Job,
metrics []string,
ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {

stats := map[string]map[string]schema.MetricStatistics{}

@@ -304,7 +311,13 @@ func (idb *InfluxDBv2DataRepository) LoadStats(job *schema.Job, metrics []string
return stats, nil
}

func (idb *InfluxDBv2DataRepository) LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
func (idb *InfluxDBv2DataRepository) LoadNodeData(
cluster string,
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {

// TODO : Implement to be used in Analysis- und System/Node-View
log.Println(fmt.Sprintf("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes))
@@ -38,6 +38,7 @@ var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepos
var useArchive bool

func Init(disableArchive bool) error {

useArchive = !disableArchive
for _, cluster := range config.Keys.Clusters {
if cluster.MetricDataRepository != nil {

@@ -73,6 +74,7 @@ var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)

// Fetches the metric data for a job.
func LoadData(job *schema.Job,

metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.JobData, error) {

@@ -151,7 +153,12 @@ func LoadData(job *schema.Job,
}

// Used for the jobsFootprint GraphQL-Query. TODO: Rename/Generalize.
func LoadAverages(job *schema.Job, metrics []string, data [][]schema.Float, ctx context.Context) error {
func LoadAverages(
job *schema.Job,
metrics []string,
data [][]schema.Float,
ctx context.Context) error {

if job.State != schema.JobStateRunning && useArchive {
return archive.LoadAveragesFromArchive(job, metrics, data)
}

@@ -184,7 +191,13 @@ func LoadAverages(job *schema.Job, metrics []string, data [][]schema.Float, ctx
}

// Used for the node/system view. Returns a map of nodes to a map of metrics.
func LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
func LoadNodeData(
cluster string,
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {

repo, ok := metricDataRepos[cluster]
if !ok {
return nil, fmt.Errorf("no metric data repository configured for '%s'", cluster)

@@ -212,17 +225,26 @@ func LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.Metri
return data, nil
}

func cacheKey(job *schema.Job, metrics []string, scopes []schema.MetricScope) string {
func cacheKey(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope) string {

// Duration and StartTime do not need to be in the cache key as StartTime is less unique than
// job.ID and the TTL of the cache entry makes sure it does not stay there forever.
return fmt.Sprintf("%d(%s):[%v],[%v]",
job.ID, job.State, metrics, scopes)
}

// For /monitoring/job/<job> and some other places, flops_any and mem_bw need to be available at the scope 'node'.
// If a job has a lot of nodes, statisticsSeries should be available so that a min/mean/max Graph can be used instead of
// a lot of single lines.
func prepareJobData(job *schema.Job, jobData schema.JobData, scopes []schema.MetricScope) {
// For /monitoring/job/<job> and some other places, flops_any and mem_bw need
// to be available at the scope 'node'. If a job has a lot of nodes,
// statisticsSeries should be available so that a min/mean/max Graph can be
// used instead of a lot of single lines.
func prepareJobData(
job *schema.Job,
jobData schema.JobData,
scopes []schema.MetricScope) {

const maxSeriesSize int = 15
for _, scopes := range jobData {
for _, jm := range scopes {

@@ -249,6 +271,7 @@ func prepareJobData(job *schema.Job, jobData schema.JobData, scopes []schema.Met

// Writes a running job to the job-archive
func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) {

allMetrics := make([]string, 0)
metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
for _, mc := range metricConfigs {
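
Editor's note: cacheKey builds a small composite string from the job ID, its state, and the requested metrics and scopes. For illustration only (the values below are made up), the same format string produces keys like this:

    package main

    import "fmt"

    func main() {
        // Same format string as cacheKey above, with hypothetical values.
        key := fmt.Sprintf("%d(%s):[%v],[%v]",
            1337, "running", []string{"flops_any", "mem_bw"}, []string{"node"})
        fmt.Println(key) // 1337(running):[[flops_any mem_bw]],[[node]]
    }
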
@@ -23,14 +23,28 @@ func (tmdr *TestMetricDataRepository) Init(_ json.RawMessage) error {
return nil
}

func (tmdr *TestMetricDataRepository) LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) {
func (tmdr *TestMetricDataRepository) LoadData(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.JobData, error) {

return TestLoadDataCallback(job, metrics, scopes, ctx)
}

func (tmdr *TestMetricDataRepository) LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
func (tmdr *TestMetricDataRepository) LoadStats(
job *schema.Job,
metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {

panic("TODO")
}

func (tmdr *TestMetricDataRepository) LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
func (tmdr *TestMetricDataRepository) LoadNodeData(
cluster string,
metrics, nodes []string,
scopes []schema.MetricScope,
from, to time.Time,
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {

panic("TODO")
}
@@ -167,11 +167,11 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
return query
}

func buildIntCondition(field string, cond *model.IntRange, query sq.SelectBuilder) sq.SelectBuilder {
func buildIntCondition(field string, cond *schema.IntRange, query sq.SelectBuilder) sq.SelectBuilder {
return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
}

func buildTimeCondition(field string, cond *model.TimeRange, query sq.SelectBuilder) sq.SelectBuilder {
func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBuilder) sq.SelectBuilder {
if cond.From != nil && cond.To != nil {
return query.Where(field+" BETWEEN ? AND ?", cond.From.Unix(), cond.To.Unix())
} else if cond.From != nil {
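
Editor's note: these helpers only swap the condition types to the schema package; the squirrel builder output is unchanged. A hedged sketch of what the generated SQL looks like when both an int and a time condition are applied (table and column names are illustrative, not taken from this diff):

    package main

    import (
        "fmt"

        sq "github.com/Masterminds/squirrel"
    )

    func main() {
        query := sq.Select("job.id").From("job").
            Where("job.duration BETWEEN ? AND ?", 600, 3600).
            Where("job.start_time BETWEEN ? AND ?", 1672531200, 1675209600)

        sql, args, _ := query.ToSql()
        fmt.Println(sql)
        // SELECT job.id FROM job WHERE job.duration BETWEEN ? AND ? AND job.start_time BETWEEN ? AND ?
        fmt.Println(args) // [600 3600 1672531200 1675209600]
    }
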
@@ -71,8 +71,8 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
}
from := time.Now().Add(-24 * time.Hour)
recentShortJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
StartTime: &model.TimeRange{From: &from, To: nil},
Duration: &model.IntRange{From: 0, To: graph.ShortJobDuration},
StartTime: &schema.TimeRange{From: &from, To: nil},
Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration},
}}, nil, nil)
if err != nil {
log.Errorf("failed to count jobs: %s", err.Error())