individual configurations per user

Lou Knauer 2021-12-08 10:12:19 +01:00
parent 960b0245b2
commit 4ca0cba7cd
7 changed files with 160 additions and 49 deletions

View File

@@ -3,63 +3,143 @@ package config
 import (
     "context"
     "encoding/json"
+    "fmt"
     "log"
     "net/http"
     "os"
+    "path/filepath"
     "sync"
+    "time"
 
+    "github.com/ClusterCockpit/cc-jobarchive/auth"
     "github.com/ClusterCockpit/cc-jobarchive/graph/model"
+    "github.com/jmoiron/sqlx"
 )
 
+var db *sqlx.DB
 var lock sync.RWMutex
-var config map[string]interface{}
+var uiDefaults map[string]interface{}
 var Clusters []*model.Cluster
 
-const configFilePath string = "./var/ui.config.json"
-
-func init() {
-    lock.Lock()
-    defer lock.Unlock()
-
-    bytes, err := os.ReadFile(configFilePath)
-    if err != nil {
-        log.Fatal(err)
-    }
-
-    if err := json.Unmarshal(bytes, &config); err != nil {
-        log.Fatal(err)
-    }
-}
+func Init(usersdb *sqlx.DB, authEnabled bool, uiConfig map[string]interface{}, jobArchive string) error {
+    db = usersdb
+    uiDefaults = uiConfig
+    entries, err := os.ReadDir(jobArchive)
+    if err != nil {
+        return err
+    }
+
+    Clusters = []*model.Cluster{}
+    for _, de := range entries {
+        bytes, err := os.ReadFile(filepath.Join(jobArchive, de.Name(), "cluster.json"))
+        if err != nil {
+            return err
+        }
+
+        var cluster model.Cluster
+        if err := json.Unmarshal(bytes, &cluster); err != nil {
+            return err
+        }
+
+        if cluster.FilterRanges.StartTime.To.IsZero() {
+            cluster.FilterRanges.StartTime.To = time.Unix(0, 0)
+        }
+
+        if cluster.ClusterID != de.Name() {
+            return fmt.Errorf("the file '%s/cluster.json' contains the clusterId '%s'", de.Name(), cluster.ClusterID)
+        }
+
+        Clusters = append(Clusters, &cluster)
+    }
+
+    if authEnabled {
+        _, err := db.Exec(`
+        CREATE TABLE IF NOT EXISTS configuration (
+            username varchar(255),
+            key varchar(255),
+            value varchar(255),
+            PRIMARY KEY (username, key),
+            FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`)
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
 
-// Call this function to change the current configuration.
-// `value` must be valid JSON. This function is thread-safe.
+// Return the personalised UI config for the currently authenticated
+// user or return the plain default config.
+func GetUIConfig(r *http.Request) (map[string]interface{}, error) {
+    lock.RLock()
+    config := make(map[string]interface{}, len(uiDefaults))
+    for k, v := range uiDefaults {
+        config[k] = v
+    }
+    lock.RUnlock()
+
+    user := auth.GetUser(r.Context())
+    if user == nil {
+        return config, nil
+    }
+
+    rows, err := db.Query(`SELECT key, value FROM configuration WHERE configuration.username = ?`, user.Username)
+    if err != nil {
+        return nil, err
+    }
+
+    for rows.Next() {
+        var key, rawval string
+        if err := rows.Scan(&key, &rawval); err != nil {
+            return nil, err
+        }
+
+        var val interface{}
+        if err := json.Unmarshal([]byte(rawval), &val); err != nil {
+            return nil, err
+        }
+
+        config[key] = val
+    }
+
+    return config, nil
+}
+
+// If the context does not have a user, update the global ui configuration without persisting it!
+// If there is a (authenticated) user, update only his configuration.
 func UpdateConfig(key, value string, ctx context.Context) error {
-    var v interface{}
-    if err := json.Unmarshal([]byte(value), &v); err != nil {
-        return err
-    }
-
-    lock.Lock()
-    defer lock.Unlock()
-    config[key] = v
-    bytes, err := json.Marshal(config)
-    if err != nil {
-        return err
-    }
-
-    if err := os.WriteFile(configFilePath, bytes, 0644); err != nil {
+    user := auth.GetUser(ctx)
+    if user == nil {
+        lock.RLock()
+        defer lock.RUnlock()
+
+        var val interface{}
+        if err := json.Unmarshal([]byte(value), &val); err != nil {
+            return err
+        }
+
+        uiDefaults[key] = val
+        return nil
+    }
+
+    if _, err := db.Exec(`REPLACE INTO configuration (username, key, value) VALUES (?, ?, ?)`,
+        user.Username, key, value); err != nil {
+        log.Printf("db.Exec: %s\n", err.Error())
         return err
     }
 
     return nil
 }
 
-// http.HandlerFunc compatible function that serves the current configuration as JSON
-// TODO: Use templates and stuff instead of this...
+// http.HandlerFunc compatible function that serves the current configuration as JSON.
 func ServeConfig(rw http.ResponseWriter, r *http.Request) {
-    lock.RLock()
-    defer lock.RUnlock()
+    config, err := GetUIConfig(r)
+    if err != nil {
+        http.Error(rw, err.Error(), http.StatusInternalServerError)
+        return
+    }
 
     rw.Header().Set("Content-Type", "application/json")
     if err := json.NewEncoder(rw).Encode(config); err != nil {
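
For orientation, a minimal wiring sketch (not part of this commit) of how the reworked config package might be initialized and mounted. Init, GetUIConfig, UpdateConfig and ServeConfig are the functions introduced above; the config import path, the SQLite driver, the database path, the job-archive path and the route are illustrative assumptions.

    package main

    import (
        "log"
        "net/http"

        "github.com/ClusterCockpit/cc-jobarchive/config" // assumed import path for the package above
        "github.com/jmoiron/sqlx"
        _ "github.com/mattn/go-sqlite3" // assumed SQLite driver for the users DB
    )

    func main() {
        // Assumed locations of the users database and the job archive.
        db, err := sqlx.Open("sqlite3", "./var/job.db")
        if err != nil {
            log.Fatal(err)
        }

        // authEnabled=true creates the per-user `configuration` table if it is missing.
        uiDefaults := map[string]interface{}{"plot_list_jobsPerPage": 10}
        if err := config.Init(db, true, uiDefaults, "./var/job-archive"); err != nil {
            log.Fatal(err)
        }

        // ServeConfig returns the defaults merged with the authenticated user's
        // overrides from the `configuration` table (via GetUIConfig).
        http.HandleFunc("/config.json", config.ServeConfig)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }

With this setup, an unauthenticated call to UpdateConfig only mutates the in-memory uiDefaults, while an authenticated call is persisted per user through REPLACE INTO configuration.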

View File

@@ -7,3 +7,20 @@ type JobTag struct {
     TagType string `json:"tagType" db:"tag_type"`
     TagName string `json:"tagName" db:"tag_name"`
 }
+
+type Cluster struct {
+    ClusterID            string          `json:"clusterID"`
+    ProcessorType        string          `json:"processorType"`
+    SocketsPerNode       int             `json:"socketsPerNode"`
+    CoresPerSocket       int             `json:"coresPerSocket"`
+    ThreadsPerCore       int             `json:"threadsPerCore"`
+    FlopRateScalar       int             `json:"flopRateScalar"`
+    FlopRateSimd         int             `json:"flopRateSimd"`
+    MemoryBandwidth      int             `json:"memoryBandwidth"`
+    MetricConfig         []*MetricConfig `json:"metricConfig"`
+    FilterRanges         *FilterRanges   `json:"filterRanges"`
+    MetricDataRepository *struct {
+        Kind string `json:"kind"`
+        Url  string `json:"url"`
+    } `json:"metricDataRepository"`
+}
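
The extended Cluster struct is what Init() now decodes from each <jobArchive>/<cluster>/cluster.json. A hypothetical file could be parsed like this; all values are invented for illustration, only the JSON keys come from the struct tags above.

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/ClusterCockpit/cc-jobarchive/graph/model"
    )

    // Illustrative cluster.json contents; metricConfig and filterRanges omitted.
    const exampleClusterJSON = `{
        "clusterID": "testcluster",
        "processorType": "SomeCPU",
        "socketsPerNode": 2,
        "coresPerSocket": 10,
        "threadsPerCore": 2,
        "flopRateScalar": 44,
        "flopRateSimd": 704,
        "memoryBandwidth": 80,
        "metricDataRepository": {"kind": "influxdb", "url": "http://localhost:8086"}
    }`

    func main() {
        var cluster model.Cluster
        if err := json.Unmarshal([]byte(exampleClusterJSON), &cluster); err != nil {
            log.Fatal(err)
        }
        // Init() additionally checks that ClusterID matches the directory name.
        fmt.Println(cluster.ClusterID, cluster.SocketsPerNode*cluster.CoresPerSocket)
    }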

View File

@@ -11,19 +11,6 @@ import (
     "github.com/ClusterCockpit/cc-jobarchive/schema"
 )
 
-type Cluster struct {
-    ClusterID       string          `json:"clusterID"`
-    ProcessorType   string          `json:"processorType"`
-    SocketsPerNode  int             `json:"socketsPerNode"`
-    CoresPerSocket  int             `json:"coresPerSocket"`
-    ThreadsPerCore  int             `json:"threadsPerCore"`
-    FlopRateScalar  int             `json:"flopRateScalar"`
-    FlopRateSimd    int             `json:"flopRateSimd"`
-    MemoryBandwidth int             `json:"memoryBandwidth"`
-    MetricConfig    []*MetricConfig `json:"metricConfig"`
-    FilterRanges    *FilterRanges   `json:"filterRanges"`
-}
-
 type FilterRanges struct {
     Duration  *IntRangeOutput `json:"duration"`
     NumNodes  *IntRangeOutput `json:"numNodes"`

View File

@@ -1,11 +1,14 @@
 package graph
 
 import (
+    "context"
     "errors"
     "fmt"
     "regexp"
     "strings"
+    "time"
 
+    "github.com/ClusterCockpit/cc-jobarchive/auth"
     "github.com/ClusterCockpit/cc-jobarchive/graph/model"
     sq "github.com/Masterminds/squirrel"
     "github.com/jmoiron/sqlx"
@@ -37,13 +40,18 @@ func ScanJob(row Scannable) (*model.Job, error) {
         return nil, err
     }
 
+    if job.Duration == 0 && job.State == model.JobStateRunning {
+        job.Duration = int(time.Since(job.StartTime).Seconds())
+    }
+
     job.Nodes = strings.Split(nodeList, ",")
     return job, nil
 }
 
 // Helper function for the `jobs` GraphQL-Query. Is also used elsewhere when a list of jobs is needed.
-func (r *Resolver) queryJobs(filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*model.Job, int, error) {
+func (r *Resolver) queryJobs(ctx context.Context, filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*model.Job, int, error) {
     query := sq.Select(JobTableCols...).From("job")
+    query = securityCheck(ctx, query)
 
     if order != nil {
         field := toSnakeCase(order.Field)
@@ -100,6 +108,20 @@ func (r *Resolver) queryJobs(filters []*model.JobFilter, page *model.PageRequest
     return jobs, count, nil
 }
 
+func securityCheck(ctx context.Context, query sq.SelectBuilder) sq.SelectBuilder {
+    val := ctx.Value(auth.ContextUserKey)
+    if val == nil {
+        return query
+    }
+
+    user := val.(*auth.User)
+    if user.IsAdmin {
+        return query
+    }
+
+    return query.Where("job.user_id = ?", user.Username)
+}
+
 // Build a sq.SelectBuilder out of a model.JobFilter.
 func buildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.SelectBuilder {
     if filter.Tags != nil {
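
A throwaway sketch (not included in this commit) of what securityCheck does to a job query: with a non-admin *auth.User stored under auth.ContextUserKey, as the resolvers' request context is expected to carry it, the builder gains a job.user_id predicate; admins and contexts without a user pass through unchanged. The test name and the user values are made up.

    package graph

    import (
        "context"
        "fmt"
        "testing"

        "github.com/ClusterCockpit/cc-jobarchive/auth"
        sq "github.com/Masterminds/squirrel"
    )

    func TestSecurityCheckSketch(t *testing.T) {
        base := sq.Select("job.id").From("job")

        // Hypothetical non-admin user placed in the context the same way the
        // auth middleware does (securityCheck type-asserts *auth.User from it).
        ctx := context.WithValue(context.Background(), auth.ContextUserKey,
            &auth.User{Username: "alice", IsAdmin: false})

        sql, args, err := securityCheck(ctx, base).ToSql()
        if err != nil {
            t.Fatal(err)
        }
        fmt.Println(sql, args)
        // Expected shape: "SELECT job.id FROM job WHERE job.user_id = ?" ["alice"]
    }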

View File

@@ -151,7 +151,9 @@ func (r *queryResolver) Tags(ctx context.Context) ([]*model.JobTag, error) {
 }
 
 func (r *queryResolver) Job(ctx context.Context, id string) (*model.Job, error) {
-    return ScanJob(sq.Select(JobTableCols...).From("job").Where("job.id = ?", id).RunWith(r.DB).QueryRow())
+    query := sq.Select(JobTableCols...).From("job").Where("job.id = ?", id)
+    query = securityCheck(ctx, query)
+    return ScanJob(query.RunWith(r.DB).QueryRow())
 }
 
 func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) {
@@ -181,7 +183,7 @@ func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobF
 }
 
 func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) {
-    jobs, count, err := r.queryJobs(filter, page, order)
+    jobs, count, err := r.queryJobs(ctx, filter, page, order)
     if err != nil {
         return nil, err
     }

View File

@@ -47,6 +47,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
         ).From("job").Where("job.cluster_id = ?", cluster.ClusterID).GroupBy(col)
     }
 
+    query = securityCheck(ctx, query)
     for _, f := range filter {
         query = buildWhereClause(f, query)
     }
@@ -82,6 +83,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
     if groupBy == nil {
         query := sq.Select("COUNT(job.id)").From("job").Where("job.duration < 120")
+        query = securityCheck(ctx, query)
         for _, f := range filter {
             query = buildWhereClause(f, query)
         }
@@ -91,6 +93,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
     } else {
         col := groupBy2column[*groupBy]
         query := sq.Select(col, "COUNT(job.id)").From("job").Where("job.duration < 120")
+        query = securityCheck(ctx, query)
         for _, f := range filter {
             query = buildWhereClause(f, query)
         }
@@ -133,12 +136,12 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
         if histogramsNeeded {
             var err error
-            stat.HistWalltime, err = r.jobsStatisticsHistogram("ROUND(job.duration / 3600) as value", filter, id, col)
+            stat.HistWalltime, err = r.jobsStatisticsHistogram(ctx, "ROUND(job.duration / 3600) as value", filter, id, col)
             if err != nil {
                 return nil, err
             }
 
-            stat.HistNumNodes, err = r.jobsStatisticsHistogram("job.num_nodes as value", filter, id, col)
+            stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter, id, col)
             if err != nil {
                 return nil, err
             }
@@ -150,8 +153,9 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
 // `value` must be the column grouped by, but renamed to "value". `id` and `col` can optionally be used
 // to add a condition to the query of the kind "<col> = <id>".
-func (r *queryResolver) jobsStatisticsHistogram(value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) {
+func (r *queryResolver) jobsStatisticsHistogram(ctx context.Context, value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) {
     query := sq.Select(value, "COUNT(job.id) AS count").From("job")
+    query = securityCheck(ctx, query)
     for _, f := range filters {
         query = buildWhereClause(f, query)
     }
@@ -179,7 +183,7 @@ func (r *queryResolver) jobsStatisticsHistogram(value string, filters []*model.J
 // Helper function for the rooflineHeatmap GraphQL query placed here so that schema.resolvers.go is not too full.
 func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
-    jobs, count, err := r.queryJobs(filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil)
+    jobs, count, err := r.queryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil)
     if err != nil {
         return nil, err
     }
@@ -232,7 +236,7 @@ func (r *Resolver) rooflineHeatmap(ctx context.Context, filter []*model.JobFilte
 // Helper function for the jobsFootprints GraphQL query placed here so that schema.resolvers.go is not too full.
 func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.MetricFootprints, error) {
-    jobs, count, err := r.queryJobs(filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil)
+    jobs, count, err := r.queryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: 501}, nil)
     if err != nil {
         return nil, err
     }

View File

@@ -1 +0,0 @@
-{"analysis_view_histogramMetrics":["flops_any","mem_bw","mem_used"],"analysis_view_scatterPlotMetrics":[["flops_any","mem_bw"],["flops_any","cpu_load"],["cpu_load","mem_bw"]],"job_view_nodestats_selectedMetrics":["flops_any","mem_bw","mem_used"],"job_view_polarPlotMetrics":["flops_any","mem_bw","mem_used","net_bw","file_bw"],"job_view_selectedMetrics":["flops_any","mem_bw","mem_used"],"plot_general_colorBackground":true,"plot_general_colorscheme":["#00bfff","#0000ff","#ff00ff","#ff0000","#ff8000","#ffff00","#80ff00"],"plot_general_lineWidth":1,"plot_list_jobsPerPage":10,"plot_list_selectedMetrics":["cpu_load","mem_used","flops_any","mem_bw","clock"],"plot_view_plotsPerRow":4,"plot_view_showPolarplot":true,"plot_view_showRoofline":true,"plot_view_showStatTable":true}