mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-11-10 08:57:25 +01:00)

cleanup and comments

This commit is contained in:
parent f69c3945d4
commit 7be38277a9
.github/workflows/test.yml (vendored, 4 lines changed)
@ -14,6 +14,4 @@ jobs:
run: |
go build ./...
go vet ./...
go test .
env BASEPATH="../" go test ./repository
env BASEPATH="../" go test ./config
go test ./...
@ -1,4 +1,4 @@
package main
package repository

import (
"bufio"
@ -9,14 +9,13 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/log"
"github.com/ClusterCockpit/cc-backend/repository"
"github.com/ClusterCockpit/cc-backend/schema"
"github.com/jmoiron/sqlx"
)

// `AUTO_INCREMENT` is in a comment because of this hack:
// https://stackoverflow.com/a/41028314 (sqlite creates unique ids automatically)
const JOBS_DB_SCHEMA string = `
const JobsDBSchema string = `
DROP TABLE IF EXISTS jobtag;
DROP TABLE IF EXISTS job;
DROP TABLE IF EXISTS tag;
@ -32,8 +31,8 @@ const JOBS_DB_SCHEMA string = `
project VARCHAR(255) NOT NULL,
` + "`partition`" + ` VARCHAR(255) NOT NULL, -- partition is a keyword in mysql -.-
array_job_id BIGINT NOT NULL,
duration INT,
walltime INT,
duration INT NOT NULL DEFAULT 0,
walltime INT NOT NULL DEFAULT 0,
job_state VARCHAR(255) NOT NULL CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')),
meta_data TEXT, -- JSON
resources TEXT NOT NULL, -- JSON
@ -68,7 +67,8 @@ const JOBS_DB_SCHEMA string = `
FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
`

const JOBS_DB_INDEXES string = `
// Indexes are created after the job-archive is traversed for faster inserts.
const JobsDbIndexes string = `
CREATE INDEX job_by_user ON job (user);
CREATE INDEX job_by_starttime ON job (start_time);
CREATE INDEX job_by_job_id ON job (job_id);
@ -77,12 +77,12 @@ const JOBS_DB_INDEXES string = `

// Delete the tables "job", "tag" and "jobtag" from the database and
// repopulate them using the jobs found in `archive`.
func initDB(db *sqlx.DB, archive string) error {
func InitDB(db *sqlx.DB, archive string) error {
starttime := time.Now()
log.Print("Building job table...")

// Basic database structure:
_, err := db.Exec(JOBS_DB_SCHEMA)
_, err := db.Exec(JobsDBSchema)
if err != nil {
return err
}
@ -96,16 +96,21 @@ func initDB(db *sqlx.DB, archive string) error {
return err
}

// Inserts are bundled into transactions because in sqlite,
// that speeds up inserts A LOT.
tx, err := db.Beginx()
if err != nil {
return err
}

stmt, err := tx.PrepareNamed(repository.NamedJobInsert)
stmt, err := tx.PrepareNamed(NamedJobInsert)
if err != nil {
return err
}

// Not using log.Print because we want the line to end with `\r` and
// this function is only ever called when a special command line flag
// is passed anyways.
fmt.Printf("%d jobs inserted...\r", 0)
i := 0
tags := make(map[string]int64)
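The hunk above only shows fragments of the bulk-insert path. As a minimal sketch (not part of this commit), the pattern it describes could look like the following: one transaction and one prepared named statement for all rows, indexes created only afterwards. The `row` struct, table layout and insert statement here are invented stand-ins, not the real job schema.

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

type row struct {
	JobID   int64  `db:"job_id"`
	Cluster string `db:"cluster"`
}

func main() {
	db := sqlx.MustOpen("sqlite3", ":memory:")
	db.MustExec(`CREATE TABLE job (job_id BIGINT NOT NULL, cluster VARCHAR(255) NOT NULL);`)

	// One transaction plus one prepared named statement for all inserts:
	// in sqlite this is much faster than autocommitting every single row.
	tx, err := db.Beginx()
	if err != nil {
		log.Fatal(err)
	}
	stmt, err := tx.PrepareNamed(`INSERT INTO job (job_id, cluster) VALUES (:job_id, :cluster);`)
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 1000; i++ {
		if _, err := stmt.Exec(row{JobID: int64(i), Cluster: "testcluster"}); err != nil {
			log.Fatal(err)
		}
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}

	// Create the index only after the bulk load so it does not have to be
	// updated for every insert (same idea as JobsDbIndexes above).
	db.MustExec(`CREATE INDEX job_by_job_id ON job (job_id);`)
}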
@ -159,6 +164,8 @@ func initDB(db *sqlx.DB, archive string) error {
return err
}

// For compability with the old job-archive directory structure where
// there was no start time directory.
for _, startTimeDir := range startTimeDirs {
if startTimeDir.Type().IsRegular() && startTimeDir.Name() == "meta.json" {
if err := handleDirectory(dirpath); err != nil {
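Only a slice of the directory walk is visible in this hunk. A minimal sketch of the compatibility check it describes, assuming the archive stores either <jobdir>/meta.json (old layout) or <jobdir>/<starttime>/meta.json (new layout), might look like this; walkJobDir and handleDir are invented names, not the functions used in the commit.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func walkJobDir(jobdir string, handleDir func(path string) error) error {
	entries, err := os.ReadDir(jobdir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		// Old layout: meta.json sits directly in the job directory.
		if e.Type().IsRegular() && e.Name() == "meta.json" {
			return handleDir(jobdir)
		}
		// New layout: one subdirectory per start time.
		if e.IsDir() {
			if err := handleDir(filepath.Join(jobdir, e.Name())); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	err := walkJobDir("./job-archive/testcluster/123", func(path string) error {
		fmt.Println("would load", filepath.Join(path, "meta.json"))
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
}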
@ -180,7 +187,7 @@ func initDB(db *sqlx.DB, archive string) error {

// Create indexes after inserts so that they do not
// need to be continually updated.
if _, err := db.Exec(JOBS_DB_INDEXES); err != nil {
if _, err := db.Exec(JobsDbIndexes); err != nil {
return err
}
@ -226,7 +233,7 @@ func loadJob(tx *sqlx.Tx, stmt *sqlx.NamedStmt, tags map[string]int64, path stri
return err
}

if err := repository.SanityChecks(&job.BaseJob); err != nil {
if err := SanityChecks(&job.BaseJob); err != nil {
return err
}
@ -262,11 +269,3 @@ func loadJob(tx *sqlx.Tx, stmt *sqlx.NamedStmt, tags map[string]int64, path stri

return nil
}

func loadJobStat(job *schema.JobMeta, metric string) float64 {
if stats, ok := job.Statistics[metric]; ok {
return stats.Avg
}

return 0.0
}
@ -5,14 +5,17 @@ import (
"testing"

"github.com/jmoiron/sqlx"

"github.com/ClusterCockpit/cc-backend/test"
_ "github.com/mattn/go-sqlite3"
)

var db *sqlx.DB

func init() {
db = test.InitDB()
var err error
db, err = sqlx.Open("sqlite3", "../test/test.db")
if err != nil {
fmt.Println(err)
}
}

func setup(t *testing.T) *JobRepository {
routes.go (128 lines changed)
@ -9,6 +9,9 @@ import (

"github.com/ClusterCockpit/cc-backend/auth"
"github.com/ClusterCockpit/cc-backend/config"
"github.com/ClusterCockpit/cc-backend/graph"
"github.com/ClusterCockpit/cc-backend/graph/model"
"github.com/ClusterCockpit/cc-backend/log"
"github.com/ClusterCockpit/cc-backend/schema"
"github.com/ClusterCockpit/cc-backend/templates"
"github.com/gorilla/mux"
@ -24,6 +27,131 @@ type Route struct {
Setup func(i InfoType, r *http.Request) InfoType
}

var routes []Route = []Route{
{"/", "home.tmpl", "ClusterCockpit", false, setupHomeRoute},
{"/config", "config.tmpl", "Settings", false, func(i InfoType, r *http.Request) InfoType { return i }},
{"/monitoring/jobs/", "monitoring/jobs.tmpl", "Jobs - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { return i }},
{"/monitoring/job/{id:[0-9]+}", "monitoring/job.tmpl", "Job <ID> - ClusterCockpit", false, setupJobRoute},
{"/monitoring/users/", "monitoring/list.tmpl", "Users - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "USER"; return i }},
{"/monitoring/projects/", "monitoring/list.tmpl", "Projects - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "PROJECT"; return i }},
{"/monitoring/tags/", "monitoring/taglist.tmpl", "Tags - ClusterCockpit", false, setupTaglistRoute},
{"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute},
{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> - ClusterCockpit", false, setupClusterRoute},
{"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute},
{"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analaysis - ClusterCockpit", true, setupAnalysisRoute},
}
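setupRoutes itself is not part of this hunk. A minimal sketch of how a table like `routes` is typically consumed with gorilla/mux could look like the following; InfoType, Route and the "rendering" here are simplified stand-ins for the real template handling, not the code in this commit.

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

type InfoType map[string]interface{}

type Route struct {
	Route    string
	Template string
	Title    string
	Filter   bool
	Setup    func(i InfoType, r *http.Request) InfoType
}

func setupRoutes(router *mux.Router, routes []Route) {
	for _, route := range routes {
		route := route // capture the loop variable for the closure
		router.HandleFunc(route.Route, func(rw http.ResponseWriter, r *http.Request) {
			info := route.Setup(InfoType{}, r)
			// The real code renders route.Template with the title and infos;
			// here we only print what would be rendered.
			fmt.Fprintf(rw, "render %s (title: %q, infos: %v)\n", route.Template, route.Title, info)
		})
	}
}

func main() {
	r := mux.NewRouter()
	setupRoutes(r, []Route{
		{"/monitoring/job/{id:[0-9]+}", "monitoring/job.tmpl", "Job <ID>", false,
			func(i InfoType, r *http.Request) InfoType { i["id"] = mux.Vars(r)["id"]; return i }},
	})
	http.ListenAndServe(":8080", r)
}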
func setupHomeRoute(i InfoType, r *http.Request) InfoType {
type cluster struct {
Name string
RunningJobs int
TotalJobs int
RecentShortJobs int
}

runningJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
State: []schema.JobState{schema.JobStateRunning},
}}, nil)
if err != nil {
log.Errorf("failed to count jobs: %s", err.Error())
runningJobs = map[string]int{}
}
totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil)
if err != nil {
log.Errorf("failed to count jobs: %s", err.Error())
totalJobs = map[string]int{}
}

from := time.Now().Add(-24 * time.Hour)
recentShortJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
StartTime: &model.TimeRange{From: &from, To: nil},
Duration: &model.IntRange{From: 0, To: graph.ShortJobDuration},
}}, nil)
if err != nil {
log.Errorf("failed to count jobs: %s", err.Error())
recentShortJobs = map[string]int{}
}

clusters := make([]cluster, 0)
for _, c := range config.Clusters {
clusters = append(clusters, cluster{
Name: c.Name,
RunningJobs: runningJobs[c.Name],
TotalJobs: totalJobs[c.Name],
RecentShortJobs: recentShortJobs[c.Name],
})
}

i["clusters"] = clusters
return i
}
func setupJobRoute(i InfoType, r *http.Request) InfoType {
i["id"] = mux.Vars(r)["id"]
return i
}

func setupUserRoute(i InfoType, r *http.Request) InfoType {
i["id"] = mux.Vars(r)["id"]
i["username"] = mux.Vars(r)["id"]
return i
}

func setupClusterRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["id"] = vars["cluster"]
i["cluster"] = vars["cluster"]
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
if from != "" || to != "" {
i["from"] = from
i["to"] = to
}
return i
}
func setupNodeRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["cluster"] = vars["cluster"]
i["hostname"] = vars["hostname"]
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
if from != "" || to != "" {
i["from"] = from
i["to"] = to
}
return i
}

func setupAnalysisRoute(i InfoType, r *http.Request) InfoType {
i["cluster"] = mux.Vars(r)["cluster"]
return i
}
func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
var username *string = nil
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleAdmin) {
username = &user.Username
}

tags, counts, err := jobRepo.CountTags(username)
tagMap := make(map[string][]map[string]interface{})
if err != nil {
log.Errorf("GetTags failed: %s", err.Error())
i["tagmap"] = tagMap
return i
}

for _, tag := range tags {
tagItem := map[string]interface{}{
"id": tag.ID,
"name": tag.Name,
"count": counts[tag.Name],
}
tagMap[tag.Type] = append(tagMap[tag.Type], tagItem)
}
i["tagmap"] = tagMap
return i
}

func buildFilterPresets(query url.Values) map[string]interface{} {
filterPresets := map[string]interface{}{}
@ -12,6 +12,9 @@ import (
"syscall"
)

// Very simple and limited .env file reader.
// All variable definitions found are directly
// added to the processes environment.
func loadEnv(file string) error {
f, err := os.Open(file)
if err != nil {
@ -74,6 +77,10 @@ func loadEnv(file string) error {
return s.Err()
}
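The body of loadEnv is largely outside this hunk. A minimal sketch of the behaviour the new comment describes (read KEY=VALUE lines and put them directly into the process environment) could look like this; it ignores quoting and escaping, and loadEnvSketch is an invented name, not the real function.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func loadEnvSketch(file string) error {
	f, err := os.Open(file)
	if err != nil {
		return err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip blank lines and comments
		}
		key, val, ok := strings.Cut(line, "=")
		if !ok {
			continue
		}
		if err := os.Setenv(strings.TrimSpace(key), strings.TrimSpace(val)); err != nil {
			return err
		}
	}
	return s.Err()
}

func main() {
	if err := loadEnvSketch(".env"); err != nil {
		fmt.Println(err)
	}
}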
// Changes the processes user and group to that
// specified in the config.json. The go runtime
// takes care of all threads (and not only the calling one)
// executing the underlying systemcall.
func dropPrivileges() error {
if programConfig.Group != "" {
g, err := user.LookupGroup(programConfig.Group)
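dropPrivileges is also only partially visible here. On a unix-like system, the general pattern the comment describes (look up the configured group and user, then setgid/setuid, which newer Go runtimes apply to all threads) might look roughly like the sketch below; dropTo and its arguments are illustrative, not the code in this commit.

package main

import (
	"log"
	"os/user"
	"strconv"
	"syscall"
)

func dropTo(username, groupname string) error {
	if groupname != "" {
		g, err := user.LookupGroup(groupname)
		if err != nil {
			return err
		}
		gid, err := strconv.Atoi(g.Gid)
		if err != nil {
			return err
		}
		// Drop the group first, while we still have the privileges to do so.
		if err := syscall.Setgid(gid); err != nil {
			return err
		}
	}
	if username != "" {
		u, err := user.Lookup(username)
		if err != nil {
			return err
		}
		uid, err := strconv.Atoi(u.Uid)
		if err != nil {
			return err
		}
		if err := syscall.Setuid(uid); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := dropTo("nobody", "nogroup"); err != nil {
		log.Println(err)
	}
}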
server.go (202 lines changed)
@ -25,11 +25,9 @@ import (
"github.com/ClusterCockpit/cc-backend/config"
"github.com/ClusterCockpit/cc-backend/graph"
"github.com/ClusterCockpit/cc-backend/graph/generated"
"github.com/ClusterCockpit/cc-backend/graph/model"
"github.com/ClusterCockpit/cc-backend/log"
"github.com/ClusterCockpit/cc-backend/metricdata"
"github.com/ClusterCockpit/cc-backend/repository"
"github.com/ClusterCockpit/cc-backend/schema"
"github.com/ClusterCockpit/cc-backend/templates"
"github.com/google/gops/agent"
"github.com/gorilla/handlers"
@ -40,7 +38,6 @@ import (
_ "github.com/mattn/go-sqlite3"
)

var db *sqlx.DB
var jobRepo *repository.JobRepository

// Format of the configurartion (file). See below for the defaults.
@ -127,147 +124,22 @@ var programConfig ProgramConfig = ProgramConfig{
},
}

func setupHomeRoute(i InfoType, r *http.Request) InfoType {
type cluster struct {
Name string
RunningJobs int
TotalJobs int
RecentShortJobs int
}

runningJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
State: []schema.JobState{schema.JobStateRunning},
}}, nil)
if err != nil {
log.Errorf("failed to count jobs: %s", err.Error())
runningJobs = map[string]int{}
}
totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil)
if err != nil {
log.Errorf("failed to count jobs: %s", err.Error())
totalJobs = map[string]int{}
}

from := time.Now().Add(-24 * time.Hour)
recentShortJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
StartTime: &model.TimeRange{From: &from, To: nil},
Duration: &model.IntRange{From: 0, To: graph.ShortJobDuration},
}}, nil)
if err != nil {
log.Errorf("failed to count jobs: %s", err.Error())
recentShortJobs = map[string]int{}
}

clusters := make([]cluster, 0)
for _, c := range config.Clusters {
clusters = append(clusters, cluster{
Name: c.Name,
RunningJobs: runningJobs[c.Name],
TotalJobs: totalJobs[c.Name],
RecentShortJobs: recentShortJobs[c.Name],
})
}

i["clusters"] = clusters
return i
}
func setupJobRoute(i InfoType, r *http.Request) InfoType {
i["id"] = mux.Vars(r)["id"]
return i
}

func setupUserRoute(i InfoType, r *http.Request) InfoType {
i["id"] = mux.Vars(r)["id"]
i["username"] = mux.Vars(r)["id"]
return i
}

func setupClusterRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["id"] = vars["cluster"]
i["cluster"] = vars["cluster"]
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
if from != "" || to != "" {
i["from"] = from
i["to"] = to
}
return i
}

func setupNodeRoute(i InfoType, r *http.Request) InfoType {
vars := mux.Vars(r)
i["cluster"] = vars["cluster"]
i["hostname"] = vars["hostname"]
from, to := r.URL.Query().Get("from"), r.URL.Query().Get("to")
if from != "" || to != "" {
i["from"] = from
i["to"] = to
}
return i
}

func setupAnalysisRoute(i InfoType, r *http.Request) InfoType {
i["cluster"] = mux.Vars(r)["cluster"]
return i
}
func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
var username *string = nil
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleAdmin) {
username = &user.Username
}

tags, counts, err := jobRepo.CountTags(username)
tagMap := make(map[string][]map[string]interface{})
if err != nil {
log.Errorf("GetTags failed: %s", err.Error())
i["tagmap"] = tagMap
return i
}

for _, tag := range tags {
tagItem := map[string]interface{}{
"id": tag.ID,
"name": tag.Name,
"count": counts[tag.Name],
}
tagMap[tag.Type] = append(tagMap[tag.Type], tagItem)
}
log.Infof("TAGS %+v", tags)
i["tagmap"] = tagMap
return i
}
var routes []Route = []Route{
{"/", "home.tmpl", "ClusterCockpit", false, setupHomeRoute},
{"/config", "config.tmpl", "Settings", false, func(i InfoType, r *http.Request) InfoType { return i }},
{"/monitoring/jobs/", "monitoring/jobs.tmpl", "Jobs - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { return i }},
{"/monitoring/job/{id:[0-9]+}", "monitoring/job.tmpl", "Job <ID> - ClusterCockpit", false, setupJobRoute},
{"/monitoring/users/", "monitoring/list.tmpl", "Users - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "USER"; return i }},
{"/monitoring/projects/", "monitoring/list.tmpl", "Projects - ClusterCockpit", true, func(i InfoType, r *http.Request) InfoType { i["listType"] = "PROJECT"; return i }},
{"/monitoring/tags/", "monitoring/taglist.tmpl", "Tags - ClusterCockpit", false, setupTaglistRoute},
{"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute},
{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> - ClusterCockpit", false, setupClusterRoute},
{"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute},
{"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analaysis - ClusterCockpit", true, setupAnalysisRoute},
}
func main() {
var flagReinitDB, flagStopImmediately, flagSyncLDAP, flagGops bool
var flagConfigFile, flagImportJob string
var flagNewUser, flagDelUser, flagGenJWT string
flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize `job`, `tag`, and `jobtag` tables")
flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the `user` table with ldap")
flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap")
flag.BoolVar(&flagStopImmediately, "no-server", false, "Do not start a server, stop right after initialization and argument handling")
flag.BoolVar(&flagGops, "gops", false, "Enable a github.com/google/gops/agent")
flag.StringVar(&flagConfigFile, "config", "", "Location of the config file for this server (overwrites the defaults)")
flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)")
flag.StringVar(&flagConfigFile, "config", "", "Overwrite the global config options by those specified in `config.json`")
flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `<username>:[admin,api,user]:<password>`")
flag.StringVar(&flagDelUser, "del-user", "", "Remove user by username")
flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by the username")
flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `<path-to-meta.json>:<path-to-data.json>,...`")
flag.Parse()

// See https://github.com/google/gops (Runtime overhead is almost zero)
if flagGops {
if err := agent.Listen(agent.Options{}); err != nil {
log.Fatalf("gops/agent.Listen failed: %s", err.Error())
@ -291,18 +163,24 @@ func main() {
}
}

// As a special case for `db`, allow using an environment variable instead of the value
// stored in the config. This can be done for people having security concerns about storing
// the password for their mysql database in the config.json.
if strings.HasPrefix(programConfig.DB, "env:") {
envvar := strings.TrimPrefix(programConfig.DB, "env:")
programConfig.DB = os.Getenv(envvar)
}

var err error
var db *sqlx.DB
if programConfig.DBDriver == "sqlite3" {
db, err = sqlx.Open("sqlite3", fmt.Sprintf("%s?_foreign_keys=on", programConfig.DB))
if err != nil {
log.Fatal(err)
}

// sqlite does not multithread. Having more than one connection open would just mean
// waiting for locks.
db.SetMaxOpenConns(1)
} else if programConfig.DBDriver == "mysql" {
db, err = sqlx.Open("mysql", fmt.Sprintf("%s?multiStatements=true", programConfig.DB))
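A small self-contained sketch of the `env:` convention introduced above: the DSN in config.json can name an environment variable instead of containing the password itself. The variable name CC_DB_DSN and the resolveDSN helper are only examples, not identifiers from the commit.

package main

import (
	"fmt"
	"os"
	"strings"
)

func resolveDSN(configured string) string {
	// config.json could contain e.g. "db": "env:CC_DB_DSN" and the server would
	// then be started with CC_DB_DSN="user:pass@tcp(localhost:3306)/cc" exported,
	// so the password never has to live in the config file.
	if strings.HasPrefix(configured, "env:") {
		return os.Getenv(strings.TrimPrefix(configured, "env:"))
	}
	return configured
}

func main() {
	os.Setenv("CC_DB_DSN", "user:pass@tcp(localhost:3306)/cc")
	fmt.Println(resolveDSN("env:CC_DB_DSN")) // resolved from the environment
	fmt.Println(resolveDSN("./var/job.db"))  // used as-is
}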
@ -317,7 +195,9 @@ func main() {
log.Fatalf("unsupported database driver: %s", programConfig.DBDriver)
}

// Initialize sub-modules...
// Initialize sub-modules and handle all command line flags.
// The order here is important! For example, the metricdata package
// depends on the config package.

var authentication *auth.Authentication
if !programConfig.DisableAuthentication {
@ -380,7 +260,7 @@ func main() {
}

if flagReinitDB {
if err := initDB(db, programConfig.JobArchive); err != nil {
if err := repository.InitDB(db, programConfig.JobArchive); err != nil {
log.Fatal(err)
}
}
@ -400,11 +280,13 @@ func main() {
return
}

// Build routes...
// Setup the http.Handler/Router used by the server

resolver := &graph.Resolver{DB: db, Repo: jobRepo}
graphQLEndpoint := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: resolver}))
if os.Getenv("DEBUG") != "1" {
// Having this handler means that a error message is returned via GraphQL instead of the connection simply beeing closed.
// The problem with this is that then, no more stacktrace is printed to stderr.
graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error {
switch e := err.(type) {
case string:
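The body of the recover function is cut off by the hunk. One plausible shape for such a handler (not necessarily the code in this commit) is to turn the panic value back into an error so gqlgen can report it over GraphQL instead of dropping the connection:

package main

import (
	"context"
	"errors"
	"fmt"
)

// recoverToError is an illustrative stand-in for the function passed to
// SetRecoverFunc above; the ctx parameter is kept only to match that signature.
func recoverToError(ctx context.Context, err interface{}) error {
	switch e := err.(type) {
	case string:
		return errors.New(e)
	case error:
		return e
	default:
		return fmt.Errorf("internal server error (panic): %v", e)
	}
}

func main() {
	fmt.Println(recoverToError(context.Background(), "boom"))
	fmt.Println(recoverToError(context.Background(), errors.New("wrapped")))
}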
@ -417,7 +299,6 @@ func main() {
})
}

graphQLPlayground := playground.Handler("GraphQL playground", "/query")
api := &api.RestApi{
JobRepository: jobRepo,
Resolver: resolver,
@ -425,33 +306,21 @@ func main() {
Authentication: authentication,
}

handleGetLogin := func(rw http.ResponseWriter, r *http.Request) {
templates.Render(rw, r, "login.tmpl", &templates.Page{
Title: "Login",
})
}

r := mux.NewRouter()
r.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
templates.Render(rw, r, "404.tmpl", &templates.Page{
Title: "Not found",
})
})

r.Handle("/playground", graphQLPlayground)

r.HandleFunc("/login", handleGetLogin).Methods(http.MethodGet)
r.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
templates.Render(rw, r, "login.tmpl", &templates.Page{Title: "Login"})
}).Methods(http.MethodGet)
r.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) {
templates.Render(rw, r, "imprint.tmpl", &templates.Page{
Title: "Imprint",
})
templates.Render(rw, r, "imprint.tmpl", &templates.Page{Title: "Imprint"})
})
r.HandleFunc("/privacy", func(rw http.ResponseWriter, r *http.Request) {
templates.Render(rw, r, "privacy.tmpl", &templates.Page{
Title: "Privacy",
})
templates.Render(rw, r, "privacy.tmpl", &templates.Page{Title: "Privacy"})
})

// Some routes, such as /login or /query, should only be accessible to a user that is logged in.
// Those should be mounted to this subrouter. If authentication is enabled, a middleware will prevent
// any unauthenticated accesses.
secured := r.PathPrefix("/").Subrouter()
if !programConfig.DisableAuthentication {
r.Handle("/login", authentication.Login(
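The wiring of the auth middleware itself lies outside this hunk. In general, the "secured" subrouter pattern with gorilla/mux works as sketched below; the Authorization-header check is a dummy stand-in for the real session/JWT handling in the authentication package.

package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Public routes stay on the top-level router.
	r.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) {
		rw.Write([]byte("login page"))
	}).Methods(http.MethodGet)

	// Everything mounted on `secured` passes through the middleware first.
	secured := r.PathPrefix("/").Subrouter()
	secured.Use(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
			if req.Header.Get("Authorization") == "" { // stand-in for a real auth check
				http.Error(rw, "unauthorized", http.StatusUnauthorized)
				return
			}
			next.ServeHTTP(rw, req)
		})
	})
	secured.HandleFunc("/query", func(rw http.ResponseWriter, r *http.Request) {
		rw.Write([]byte("graphql endpoint"))
	})

	http.ListenAndServe(":8080", r)
}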
@ -490,8 +359,11 @@ func main() {
})
})
}

r.Handle("/playground", playground.Handler("GraphQL playground", "/query"))
secured.Handle("/query", graphQLEndpoint)

// Send a searchId and then reply with a redirect to a user or job.
secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) {
if search := r.URL.Query().Get("searchId"); search != "" {
job, username, err := api.JobRepository.FindJobOrUser(r.Context(), search)
@ -515,6 +387,7 @@ func main() {
}
})

// Mount all /monitoring/... and /api/... routes.
setupRoutes(secured, routes)
api.MountRoutes(secured)
@ -525,11 +398,18 @@ func main() {
handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}),
handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}),
handlers.AllowedOrigins([]string{"*"})))
handler := handlers.CustomLoggingHandler(log.InfoWriter, r, func(w io.Writer, params handlers.LogFormatterParams) {
log.Finfof(w, "%s %s (%d, %.02fkb, %dms)",
handler := handlers.CustomLoggingHandler(io.Discard, r, func(_ io.Writer, params handlers.LogFormatterParams) {
if strings.HasPrefix(params.Request.RequestURI, "/api/") {
log.Infof("%s %s (%d, %.02fkb, %dms)",
params.Request.Method, params.URL.RequestURI(),
params.StatusCode, float32(params.Size)/1024,
time.Since(params.TimeStamp).Milliseconds())
} else {
log.Debugf("%s %s (%d, %.02fkb, %dms)",
params.Request.Method, params.URL.RequestURI(),
params.StatusCode, float32(params.Size)/1024,
time.Since(params.TimeStamp).Milliseconds())
}
})

var wg sync.WaitGroup
@ -1,4 +1,4 @@
package main
package test

import (
"bytes"
@ -21,13 +21,11 @@ import (
"github.com/ClusterCockpit/cc-backend/schema"
"github.com/gorilla/mux"
"github.com/jmoiron/sqlx"

_ "github.com/mattn/go-sqlite3"
)

func setup(t *testing.T) *api.RestApi {
if db != nil {
panic("prefer using sub-tests (`t.Run`) or implement `cleanup` before calling setup twice.")
}

const testclusterJson = `{
"name": "testcluster",
"subClusters": [
@ -96,17 +94,17 @@ func setup(t *testing.T) *api.RestApi {
}
f.Close()

db, err = sqlx.Open("sqlite3", fmt.Sprintf("%s?_foreign_keys=on", dbfilepath))
db, err := sqlx.Open("sqlite3", fmt.Sprintf("%s?_foreign_keys=on", dbfilepath))
if err != nil {
t.Fatal(err)
}

db.SetMaxOpenConns(1)
if _, err := db.Exec(JOBS_DB_SCHEMA); err != nil {
if _, err := db.Exec(repository.JobsDBSchema); err != nil {
t.Fatal(err)
}

if err := config.Init(db, false, programConfig.UiDefaults, jobarchive); err != nil {
if err := config.Init(db, false, map[string]interface{}{}, jobarchive); err != nil {
t.Fatal(err)
}
test/db.go (26 lines changed, file deleted)
@ -1,26 +0,0 @@
package test

import (
"fmt"
"os"

"github.com/jmoiron/sqlx"
_ "github.com/mattn/go-sqlite3"
)

func InitDB() *sqlx.DB {

bp := "./"
ebp := os.Getenv("BASEPATH")

if ebp != "" {
bp = ebp + "test/"
}

db, err := sqlx.Open("sqlite3", bp+"test.db")
if err != nil {
fmt.Println(err)
}

return db
}