Extract DB queries from REST API

parent 99865f4152
commit d9aac00476

74	api/rest.go
api/rest.go

@@ -19,14 +19,11 @@ import (
 	"github.com/ClusterCockpit/cc-backend/metricdata"
 	"github.com/ClusterCockpit/cc-backend/repository"
 	"github.com/ClusterCockpit/cc-backend/schema"
-	sq "github.com/Masterminds/squirrel"
 	"github.com/gorilla/mux"
-	"github.com/jmoiron/sqlx"
 )
 
 type RestApi struct {
-	r               *repository.JobRepository
-	DB              *sqlx.DB
+	JobRepository   *repository.JobRepository
 	Resolver        *graph.Resolver
 	AsyncArchiving  bool
 	MachineStateDir string
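The struct change above is the core of the commit: the REST layer no longer holds a raw *sqlx.DB, only a *repository.JobRepository. Below is a minimal, self-contained sketch of that dependency-injection shape, not code from cc-backend; the Job type, the getJob handler and the SQLite path are invented for illustration.

package main

import (
	"encoding/json"
	"log"
	"net/http"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// Job is a stand-in for schema.Job; only illustrative.
type Job struct {
	ID      int64  `db:"id" json:"id"`
	Cluster string `db:"cluster" json:"cluster"`
}

// JobRepository wraps the database handle, mirroring the struct the commit
// introduces in the repository package.
type JobRepository struct {
	DB *sqlx.DB
}

func (r *JobRepository) FindJobById(jobId int64) (*Job, error) {
	job := &Job{}
	err := r.DB.Get(job, "SELECT id, cluster FROM job WHERE id = ?", jobId)
	return job, err
}

// RestApi depends on the repository, not on *sqlx.DB, which is the shape of
// the change above.
type RestApi struct {
	JobRepository *JobRepository
}

func (api *RestApi) getJob(rw http.ResponseWriter, r *http.Request) {
	// Hard-coded id; a real handler would read it from the request.
	job, err := api.JobRepository.FindJobById(42)
	if err != nil {
		http.Error(rw, err.Error(), http.StatusNotFound)
		return
	}
	json.NewEncoder(rw).Encode(job)
}

func main() {
	db, err := sqlx.Open("sqlite3", "./job.db") // path is an assumption
	if err != nil {
		log.Fatal(err)
	}
	api := &RestApi{JobRepository: &JobRepository{DB: db}}
	http.HandleFunc("/api/job", api.getJob)
	log.Fatal(http.ListenAndServe(":8080", nil))
}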
@@ -157,12 +154,12 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
 		var tagId int64
 		exists := false
 
-		if exists, tagId = api.r.TagExists(tag.Type, tag.Name); exists {
+		if exists, tagId = api.JobRepository.TagExists(tag.Type, tag.Name); exists {
 			http.Error(rw, fmt.Sprintf("the tag '%s:%s' does not exist", tag.Type, tag.Name), http.StatusNotFound)
 			return
 		}
 
-		if err := api.r.AddTag(job.JobID, tagId); err != nil {
+		if err := api.JobRepository.AddTag(job.JobID, tagId); err != nil {
 			http.Error(rw, err.Error(), http.StatusInternalServerError)
 			return
 		}
@@ -206,7 +203,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 	}
 
 	// Check if combination of (job_id, cluster_id, start_time) already exists:
-	rows, err := api.r.JobExists(req.JobID, req.Cluster, req.StartTime)
+	rows, err := api.JobRepository.JobExists(req.JobID, req.Cluster, req.StartTime)
 	if err != nil {
 		http.Error(rw, err.Error(), http.StatusInternalServerError)
 		return
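JobRepository.JobExists still returns the raw *sql.Rows, so the duplicate check itself stays in the handler: any returned row means the (job_id, cluster, start_time) combination is already registered. A self-contained sketch of that check follows, using a throwaway in-memory SQLite table and a hypothetical jobExists helper standing in for the handler logic.

package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// jobExists mirrors the duplicate check performed on the *sql.Rows returned
// by JobRepository.JobExists: a single row is enough to report a duplicate.
func jobExists(db *sqlx.DB, jobId int64, cluster string, startTime int64) (bool, error) {
	rows, err := db.Query(
		`SELECT job.id FROM job WHERE job.job_id = ? AND job.cluster = ? AND job.start_time = ?`,
		jobId, cluster, startTime)
	if err != nil {
		return false, err
	}
	defer rows.Close()
	return rows.Next(), nil
}

func main() {
	db := sqlx.MustOpen("sqlite3", ":memory:")
	db.MustExec(`CREATE TABLE job (id INTEGER PRIMARY KEY, job_id INTEGER, cluster TEXT, start_time INTEGER)`)
	db.MustExec(`INSERT INTO job (job_id, cluster, start_time) VALUES (101, 'emmy', 1609459200)`)

	// Job id, cluster name and start time are invented example values.
	exists, err := jobExists(db, 101, "emmy", 1609459200)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("duplicate:", exists)
}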
@@ -229,13 +226,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	res, err := api.DB.NamedExec(`INSERT INTO job (
-		job_id, user, project, cluster, `+"`partition`"+`, array_job_id, num_nodes, num_hwthreads, num_acc,
-		exclusive, monitoring_status, smt, job_state, start_time, duration, resources, meta_data
-	) VALUES (
-		:job_id, :user, :project, :cluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc,
-		:exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :resources, :meta_data
-	);`, req)
+	res, err := api.JobRepository.Add(req)
 	if err != nil {
 		log.Errorf("insert into job table failed: %s", err.Error())
 		http.Error(rw, err.Error(), http.StatusInternalServerError)
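The multi-line INSERT that used to live in startJob is now behind JobRepository.Add(req), but the mechanism is unchanged: sqlx's NamedExec binds the :name placeholders to struct fields via their db tags. A small sketch of that binding, using a made-up jobRow struct and an in-memory SQLite table rather than the real schema.JobMeta:

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// jobRow is a hypothetical, trimmed-down version of the struct passed to Add;
// sqlx maps the :name placeholders to the `db` struct tags.
type jobRow struct {
	JobID   int64  `db:"job_id"`
	User    string `db:"user"`
	Cluster string `db:"cluster"`
}

func main() {
	db, err := sqlx.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	db.MustExec(`CREATE TABLE job (job_id INTEGER, user TEXT, cluster TEXT)`)

	// NamedExec binds :job_id, :user and :cluster from the struct fields,
	// the same mechanism the INSERT moved into JobRepository.Add relies on.
	res, err := db.NamedExec(
		`INSERT INTO job (job_id, user, cluster) VALUES (:job_id, :user, :cluster)`,
		jobRow{JobID: 123, User: "alice", Cluster: "emmy"}) // example values
	if err != nil {
		log.Fatal(err)
	}
	id, _ := res.LastInsertId()
	log.Printf("inserted row %d", id)
}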
@@ -269,26 +260,15 @@ func (api *RestApi) stopJob(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	var err error
-	var sql string
-	var args []interface{}
 	id, ok := mux.Vars(r)["id"]
+	var job *schema.Job
+	var err error
 	if ok {
-		sql, args, err = sq.Select(schema.JobColumns...).From("job").Where("job.id = ?", id).ToSql()
+		job, err = api.JobRepository.StopById(id)
 	} else {
-		qb := sq.Select(schema.JobColumns...).From("job").
-			Where("job.job_id = ?", req.JobId).
-			Where("job.cluster = ?", req.Cluster)
-		if req.StartTime != nil {
-			qb = qb.Where("job.start_time = ?", *req.StartTime)
-		}
-		sql, args, err = qb.ToSql()
+		job, err = api.JobRepository.Stop(*req.JobId, *req.Cluster, *req.StartTime)
 	}
-	if err != nil {
-		http.Error(rw, err.Error(), http.StatusBadRequest)
-		return
-	}
-	job, err := schema.ScanJob(api.DB.QueryRowx(sql, args...))
 	if err != nil {
 		http.Error(rw, err.Error(), http.StatusBadRequest)
 		return
@@ -321,39 +301,7 @@ func (api *RestApi) stopJob(rw http.ResponseWriter, r *http.Request) {
 			return err
 		}
 
-		stmt := sq.Update("job").
-			Set("job_state", req.State).
-			Set("duration", job.Duration).
-			Where("job.id = ?", job.ID)
-
-		for metric, stats := range jobMeta.Statistics {
-			switch metric {
-			case "flops_any":
-				stmt = stmt.Set("flops_any_avg", stats.Avg)
-			case "mem_used":
-				stmt = stmt.Set("mem_used_max", stats.Max)
-			case "mem_bw":
-				stmt = stmt.Set("mem_bw_avg", stats.Avg)
-			case "load":
-				stmt = stmt.Set("load_avg", stats.Avg)
-			case "net_bw":
-				stmt = stmt.Set("net_bw_avg", stats.Avg)
-			case "file_bw":
-				stmt = stmt.Set("file_bw_avg", stats.Avg)
-			}
-		}
-
-		sql, args, err := stmt.ToSql()
-		if err != nil {
-			log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
-			return err
-		}
-
-		if _, err := api.DB.Exec(sql, args...); err != nil {
-			log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
-			return err
-		}
-
+		api.JobRepository.Close(job.JobID, job.Duration, req.State, jobMeta.Statistics)
 		log.Printf("job stopped and archived (dbid: %d)", job.ID)
 		return nil
 	}
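JobRepository.Close (shown in full further down) now owns the conditional UPDATE the handler used to assemble: squirrel's Update builder collects one Set per recognised metric and only then renders SQL. A stand-alone sketch of that pattern, with a made-up Stats type instead of schema.JobStatistics and invented example values:

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

// Stats is a stand-in for schema.JobStatistics (Avg/Max fields assumed).
type Stats struct {
	Avg float64
	Max float64
}

func main() {
	metricStats := map[string]Stats{
		"flops_any": {Avg: 42.5},
		"mem_used":  {Max: 17.3},
	}

	// Build the UPDATE incrementally, as JobRepository.Close does:
	// every recognised metric adds another SET clause.
	stmt := sq.Update("job").
		Set("job_state", "completed").
		Set("duration", 3600).
		Where("job.id = ?", 7)

	// Map iteration order is random, so the metric columns may appear
	// in any order in the rendered statement.
	for metric, stats := range metricStats {
		switch metric {
		case "flops_any":
			stmt = stmt.Set("flops_any_avg", stats.Avg)
		case "mem_used":
			stmt = stmt.Set("mem_used_max", stats.Max)
		}
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sql)  // UPDATE job SET job_state = ?, duration = ?, ... WHERE job.id = ?
	fmt.Println(args) // [completed 3600 ... 7]
}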
@@ -3,6 +3,8 @@ package repository
 import (
 	"database/sql"
 
+	"github.com/ClusterCockpit/cc-backend/log"
+	"github.com/ClusterCockpit/cc-backend/schema"
 	sq "github.com/Masterminds/squirrel"
 	"github.com/jmoiron/sqlx"
 )
@@ -11,15 +13,152 @@ type JobRepository struct {
 	DB *sqlx.DB
 }
 
-func (r *JobRepository) JobExists(jobId int64, cluster string, startTime int64) (rows *sql.Rows, err error) {
+func (r *JobRepository) FindJobById(
+	jobId int64) (*schema.Job, error) {
+	sql, args, err := sq.Select(schema.JobColumns...).
+		From("job").Where("job.id = ?", jobId).ToSql()
+	if err != nil {
+		return nil, err
+	}
+
+	job, err := schema.ScanJob(r.DB.QueryRowx(sql, args...))
+	return job, err
+}
+
+// func (r *JobRepository) FindJobsByFilter( ) ([]*schema.Job, int, error) {
+
+// }
+
+func (r *JobRepository) FindJobByIdWithUser(
+	jobId int64,
+	username string) (*schema.Job, error) {
+
+	sql, args, err := sq.Select(schema.JobColumns...).
+		From("job").
+		Where("job.id = ?", jobId).
+		Where("job.user = ?", username).ToSql()
+	if err != nil {
+		return nil, err
+	}
+
+	job, err := schema.ScanJob(r.DB.QueryRowx(sql, args...))
+	return job, err
+}
+
+func (r *JobRepository) JobExists(
+	jobId int64,
+	cluster string,
+	startTime int64) (rows *sql.Rows, err error) {
 	rows, err = r.DB.Query(`SELECT job.id FROM job WHERE job.job_id = ? AND job.cluster = ? AND job.start_time = ?`,
 		jobId, cluster, startTime)
 	return
 }
 
-func (r *JobRepository) IdExists(jobId int64) bool {
+func (r *JobRepository) Add(job schema.JobMeta) (res sql.Result, err error) {
+	res, err = r.DB.NamedExec(`INSERT INTO job (
+		job_id, user, project, cluster, `+"`partition`"+`, array_job_id, num_nodes, num_hwthreads, num_acc,
+		exclusive, monitoring_status, smt, job_state, start_time, duration, resources, meta_data
+	) VALUES (
+		:job_id, :user, :project, :cluster, :partition, :array_job_id, :num_nodes, :num_hwthreads, :num_acc,
+		:exclusive, :monitoring_status, :smt, :job_state, :start_time, :duration, :resources, :meta_data
+	);`, job)
+	return
+}
+
+func (r *JobRepository) Stop(
+	jobId int64,
+	cluster string,
+	startTime int64) (job *schema.Job, err error) {
+	var sql string
+	var args []interface{}
+	qb := sq.Select(schema.JobColumns...).From("job").
+		Where("job.job_id = ?", jobId).
+		Where("job.cluster = ?", cluster)
+	if startTime != 0 {
+		qb = qb.Where("job.start_time = ?", startTime)
+	}
+	sql, args, err = qb.ToSql()
+	if err != nil {
+		return
+	}
+	job, err = schema.ScanJob(r.DB.QueryRowx(sql, args...))
+	return
+}
+
+func (r *JobRepository) StopById(id string) (job *schema.Job, err error) {
+	var sql string
+	var args []interface{}
+	qb := sq.Select(schema.JobColumns...).From("job").
+		Where("job.id = ?", id)
+	sql, args, err = qb.ToSql()
+	if err != nil {
+		return
+	}
+	job, err = schema.ScanJob(r.DB.QueryRowx(sql, args...))
+	return
+}
+
+func (r *JobRepository) Close(
+	jobId int64,
+	duration int32,
+	state schema.JobState,
+	metricStats map[string]schema.JobStatistics) {
+
+	stmt := sq.Update("job").
+		Set("job_state", state).
+		Set("duration", duration).
+		Where("job.id = ?", jobId)
+
+	for metric, stats := range metricStats {
+		switch metric {
+		case "flops_any":
+			stmt = stmt.Set("flops_any_avg", stats.Avg)
+		case "mem_used":
+			stmt = stmt.Set("mem_used_max", stats.Max)
+		case "mem_bw":
+			stmt = stmt.Set("mem_bw_avg", stats.Avg)
+		case "load":
+			stmt = stmt.Set("load_avg", stats.Avg)
+		case "net_bw":
+			stmt = stmt.Set("net_bw_avg", stats.Avg)
+		case "file_bw":
+			stmt = stmt.Set("file_bw_avg", stats.Avg)
+		}
+	}
+
+	sql, args, err := stmt.ToSql()
+	if err != nil {
+		log.Errorf("archiving job (dbid: %d) failed: %s", jobId, err.Error())
+	}
+
+	if _, err := r.DB.Exec(sql, args...); err != nil {
+		log.Errorf("archiving job (dbid: %d) failed: %s", jobId, err.Error())
+	}
+}
+
+func (r *JobRepository) findById(id string) (job *schema.Job, err error) {
+	var sql string
+	var args []interface{}
+	sql, args, err = sq.Select(schema.JobColumns...).From("job").Where("job.id = ?", id).ToSql()
+	if err != nil {
+		return
+	}
+	job, err = schema.ScanJob(r.DB.QueryRowx(sql, args...))
+	return
+}
+
+func (r *JobRepository) Exists(jobId int64) bool {
+	rows, err := r.DB.Query(`SELECT job.id FROM job WHERE job.job_id = ?`, jobId)
+
+	if err != nil {
+		return false
+	}
+
+	if rows.Next() {
 		return true
+	}
+
+	return false
 }
 
 func (r *JobRepository) AddTag(jobId int64, tagId int64) error {
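Taken together, the new repository methods give callers a small surface that hides the *sqlx.DB. A hedged usage sketch follows, assuming the repository package compiles as shown above; the SQLite driver, database path and the example job/cluster/start-time values are assumptions, not part of the commit.

package main

import (
	"log"

	"github.com/ClusterCockpit/cc-backend/repository"
	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Driver and path are assumptions for this sketch.
	db, err := sqlx.Open("sqlite3", "./var/job.db")
	if err != nil {
		log.Fatal(err)
	}

	repo := &repository.JobRepository{DB: db}

	// Resolve a job by its database id, as the stop handler does when the
	// request carries a database id.
	job, err := repo.FindJobById(1)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found job %d", job.JobID)

	// Or by the (job_id, cluster, start_time) triple, mirroring
	// JobRepository.Stop; the values here are invented.
	if job, err = repo.Stop(2063123, "emmy", 1609459200); err == nil {
		log.Printf("stopping job %d", job.JobID)
	}
}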
@@ -26,6 +26,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/graph/generated"
 	"github.com/ClusterCockpit/cc-backend/log"
 	"github.com/ClusterCockpit/cc-backend/metricdata"
+	"github.com/ClusterCockpit/cc-backend/repository"
 	"github.com/ClusterCockpit/cc-backend/templates"
 	"github.com/gorilla/handlers"
 	"github.com/gorilla/mux"
@@ -307,7 +308,7 @@ func main() {
 
 	graphQLPlayground := playground.Handler("GraphQL playground", "/query")
 	api := &api.RestApi{
-		DB:              db,
+		JobRepository:   &repository.JobRepository{db},
 		AsyncArchiving:  programConfig.AsyncArchiving,
 		Resolver:        resolver,
 		MachineStateDir: programConfig.MachineStateDir,