mirror of https://github.com/ClusterCockpit/cc-backend
synced 2024-12-25 12:59:06 +01:00
Fixes; Get Clusters from config.go
This commit is contained in:
parent 8e5e278ea4
commit 2c81a96133
@@ -1,6 +1,6 @@
 # ClusterCockpit with a Golang backend (Only supports archived jobs)
 
-[![Build](https://github.com/ClusterCockpit/cc-metric-store/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-metric-store/actions/workflows/test.yml)
+[![Build](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-jobarchive/actions/workflows/test.yml)
 
 ### Run server
@@ -16,18 +16,17 @@ import (
 
 // It serves as dependency injection for your app, add any dependencies you require here.
 type Resolver struct {
-	DB             *sqlx.DB
-	ClusterConfigs []*model.Cluster
+	DB *sqlx.DB
 }
 
-var jobTableCols []string = []string{"id", "job_id", "user_id", "project_id", "cluster_id", "start_time", "duration", "job_state", "num_nodes", "node_list", "flops_any_avg", "mem_bw_avg", "net_bw_avg", "file_bw_avg", "load_avg"}
+var JobTableCols []string = []string{"id", "job_id", "user_id", "project_id", "cluster_id", "start_time", "duration", "job_state", "num_nodes", "node_list", "flops_any_avg", "mem_bw_avg", "net_bw_avg", "file_bw_avg", "load_avg"}
 
 type Scannable interface {
 	Scan(dest ...interface{}) error
 }
 
 // Helper function for scanning jobs with the `jobTableCols` columns selected.
-func scanJob(row Scannable) (*model.Job, error) {
+func ScanJob(row Scannable) (*model.Job, error) {
 	job := &model.Job{HasProfile: true}
 
 	var nodeList string
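Renaming jobTableCols and scanJob to the exported JobTableCols and ScanJob lets code outside the graph package reuse the shared column list and row scanner. A minimal sketch of such a caller, assuming the resolver lives in package github.com/ClusterCockpit/cc-jobarchive/graph and that sq is the Masterminds/squirrel builder used in the resolver code; loadJob itself is illustrative and not part of this commit:

package example

import (
	sq "github.com/Masterminds/squirrel"
	"github.com/jmoiron/sqlx"

	"github.com/ClusterCockpit/cc-jobarchive/graph"
	"github.com/ClusterCockpit/cc-jobarchive/graph/model"
)

// loadJob fetches a single job by database id, reusing the exported column
// list and scanner instead of duplicating the SELECT and Scan boilerplate.
func loadJob(db *sqlx.DB, id string) (*model.Job, error) {
	row := sq.Select(graph.JobTableCols...).
		From("job").
		Where("job.id = ?", id).
		RunWith(db).
		QueryRow()
	return graph.ScanJob(row)
}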
@@ -44,7 +43,7 @@ func scanJob(row Scannable) (*model.Job, error) {
 
 // Helper function for the `jobs` GraphQL-Query. Is also used elsewhere when a list of jobs is needed.
 func (r *Resolver) queryJobs(filters []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) ([]*model.Job, int, error) {
-	query := sq.Select(jobTableCols...).From("job")
+	query := sq.Select(JobTableCols...).From("job")
 
 	if order != nil {
 		field := toSnakeCase(order.Field)
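The hunk breaks off where the ordering branch begins. As a rough, hypothetical sketch of how such a builder is commonly finished with squirrel's OrderBy/Offset/Limit; the Order, Page and ItemsPerPage field names are guesses for illustration and not taken from this diff, and the surrounding package's imports (fmt, sq, model) are assumed:

// Sketch only: completing the jobs query with ordering and paging.
func applyOrderAndPage(query sq.SelectBuilder, order *model.OrderByInput, page *model.PageRequest) sq.SelectBuilder {
	if order != nil {
		// toSnakeCase converts the GraphQL field name to the DB column name.
		field := toSnakeCase(order.Field)
		query = query.OrderBy(fmt.Sprintf("job.%s %s", field, order.Order))
	}
	if page != nil {
		limit := uint64(page.ItemsPerPage)
		query = query.Offset(uint64(page.Page-1) * limit).Limit(limit)
	}
	return query
}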
@@ -76,7 +75,7 @@ func (r *Resolver) queryJobs(filters []*model.JobFilter, page *model.PageRequest
 
 	jobs := make([]*model.Job, 0, 50)
 	for rows.Next() {
-		job, err := scanJob(rows)
+		job, err := ScanJob(rows)
 		if err != nil {
 			return nil, 0, err
 		}
@@ -128,7 +128,7 @@ func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string,
 }
 
 func (r *queryResolver) Clusters(ctx context.Context) ([]*model.Cluster, error) {
-	return r.ClusterConfigs, nil
+	return config.Clusters, nil
 }
 
 func (r *queryResolver) Tags(ctx context.Context) ([]*model.JobTag, error) {
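With this change the resolver no longer carries the cluster list itself; it reads the package-level config.Clusters instead, as the commit title says. The config.go side is not part of this diff, so the following is only a sketch of what it presumably provides; the clusters.json format and the Init helper are assumptions:

package config

import (
	"encoding/json"
	"os"

	"github.com/ClusterCockpit/cc-jobarchive/graph/model"
)

// Clusters holds the cluster definitions that the GraphQL resolvers read.
var Clusters []*model.Cluster

// Init loads the cluster list once at startup, e.g. from a JSON file
// (path and format are assumptions in this sketch).
func Init(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewDecoder(f).Decode(&Clusters)
}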
@@ -151,7 +151,7 @@ func (r *queryResolver) Tags(ctx context.Context) ([]*model.JobTag, error) {
 }
 
 func (r *queryResolver) Job(ctx context.Context, id string) (*model.Job, error) {
-	return scanJob(sq.Select(jobTableCols...).From("job").Where("job.id = ?", id).RunWith(r.DB).QueryRow())
+	return ScanJob(sq.Select(JobTableCols...).From("job").Where("job.id = ?", id).RunWith(r.DB).QueryRow())
 }
 
 func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string) ([]*model.JobMetricWithName, error) {
@@ -7,6 +7,7 @@ import (
 	"math"
 
 	"github.com/99designs/gqlgen/graphql"
+	"github.com/ClusterCockpit/cc-jobarchive/config"
 	"github.com/ClusterCockpit/cc-jobarchive/graph/model"
 	"github.com/ClusterCockpit/cc-jobarchive/metricdata"
 	"github.com/ClusterCockpit/cc-jobarchive/schema"
@@ -26,7 +27,7 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
 	stats := map[string]*model.JobsStatistics{}
 
 	// `socketsPerNode` and `coresPerSocket` can differ from cluster to cluster, so we need to explicitly loop over those.
-	for _, cluster := range r.ClusterConfigs {
+	for _, cluster := range config.Clusters {
 		corehoursCol := fmt.Sprintf("SUM(job.duration * job.num_nodes * %d * %d) / 3600", cluster.SocketsPerNode, cluster.CoresPerSocket)
 		var query sq.SelectBuilder
 		if groupBy == nil {
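To make the corehoursCol expression concrete: the SUM multiplies seconds by the total core count per node before dividing by 3600, so a job with duration 3600 s on 2 nodes of a cluster with SocketsPerNode = 2 and CoresPerSocket = 16 contributes 3600 * 2 * 2 * 16 / 3600 = 64 core-hours (the numbers are made up for illustration).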
@@ -89,7 +90,11 @@ func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobF
 		}
 	} else {
 		col := groupBy2column[*groupBy]
-		rows, err := sq.Select(col, "COUNT(job.id)").From("job").Where("job.duration < 120").RunWith(r.DB).Query()
+		query := sq.Select(col, "COUNT(job.id)").From("job").Where("job.duration < 120")
+		for _, f := range filter {
+			query = buildWhereClause(f, query)
+		}
+		rows, err := query.RunWith(r.DB).Query()
 		if err != nil {
 			return nil, err
 		}
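The fix in this last hunk is that the short-jobs count now respects the same job filters as the rest of the statistics: the one-shot query is turned back into a sq.SelectBuilder so buildWhereClause can append one condition per filter before the query runs. Assuming sq is Masterminds/squirrel, chained Where calls are combined with AND, which is what this pattern relies on; a standalone sketch (the emmy cluster id is made up, and buildWhereClause itself is the repository's own helper, not reproduced here):

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Base query for short jobs, as in the hunk above.
	query := sq.Select("job.cluster_id", "COUNT(job.id)").From("job").Where("job.duration < 120")

	// Each filter appends another condition; squirrel ANDs them together,
	// mirroring how buildWhereClause is applied once per filter.
	query = query.Where(sq.Eq{"job.cluster_id": "emmy"})

	sql, args, _ := query.ToSql()
	fmt.Println(sql)  // SELECT job.cluster_id, COUNT(job.id) FROM job WHERE job.duration < 120 AND job.cluster_id = ?
	fmt.Println(args) // [emmy]
}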