Introduce clusters query and type

Lou Knauer 2021-04-22 15:00:54 +02:00
parent 3004e2909a
commit 365b1a2066
7 changed files with 1063 additions and 2 deletions

.gitignore

@@ -1,3 +1,4 @@
 job.db
 job-data
 cc-jobarchive
+clusters

README.md

@@ -1,7 +1,8 @@
 # Run server
-* The server expects the SQLite Job database in `job.db`.
-* The metric data as JSON is expected in `job-data/.../.../{data.json|meta.json}`
+* The server expects the SQLite Job database in `./job.db`.
+* The metric data as JSON is expected in `./job-data/.../.../data.json`.
+* A JSON description of the clusters is expected in `./clusters/*.json`.
 * Run ```go run server.go```
 * The GraphQL backend is located at http://localhost:8080/query/ .
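For reference, a cluster description file could look like the sketch below. The key names are fixed by the `json` struct tags of the new `Cluster` and `MetricConfig` types introduced further down in this commit; all values (including the units) are made-up placeholders:

```json
{
  "cluster_id": "testcluster",
  "processor_type": "Intel Xeon E5-2660 v2",
  "sockets_per_node": 2,
  "cores_per_socket": 10,
  "threads_per_core": 2,
  "flop_rate_scalar": 14,
  "flop_rate_simd": 112,
  "memory_bandwidth": 24,
  "metric_config": [
    {
      "name": "flops_any",
      "unit": "GF/s",
      "sampletime": 60,
      "peak": 112,
      "normal": 60,
      "caution": 20,
      "alert": 10
    }
  ]
}
```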

File diff suppressed because it is too large (this is where most of the commit's 1063 additions live).

@@ -30,3 +30,15 @@ type JobTag struct {
 	TagType string `db:"tag_type"`
 	TagName string `db:"tag_name"`
 }
+
+type Cluster struct {
+	ClusterID       string         `json:"cluster_id"`
+	ProcessorType   string         `json:"processor_type"`
+	SocketsPerNode  int            `json:"sockets_per_node"`
+	CoresPerSocket  int            `json:"cores_per_socket"`
+	ThreadsPerCore  int            `json:"threads_per_core"`
+	FlopRateScalar  int            `json:"flop_rate_scalar"`
+	FlopRateSimd    int            `json:"flop_rate_simd"`
+	MemoryBandwidth int            `json:"memory_bandwidth"`
+	MetricConfig    []MetricConfig `json:"metric_config"`
+}
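These `json` tags pin down the exact key names a `./clusters/*.json` file has to use, matching the sample file sketched in the README section above.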

@@ -91,6 +91,16 @@ type JobsStatistics struct {
 	HistNumNodes []*HistoPoint `json:"histNumNodes"`
 }
+
+type MetricConfig struct {
+	Name       string `json:"name"`
+	Unit       string `json:"unit"`
+	Sampletime int    `json:"sampletime"`
+	Peak       int    `json:"peak"`
+	Normal     int    `json:"normal"`
+	Caution    int    `json:"caution"`
+	Alert      int    `json:"alert"`
+}
 
 type OrderByInput struct {
 	Field string             `json:"field"`
 	Order *SortDirectionEnum `json:"order"`
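The threshold fields are presumably intended for rating measured metric values against the hardware's capabilities: `peak` as the attainable maximum, with `normal`, `caution` and `alert` marking decreasing levels below it. Nothing in the type enforces such an ordering, so the cluster description files have to keep these values consistent themselves.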

@@ -295,6 +295,31 @@ func (r *queryResolver) JobsStatistics(
 	return &stats, nil
 }
+
+// Clusters resolves the `clusters` GraphQL query by reading and decoding
+// every JSON cluster description found in the `./clusters` directory.
+func (r *queryResolver) Clusters(ctx context.Context) ([]*model.Cluster, error) {
+	files, err := os.ReadDir("./clusters")
+	if err != nil {
+		return nil, err
+	}
+
+	var clusters []*model.Cluster
+	for _, entry := range files {
+		f, err := os.ReadFile("./clusters/" + entry.Name())
+		if err != nil {
+			return nil, err
+		}
+
+		var cluster model.Cluster
+		err = json.Unmarshal(f, &cluster)
+		if err != nil {
+			return nil, err
+		}
+		clusters = append(clusters, &cluster)
+	}
+	return clusters, nil
+}
 
 func (r *queryResolver) JobMetrics(
 	ctx context.Context, jobId string,
 	metrics []*string) ([]*model.JobMetricWithName, error) {
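A note on the `Clusters` resolver above: it hits the disk on every `clusters` query, re-listing the directory and re-parsing every file each time. Since the descriptions are static, reading them once at startup and serving a cached slice would be a straightforward optimization.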

@@ -19,6 +19,28 @@ type Job {
 	tags: [JobTag!]
 }
+
+type Cluster {
+	clusterID: String!
+	processorType: String!
+	socketsPerNode: Int!
+	coresPerSocket: Int!
+	threadsPerCore: Int!
+	flopRateScalar: Int!
+	flopRateSimd: Int!
+	memoryBandwidth: Int!
+	metricConfig: [MetricConfig!]!
+}
+
+type MetricConfig {
+	name: String!
+	unit: String!
+	sampletime: Int!
+	peak: Int!
+	normal: Int!
+	caution: Int!
+	alert: Int!
+}
 
 type JobMetric {
 	unit: String!
 	scope: JobMetricScope!
@@ -51,6 +73,8 @@ type JobTag {
 }
 
 type Query {
+	clusters: [Cluster!]!
 	jobById(jobId: String!): Job
 	jobs(filter: JobFilterList, page: PageRequest, order: OrderByInput): JobResultList!
 	jobsStatistics(filter: JobFilterList): JobsStatistics!
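With this schema in place, the new root field can be exercised against the GraphQL endpoint mentioned in the README; the selection set below is an arbitrary sketch:

```graphql
query {
  clusters {
    clusterID
    processorType
    socketsPerNode
    metricConfig {
      name
      unit
      peak
    }
  }
}
```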