Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-12-26 13:29:05 +01:00)

Change cluster.json to camelCase, go fmt

commit 2605623324 (parent 030f1a3fba)

@@ -1,10 +1,10 @@
 # Run server
 
 * The server expects the SQLite Job database in `./job.db`.
-* The metric data as JSON is expected in `./job-data/.../.../data.json`.
-* A JSON-description of the clusters is expected in `./clusters/*.json`.
-* Run ```go run server.go```
-* The GraphQL backend is located at http://localhost:8080/query/ .
+* The metric data as JSON is expected in `./job-data/<clusterId>/.../data.json`.
+* A JSON-description of the clusters is expected in `./job-data/<clusterId>/cluster.json`.
+* Run `go run server.go`
+* The GraphQL backend is located at http://localhost:8080/query .
 
 # Debugging and Testing
 
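For context, the endpoint named in the README (now without the trailing slash) accepts ordinary GraphQL POST requests. A minimal sketch in Go, assuming the server started with `go run server.go` is listening on localhost:8080; the selected field name `clusterID` is a guess based on the renamed JSON tags below, not a confirmed schema field:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// GraphQL requests are sent as JSON: {"query": "..."}.
	// The query string is hypothetical; check the project's schema for real field names.
	body, _ := json.Marshal(map[string]string{
		"query": `{ clusters { clusterID } }`,
	})

	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	raw, _ := io.ReadAll(resp.Body)
	fmt.Println(string(raw)) // e.g. {"data":{"clusters":[...]}}
}
```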
@@ -33,13 +33,13 @@ type JobTag struct {
 }
 
 type Cluster struct {
-	ClusterID       string         `json:"cluster_id"`
-	ProcessorType   string         `json:"processor_type"`
-	SocketsPerNode  int            `json:"sockets_per_node"`
-	CoresPerSocket  int            `json:"cores_per_socket"`
-	ThreadsPerCore  int            `json:"threads_per_core"`
-	FlopRateScalar  int            `json:"flop_rate_scalar"`
-	FlopRateSimd    int            `json:"flop_rate_simd"`
-	MemoryBandwidth int            `json:"memory_bandwidth"`
-	MetricConfig    []MetricConfig `json:"metric_config"`
+	ClusterID       string         `json:"clusterID"`
+	ProcessorType   string         `json:"processorType"`
+	SocketsPerNode  int            `json:"socketsPerNode"`
+	CoresPerSocket  int            `json:"coresPerSocket"`
+	ThreadsPerCore  int            `json:"threadsPerCore"`
+	FlopRateScalar  int            `json:"flopRateScalar"`
+	FlopRateSimd    int            `json:"flopRateSimd"`
+	MemoryBandwidth int            `json:"memoryBandwidth"`
+	MetricConfig    []MetricConfig `json:"metricConfig"`
 }
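The tag change means each `cluster.json` must now use camelCase keys; with `encoding/json`, the old snake_case keys would simply no longer match and the fields would stay at their zero values. A minimal standalone sketch of decoding such a file (the struct is a trimmed copy of the model above, `MetricConfig` is omitted for brevity, and the sample values are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the Cluster model from the diff; MetricConfig is left out here.
type Cluster struct {
	ClusterID       string `json:"clusterID"`
	ProcessorType   string `json:"processorType"`
	SocketsPerNode  int    `json:"socketsPerNode"`
	CoresPerSocket  int    `json:"coresPerSocket"`
	ThreadsPerCore  int    `json:"threadsPerCore"`
	FlopRateScalar  int    `json:"flopRateScalar"`
	FlopRateSimd    int    `json:"flopRateSimd"`
	MemoryBandwidth int    `json:"memoryBandwidth"`
}

func main() {
	// Example cluster.json contents using the new camelCase keys (values are made up).
	data := []byte(`{
		"clusterID": "testcluster",
		"processorType": "Intel Xeon",
		"socketsPerNode": 2,
		"coresPerSocket": 10,
		"threadsPerCore": 2,
		"flopRateScalar": 16,
		"flopRateSimd": 32,
		"memoryBandwidth": 100
	}`)

	var c Cluster
	if err := json.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // snake_case keys would leave all fields at their zero values
}
```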
@@ -63,7 +63,7 @@ func addTimeCondition(conditions []string, field string, input *model.TimeRange)
 
 func addFloatCondition(conditions []string, field string, input *model.FloatRange) []string {
 	conditions = append(conditions, fmt.Sprintf("%s BETWEEN %f AND %f", field, input.From, input.To))
-	return conditions;
+	return conditions
 }
 
 func buildQueryConditions(filterList *model.JobFilterList) (string, string) {
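These helpers all follow the same pattern: append one SQL fragment per active filter and let `buildQueryConditions` join the fragments afterwards. A simplified, self-contained sketch of that pattern, using a local stand-in for `model.FloatRange` and invented column names rather than the project's actual schema:

```go
package main

import (
	"fmt"
	"strings"
)

// Local stand-in for model.FloatRange.
type FloatRange struct {
	From float64
	To   float64
}

// Same shape as addFloatCondition in the diff above.
func addFloatCondition(conditions []string, field string, input *FloatRange) []string {
	conditions = append(conditions, fmt.Sprintf("%s BETWEEN %f AND %f", field, input.From, input.To))
	return conditions
}

func main() {
	var conditions []string
	conditions = addFloatCondition(conditions, "job.flops_any_avg", &FloatRange{From: 1.0, To: 100.0})
	conditions = addFloatCondition(conditions, "job.mem_bw_avg", &FloatRange{From: 10.0, To: 80.0})

	// Joining the fragments yields the WHERE clause (column names are hypothetical).
	where := strings.Join(conditions, " AND ")
	fmt.Println("SELECT * FROM job WHERE " + where)
}
```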
@@ -410,8 +410,8 @@ func (r *queryResolver) FilterRanges(
 		panic("expected exactly one row")
 	}
 
-	duration := &model.IntRangeOutput{};
-	numNodes := &model.IntRangeOutput{};
+	duration := &model.IntRangeOutput{}
+	numNodes := &model.IntRangeOutput{}
 	var startTimeMin, startTimeMax int64
 
 	err = rows.Scan(&duration.From, &duration.To,
@@ -468,8 +468,8 @@ func (r *clusterResolver) FilterRanges(
 		panic("expected exactly one row")
 	}
 
-	duration := &model.IntRangeOutput{};
-	numNodes := &model.IntRangeOutput{};
+	duration := &model.IntRangeOutput{}
+	numNodes := &model.IntRangeOutput{}
 	var startTimeMin, startTimeMax int64
 
 	err = rows.Scan(&duration.From, &duration.To,
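In both FilterRanges resolvers the only change is that gofmt drops the trailing semicolons, which are legal but unidiomatic in Go; the logic of scanning aggregated min/max values into `IntRangeOutput` structs stays the same. A rough, self-contained sketch of that scan pattern with `database/sql` follows; the table and column names are guesses for illustration only, not the project's actual schema:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // SQLite driver, matching the ./job.db setup in the README
)

// Local stand-in for model.IntRangeOutput.
type IntRangeOutput struct {
	From int
	To   int
}

func main() {
	db, err := sql.Open("sqlite3", "./job.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	duration := &IntRangeOutput{}
	numNodes := &IntRangeOutput{}

	// Aggregate query; column names are assumptions.
	row := db.QueryRow(`SELECT MIN(duration), MAX(duration),
	                           MIN(num_nodes), MAX(num_nodes) FROM job`)
	if err := row.Scan(&duration.From, &duration.To, &numNodes.From, &numNodes.To); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("duration: %+v, numNodes: %+v\n", duration, numNodes)
}
```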