ClusterCockpit/cc-backend (https://github.com/ClusterCockpit/cc-backend)

Commit 2605623324 (parent 030f1a3fba): Change cluster.json to camelCase, go fmt
@@ -1,10 +1,10 @@
 # Run server
 
 * The server expects the SQLite Job database in `./job.db`.
-* The metric data as JSON is expected in `./job-data/.../.../data.json`.
-* A JSON-description of the clusters is expected in `./clusters/*.json`.
-* Run ```go run server.go```
-* The GraphQL backend is located at http://localhost:8080/query/ .
+* The metric data as JSON is expected in `./job-data/<clusterId>/.../data.json`.
+* A JSON-description of the clusters is expected in `./job-data/<clusterId>/cluster.json`.
+* Run `go run server.go`
+* The GraphQL backend is located at http://localhost:8080/query .
 
 # Debugging and Testing
 
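The GraphQL endpoint referenced in the README now lives at /query (no trailing slash). As a quick sanity check, a request like the following Go sketch can be sent against a locally running server; the `{ __typename }` query is a generic placeholder, not a field from the project's schema.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// A GraphQL request is a POST with a JSON body containing a "query" string.
	// "{ __typename }" is a placeholder every GraphQL endpoint accepts; the real
	// schema fields are not part of this diff.
	body, _ := json.Marshal(map[string]string{"query": "{ __typename }"})

	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```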
@@ -33,13 +33,13 @@ type JobTag struct {
 }
 
 type Cluster struct {
-	ClusterID       string         `json:"cluster_id"`
-	ProcessorType   string         `json:"processor_type"`
-	SocketsPerNode  int            `json:"sockets_per_node"`
-	CoresPerSocket  int            `json:"cores_per_socket"`
-	ThreadsPerCore  int            `json:"threads_per_core"`
-	FlopRateScalar  int            `json:"flop_rate_scalar"`
-	FlopRateSimd    int            `json:"flop_rate_simd"`
-	MemoryBandwidth int            `json:"memory_bandwidth"`
-	MetricConfig    []MetricConfig `json:"metric_config"`
+	ClusterID       string         `json:"clusterID"`
+	ProcessorType   string         `json:"processorType"`
+	SocketsPerNode  int            `json:"socketsPerNode"`
+	CoresPerSocket  int            `json:"coresPerSocket"`
+	ThreadsPerCore  int            `json:"threadsPerCore"`
+	FlopRateScalar  int            `json:"flopRateScalar"`
+	FlopRateSimd    int            `json:"flopRateSimd"`
+	MemoryBandwidth int            `json:"memoryBandwidth"`
+	MetricConfig    []MetricConfig `json:"metricConfig"`
 }
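The retagged struct changes the key names expected in cluster.json from snake_case to camelCase. Below is a minimal sketch of unmarshalling a file in the new shape; the MetricConfig field is omitted because its type is not part of this hunk, and all sample values are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copy of the struct above; MetricConfig is omitted for brevity.
type Cluster struct {
	ClusterID       string `json:"clusterID"`
	ProcessorType   string `json:"processorType"`
	SocketsPerNode  int    `json:"socketsPerNode"`
	CoresPerSocket  int    `json:"coresPerSocket"`
	ThreadsPerCore  int    `json:"threadsPerCore"`
	FlopRateScalar  int    `json:"flopRateScalar"`
	FlopRateSimd    int    `json:"flopRateSimd"`
	MemoryBandwidth int    `json:"memoryBandwidth"`
}

func main() {
	// cluster.json now uses camelCase keys; the values here are made up.
	raw := []byte(`{
		"clusterID": "testcluster",
		"processorType": "Intel Xeon",
		"socketsPerNode": 2,
		"coresPerSocket": 10,
		"threadsPerCore": 2,
		"flopRateScalar": 16,
		"flopRateSimd": 32,
		"memoryBandwidth": 100
	}`)

	var c Cluster
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}
```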
@@ -63,7 +63,7 @@ func addTimeCondition(conditions []string, field string, input *model.TimeRange)
 
 func addFloatCondition(conditions []string, field string, input *model.FloatRange) []string {
 	conditions = append(conditions, fmt.Sprintf("%s BETWEEN %f AND %f", field, input.From, input.To))
-	return conditions;
+	return conditions
 }
 
 func buildQueryConditions(filterList *model.JobFilterList) (string, string) {
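For reference, here is what the condition builder emits, as a standalone sketch: the append/Sprintf line is taken verbatim from the hunk above, while the FloatRange stand-in, the field name, and the surrounding scaffolding are assumptions made for illustration.

```go
package main

import "fmt"

// Stand-in for model.FloatRange; the real model package is not shown in this diff.
type FloatRange struct {
	From float64
	To   float64
}

// Same logic as the resolver helper above, using the local stand-in type.
func addFloatCondition(conditions []string, field string, input *FloatRange) []string {
	conditions = append(conditions, fmt.Sprintf("%s BETWEEN %f AND %f", field, input.From, input.To))
	return conditions
}

func main() {
	// "flops_any_avg" is a hypothetical column name used only for this example.
	conds := addFloatCondition(nil, "flops_any_avg", &FloatRange{From: 1.5, To: 42.0})
	fmt.Println(conds[0]) // flops_any_avg BETWEEN 1.500000 AND 42.000000
}
```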
@@ -410,8 +410,8 @@ func (r *queryResolver) FilterRanges(
 		panic("expected exactly one row")
 	}
 
-	duration := &model.IntRangeOutput{};
-	numNodes := &model.IntRangeOutput{};
+	duration := &model.IntRangeOutput{}
+	numNodes := &model.IntRangeOutput{}
 	var startTimeMin, startTimeMax int64
 
 	err = rows.Scan(&duration.From, &duration.To,
@@ -421,9 +421,9 @@ func (r *queryResolver) FilterRanges(
 		return nil, err
 	}
 
-	startTime := &model.TimeRangeOutput {
-		time.Unix(startTimeMin, 0), time.Unix(startTimeMax, 0) }
-	return &model.FilterRanges{ duration, numNodes, startTime }, nil
+	startTime := &model.TimeRangeOutput{
+		time.Unix(startTimeMin, 0), time.Unix(startTimeMax, 0)}
+	return &model.FilterRanges{duration, numNodes, startTime}, nil
 }
 
 func (r *jobResolver) Tags(ctx context.Context, job *model.Job) ([]*model.JobTag, error) {
@@ -468,8 +468,8 @@ func (r *clusterResolver) FilterRanges(
 		panic("expected exactly one row")
 	}
 
-	duration := &model.IntRangeOutput{};
-	numNodes := &model.IntRangeOutput{};
+	duration := &model.IntRangeOutput{}
+	numNodes := &model.IntRangeOutput{}
 	var startTimeMin, startTimeMax int64
 
 	err = rows.Scan(&duration.From, &duration.To,
@@ -479,9 +479,9 @@ func (r *clusterResolver) FilterRanges(
 		return nil, err
 	}
 
-	startTime := &model.TimeRangeOutput {
-		time.Unix(startTimeMin, 0), time.Unix(startTimeMax, 0) }
-	return &model.FilterRanges{ duration, numNodes, startTime }, nil
+	startTime := &model.TimeRangeOutput{
+		time.Unix(startTimeMin, 0), time.Unix(startTimeMax, 0)}
+	return &model.FilterRanges{duration, numNodes, startTime}, nil
 }
 
 func (r *Resolver) Job() generated.JobResolver { return &jobResolver{r} }
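The four FilterRanges hunks above are pure gofmt cleanups (dropped semicolons, composite-literal spacing). The only behaviour worth noting is the conversion of the scanned epoch seconds into time.Time values; the sketch below reproduces that construction with stand-in types, since the real model package (presumably generated from the GraphQL schema) is not shown in this diff, and the range values are placeholders for what rows.Scan would fill in.

```go
package main

import (
	"fmt"
	"time"
)

// Stand-ins for the model package's range output types.
type IntRangeOutput struct{ From, To int }
type TimeRangeOutput struct{ From, To time.Time }
type FilterRanges struct {
	Duration  *IntRangeOutput
	NumNodes  *IntRangeOutput
	StartTime *TimeRangeOutput
}

func main() {
	// Placeholder values standing in for the results of the aggregate query.
	duration := &IntRangeOutput{From: 60, To: 86400}
	numNodes := &IntRangeOutput{From: 1, To: 64}
	var startTimeMin, startTimeMax int64 = 1577836800, 1609459199

	// Same construction as in the resolvers: epoch seconds become time.Time values.
	startTime := &TimeRangeOutput{
		time.Unix(startTimeMin, 0), time.Unix(startTimeMax, 0)}
	ranges := &FilterRanges{duration, numNodes, startTime}

	fmt.Printf("duration: %+v, numNodes: %+v, startTime: %v - %v\n",
		*ranges.Duration, *ranges.NumNodes, ranges.StartTime.From, ranges.StartTime.To)
}
```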
@@ -36,7 +36,7 @@ func main() {
 	r.Handle("/query", srv)
 
 	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
-	log.Fatal(http.ListenAndServe("127.0.0.1:" + port,
+	log.Fatal(http.ListenAndServe("127.0.0.1:"+port,
 		handlers.CORS(handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}),
 			handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}),
 			handlers.AllowedOrigins([]string{"*"}))(loggedRouter)))
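The last hunk only removes the spaces around the string concatenation. For context, the ListenAndServe call wraps the router in gorilla/handlers middleware; a minimal, self-contained variant of that wiring is sketched below, with a placeholder /query handler and a hard-coded port standing in for the real GraphQL server and configuration.

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

func main() {
	port := "8080" // placeholder; the real server gets its port from elsewhere

	r := mux.NewRouter()
	// Placeholder handler standing in for the GraphQL server mounted on /query.
	r.HandleFunc("/query", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("ok"))
	})

	// Request logging, as suggested by the loggedRouter variable in the diff.
	loggedRouter := handlers.LoggingHandler(os.Stdout, r)

	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
	log.Fatal(http.ListenAndServe("127.0.0.1:"+port,
		handlers.CORS(handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"}),
			handlers.AllowedMethods([]string{"GET", "POST", "HEAD", "OPTIONS"}),
			handlers.AllowedOrigins([]string{"*"}))(loggedRouter)))
}
```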