Mirror of https://github.com/ClusterCockpit/cc-backend (synced 2024-12-26 05:19:05 +01:00)
Correct return of node data, revised code
- Scopes socket and core added as switch cases, barebones only
This commit is contained in:
parent 686e5eaa0e
commit e4730a16eb
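For orientation before the diff: the revised LoadData dispatches each requested scope through a switch, and only the 'node' scope actually builds and runs a Flux query so far; 'socket' and 'core' log a notice and fall back to node data. A minimal, self-contained sketch of that control flow (the buildNodeQuery helper, bucket name, and scope list are illustrative stand-ins, not part of the commit):

    package main

    import (
    	"fmt"
    	"log"
    )

    // buildNodeQuery stands in for the fmt.Sprintf-built Flux query in the
    // real repository code; the bucket name here is made up.
    func buildNodeQuery() string {
    	return fmt.Sprintf(`from(bucket: %q) |> range(start: -1h)`, "example-bucket")
    }

    func main() {
    	scopes := []string{"node", "socket", "core", "memoryDomain"}
    	for _, scope := range scopes {
    		query := ""
    		switch scope {
    		case "node":
    			query = buildNodeQuery()
    		case "socket", "core":
    			// Mirrors the commit's behavior: log a notice, skip the scope.
    			log.Printf("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
    			continue
    		default:
    			log.Println("Note: Unknown Scope requested: Will return 'node' scope.")
    			continue
    		}
    		fmt.Println("would execute:", query)
    	}
    }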
@@ -53,12 +53,6 @@ func (idb *InfluxDBv2DataRepository) epochToTime(epoch int64) time.Time {
 
 func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.JobData, error) {
 
-	// DEBUG
-	// log.Println("<< Requested Metrics >> ")
-	// log.Println(metrics)
-	// log.Println("<< Requested Scope >> ")
-	// log.Println(scopes)
-
 	measurementsConds := make([]string, 0, len(metrics))
 	for _, m := range metrics {
 		measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
@@ -79,13 +73,12 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string,
 	jobData := make(schema.JobData) // Empty Schema: map[<string>FIELD]map[<MetricScope>SCOPE]<*JobMetric>METRIC
 	// Requested Scopes
 	for _, scope := range scopes {
-
 		// Query Influxdb
 		query := ""
-
 		switch scope {
 		case "node":
 			// Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean, Set NULL to 0.0
+			// log.Println("Note: Scope 'node' requested. ")
 			query = fmt.Sprintf(`
 				from(bucket: "%s")
 				|> range(start: %s, stop: %s)
@@ -99,37 +92,22 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string,
 				idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
 				measurementsCond, hostsCond)
 		case "socket":
-			// Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean, Set NULL to 0.0
 			log.Println("Note: Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
 			continue
-			// query = fmt.Sprintf(`
-			// 	from(bucket: "%s")
-			// 	|> range(start: %s, stop: %s)
-			// 	|> filter(fn: (r) => %s )
-			// 	|> filter(fn: (r) => %s )
-			// 	|> drop(columns: ["_start", "_stop"])
-			// 	|> group(columns: ["hostname", "_measurement"])
-			// 	|> aggregateWindow(every: 60s, fn: mean)
-			// 	|> map(fn: (r) => (if exists r._value then {r with _value: r._value} else {r with _value: 0.0}))`,
-			// idb.bucket,
-			// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
-			// measurementsCond, hostsCond)
 		case "core":
-			// Get Finest Granularity
 			log.Println("Note: Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
 			continue
+			// Get Finest Granularity only, Set NULL to 0.0
 			// query = fmt.Sprintf(`
 			// 	from(bucket: "%s")
 			// 	|> range(start: %s, stop: %s)
 			// 	|> filter(fn: (r) => %s )
 			// 	|> filter(fn: (r) => %s )
-			// 	|> drop(columns: ["_start", "_stop"])
-			// 	|> group(columns: ["hostname", "_measurement"])
-			// 	|> aggregateWindow(every: 60s, fn: mean)
+			// 	|> drop(columns: ["_start", "_stop", "cluster"])
 			// 	|> map(fn: (r) => (if exists r._value then {r with _value: r._value} else {r with _value: 0.0}))`,
 			// idb.bucket,
 			// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
 			// measurementsCond, hostsCond)
 		default:
 			log.Println("Note: Unknown Scope requested: Will return 'node' scope. ")
 			continue
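To make the string assembly above concrete, here is a hedged, self-contained sketch that fills a node-scope template (mirroring the pipeline steps visible in the commented query) with made-up values and prints the resulting Flux query. The bucket name, host list, and time bounds are invented, and joining the conditions with " or " is an assumption about how measurementsCond/hostsCond are built elsewhere in the file:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Invented example inputs; the real values come from job metadata.
    	metrics := []string{"cpu_load", "mem_used"}
    	hosts := []string{"node001", "node002"}

    	// Assumed combination of per-metric and per-host conditions.
    	measurementsConds := make([]string, 0, len(metrics))
    	for _, m := range metrics {
    		measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
    	}
    	measurementsCond := strings.Join(measurementsConds, " or ")

    	hostsConds := make([]string, 0, len(hosts))
    	for _, h := range hosts {
    		hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h))
    	}
    	hostsCond := strings.Join(hostsConds, " or ")

    	query := fmt.Sprintf(`
    		from(bucket: "%s")
    		|> range(start: %s, stop: %s)
    		|> filter(fn: (r) => %s )
    		|> filter(fn: (r) => %s )
    		|> drop(columns: ["_start", "_stop"])
    		|> group(columns: ["hostname", "_measurement"])
    		|> aggregateWindow(every: 60s, fn: mean)
    		|> map(fn: (r) => (if exists r._value then {r with _value: r._value} else {r with _value: 0.0}))`,
    		"example-bucket", "2024-01-01T00:00:00Z", "2024-01-01T01:00:00Z",
    		measurementsCond, hostsCond)
    	fmt.Println(query)
    }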
@@ -141,7 +119,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string,
 		return nil, err
 	}
 
-	// Init Metrics
+	// Init Metrics: Needs matching on scope level ...
 	for _, metric := range metrics {
 		jobMetric, ok := jobData[metric]
 		if !ok {
@@ -160,23 +138,53 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string,
 	}
 
 	// Process Result: Time-Data
 	field, host, hostSeries := "", "", schema.Series{}
-	for rows.Next() {
-		row := rows.Record()
-		if ( host == "" || host != row.ValueByKey("hostname").(string) || rows.TableChanged() ) {
-			if ( host != "" ) {
-				// Append Series before reset
-				jobData[field][scope].Series = append(jobData[field][scope].Series, hostSeries)
-			}
-			field, host = row.Measurement(), row.ValueByKey("hostname").(string)
-			hostSeries = schema.Series{
-				Hostname:   host,
-				Statistics: nil,
-				Data:       make([]schema.Float, 0),
-			}
-		}
-		val := row.Value().(float64)
-		hostSeries.Data = append(hostSeries.Data, schema.Float(val))
+	// typeId := 0
+	switch scope {
+	case "node":
+		for rows.Next() {
+			row := rows.Record()
+			if ( host == "" || host != row.ValueByKey("hostname").(string) || rows.TableChanged() ) {
+				if ( host != "" ) {
+					// Append Series before reset
+					jobData[field][scope].Series = append(jobData[field][scope].Series, hostSeries)
+				}
+				field, host = row.Measurement(), row.ValueByKey("hostname").(string)
+				hostSeries = schema.Series{
+					Hostname:   host,
+					Statistics: nil,
+					Data:       make([]schema.Float, 0),
+				}
+			}
+			val := row.Value().(float64)
+			hostSeries.Data = append(hostSeries.Data, schema.Float(val))
+		}
+	case "socket":
+		continue
+	case "core":
+		continue
+		// Include Series.Id in hostSeries
+		// for rows.Next() {
+		// 	row := rows.Record()
+		// 	if ( host == "" || host != row.ValueByKey("hostname").(string) || typeId != row.ValueByKey("type-id").(int) || rows.TableChanged() ) {
+		// 		if ( host != "" ) {
+		// 			// Append Series before reset
+		// 			jobData[field][scope].Series = append(jobData[field][scope].Series, hostSeries)
+		// 		}
+		// 		field, host, typeId = row.Measurement(), row.ValueByKey("hostname").(string), row.ValueByKey("type-id").(int)
+		// 		hostSeries = schema.Series{
+		// 			Hostname:   host,
+		// 			Id:         &typeId,
+		// 			Statistics: nil,
+		// 			Data:       make([]schema.Float, 0),
+		// 		}
+		// 	}
+		// 	val := row.Value().(float64)
+		// 	hostSeries.Data = append(hostSeries.Data, schema.Float(val))
+		// }
+	default:
+		continue
+		// return nil, errors.New("the InfluxDB metric data repository does not yet support other scopes than 'node, core'")
 	}
 	// Append last Series
 	jobData[field][scope].Series = append(jobData[field][scope].Series, hostSeries)
@@ -189,7 +197,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(job *schema.Job, metrics []string,
 	}
 
 	for _, scope := range scopes {
-		if scope == "node" { // Only 'node' support yet
+		if scope == "node" { // No 'socket/core' support yet
 			for metric, nodes := range stats {
 				// log.Println(fmt.Sprintf("<< Add Stats for : Field %s >>", metric))
 				for node, stats := range nodes {
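Net effect for callers of LoadData: whatever scopes are requested, only node-scope entries of the returned JobData carry series for now. A small sketch of that result shape, using simplified stand-ins for the schema package's types (the real schema.JobData / schema.Series definitions live in cc-backend and differ in detail):

    package main

    import "fmt"

    // Minimal stand-ins, only to make the return shape concrete; not the
    // actual cc-backend schema types.
    type Series struct {
    	Hostname string
    	Data     []float64
    }
    type JobMetric struct{ Series []Series }
    type JobData map[string]map[string]*JobMetric

    func main() {
    	// After this commit, scopes other than "node" are skipped, so only
    	// the "node" key is populated per metric.
    	data := JobData{
    		"cpu_load": {
    			"node": &JobMetric{Series: []Series{{Hostname: "host1", Data: []float64{0.9, 1.1}}}},
    		},
    	}
    	for metric, perScope := range data {
    		for scope, jm := range perScope {
    			fmt.Printf("%s/%s: %d series\n", metric, scope, len(jm.Series))
    		}
    	}
    }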