Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2024-12-24 12:29:05 +01:00

Commit 033598a656 (parent 491baafd1d): Remove loglevel notice

This commit drops the 'notice' level from cc-backend's logging: the NoteWriter, NotePrefix, and NoteLog variables and the Note/Notef helpers are deleted from pkg/log, every call site is migrated to log.Info/log.Infof, and the -loglevel flag help no longer lists 'notice'. Gofmt-style cleanups in the Prometheus metric data repository and a query timer in JobsStatistics ride along.
@@ -75,7 +75,7 @@ func main() {
     flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
     flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
     flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `<path-to-meta.json>:<path-to-data.json>,...`")
-    flag.StringVar(&flagLogLevel, "loglevel", "debug", "Sets the logging level: `[debug (default),info,notice,warn,err,fatal,crit]`")
+    flag.StringVar(&flagLogLevel, "loglevel", "debug", "Sets the logging level: `[debug (default),info,warn,err,fatal,crit]`")
     flag.Parse()
 
     if flagVersion {
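
For reference, a minimal self-contained sketch (not the actual cc-backend wiring) of how a -loglevel flag can be validated against the reduced level set that remains after this commit:

    package main

    import (
        "flag"
        "fmt"
        "os"
    )

    func main() {
        // The accepted set after this commit; "notice" is gone.
        valid := map[string]bool{
            "debug": true, "info": true, "warn": true,
            "err": true, "fatal": true, "crit": true,
        }
        lvl := flag.String("loglevel", "debug", "Sets the logging level")
        flag.Parse()
        if !valid[*lvl] {
            fmt.Fprintf(os.Stderr, "invalid loglevel %q\n", *lvl)
            os.Exit(1)
        }
        fmt.Println("using loglevel:", *lvl)
    }
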
@@ -44,7 +44,7 @@ func (la *LdapAuthenticator) Init(
     }
 
     if interval == 0 {
-        log.Note("Sync interval is zero")
+        log.Info("Sync interval is zero")
         return nil
     }
 
@@ -16,8 +16,8 @@ import (
     "time"
 
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
     "github.com/ClusterCockpit/cc-backend/pkg/log"
+    "github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
 
 type CCMetricStoreConfig struct {
@@ -283,7 +283,7 @@ func (ccms *CCMetricStore) buildQueries(
         mc := archive.GetMetricConfig(job.Cluster, metric)
         if mc == nil {
             // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
-            log.Notef("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
+            log.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
             continue
         }
 
@@ -14,8 +14,8 @@ import (
     "time"
 
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
     "github.com/ClusterCockpit/cc-backend/pkg/log"
+    "github.com/ClusterCockpit/cc-backend/pkg/schema"
     influxdb2 "github.com/influxdata/influxdb-client-go/v2"
     influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
 )
@@ -98,10 +98,10 @@ func (idb *InfluxDBv2DataRepository) LoadData(
             idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))),
             measurementsCond, hostsCond)
     case "socket":
-        log.Note("Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
+        log.Info("Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
         continue
     case "core":
-        log.Note(" Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
+        log.Info(" Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
         continue
         // Get Finest Granularity only, Set NULL to 0.0
         // query = fmt.Sprintf(`
@@ -115,7 +115,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
         //     idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
         //     measurementsCond, hostsCond)
     default:
-        log.Notef("Unknown scope '%s' requested: Will return 'node' scope.", scope)
+        log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
         continue
         // return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support other scopes than 'node'")
     }
@@ -194,7 +194,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
             //     hostSeries.Data = append(hostSeries.Data, schema.Float(val))
             // }
         default:
-            log.Notef("Unknown scope '%s' requested: Will return 'node' scope.", scope)
+            log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
             continue
             // return nil, errors.New("the InfluxDB metric data repository does not yet support other scopes than 'node, core'")
         }
@@ -324,7 +324,7 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData(
     ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
 
     // TODO : Implement to be used in Analysis- und System/Node-View
-    log.Notef("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)
+    log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)
 
     return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
 }
@@ -5,46 +5,46 @@
 package metricdata
 
 import (
-    "os"
-    "errors"
+    "bytes"
     "context"
     "encoding/json"
+    "errors"
     "fmt"
-    "strings"
-    "text/template"
-    "bytes"
-    "net/http"
-    "time"
     "math"
-    "sort"
+    "net/http"
+    "os"
     "regexp"
+    "sort"
+    "strings"
     "sync"
+    "text/template"
+    "time"
 
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
-    "github.com/ClusterCockpit/cc-backend/pkg/schema"
     "github.com/ClusterCockpit/cc-backend/pkg/log"
+    "github.com/ClusterCockpit/cc-backend/pkg/schema"
     promapi "github.com/prometheus/client_golang/api"
     promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
     promcfg "github.com/prometheus/common/config"
     promm "github.com/prometheus/common/model"
 )
 
 type PrometheusDataRepositoryConfig struct {
     Url       string            `json:"url"`
     Username  string            `json:"username,omitempty"`
     Suffix    string            `json:"suffix,omitempty"`
     Templates map[string]string `json:"query-templates"`
 }
 
 type PrometheusDataRepository struct {
     client      promapi.Client
     queryClient promv1.API
     suffix      string
     templates   map[string]*template.Template
 }
 
 type PromQLArgs struct {
     Nodes string
 }
 
 type Trie map[rune]Trie
@@ -60,10 +60,9 @@ func contains(s []schema.MetricScope, str schema.MetricScope) bool {
     return false
 }
 
-
 func MinMaxMean(data []schema.Float) (float64, float64, float64) {
     if len(data) == 0 {
         return 0.0, 0.0, 0.0
     }
     min := math.MaxFloat64
     max := -math.MaxFloat64
|
|||||||
}
|
}
|
||||||
sum += float64(val)
|
sum += float64(val)
|
||||||
n += 1
|
n += 1
|
||||||
if float64(val) > max {max = float64(val)}
|
if float64(val) > max {
|
||||||
if float64(val) < min {min = float64(val)}
|
max = float64(val)
|
||||||
|
}
|
||||||
|
if float64(val) < min {
|
||||||
|
min = float64(val)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return min, max, sum / n
|
return min, max, sum / n
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Rewritten from
|
// Rewritten from
|
||||||
// https://github.com/ermanh/trieregex/blob/master/trieregex/trieregex.py
|
// https://github.com/ermanh/trieregex/blob/master/trieregex/trieregex.py
|
||||||
func nodeRegex(nodes []string) string {
|
func nodeRegex(nodes []string) string {
|
||||||
root := Trie{}
|
root := Trie{}
|
||||||
// add runes of each compute node to trie
|
// add runes of each compute node to trie
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
_trie := root
|
_trie := root
|
||||||
for _, c := range node {
|
for _, c := range node {
|
||||||
if _, ok := _trie[c]; !ok {_trie[c] = Trie{}}
|
if _, ok := _trie[c]; !ok {
|
||||||
_trie = _trie[c]
|
_trie[c] = Trie{}
|
||||||
}
|
}
|
||||||
_trie['*'] = Trie{}
|
_trie = _trie[c]
|
||||||
}
|
}
|
||||||
// recursively build regex from rune trie
|
_trie['*'] = Trie{}
|
||||||
var trieRegex func(trie Trie, reset bool) string
|
}
|
||||||
trieRegex = func(trie Trie, reset bool) string {
|
// recursively build regex from rune trie
|
||||||
if reset == true {
|
var trieRegex func(trie Trie, reset bool) string
|
||||||
trie = root
|
trieRegex = func(trie Trie, reset bool) string {
|
||||||
}
|
if reset == true {
|
||||||
if len(trie) == 0 {
|
trie = root
|
||||||
return ""
|
}
|
||||||
}
|
if len(trie) == 0 {
|
||||||
if len(trie) == 1 {
|
return ""
|
||||||
for key, _trie := range trie {
|
}
|
||||||
if key == '*' { return "" }
|
if len(trie) == 1 {
|
||||||
return regexp.QuoteMeta(string(key)) + trieRegex(_trie, false)
|
for key, _trie := range trie {
|
||||||
}
|
if key == '*' {
|
||||||
} else {
|
return ""
|
||||||
sequences := []string{}
|
}
|
||||||
for key, _trie := range trie {
|
return regexp.QuoteMeta(string(key)) + trieRegex(_trie, false)
|
||||||
if key != '*' {
|
}
|
||||||
sequences = append(sequences, regexp.QuoteMeta(string(key)) + trieRegex(_trie, false))
|
} else {
|
||||||
}
|
sequences := []string{}
|
||||||
}
|
for key, _trie := range trie {
|
||||||
sort.Slice(sequences, func(i, j int) bool {
|
if key != '*' {
|
||||||
return (-len(sequences[i]) < -len(sequences[j])) || (sequences[i] < sequences[j])
|
sequences = append(sequences, regexp.QuoteMeta(string(key))+trieRegex(_trie, false))
|
||||||
})
|
}
|
||||||
var result string
|
}
|
||||||
// single edge from this tree node
|
sort.Slice(sequences, func(i, j int) bool {
|
||||||
if len(sequences) == 1 {
|
return (-len(sequences[i]) < -len(sequences[j])) || (sequences[i] < sequences[j])
|
||||||
result = sequences[0]
|
})
|
||||||
if len(result) > 1 {
|
var result string
|
||||||
result = "(?:" + result + ")"
|
// single edge from this tree node
|
||||||
}
|
if len(sequences) == 1 {
|
||||||
// multiple edges, each length 1
|
result = sequences[0]
|
||||||
} else if s := strings.Join(sequences, ""); len(s) == len(sequences) {
|
if len(result) > 1 {
|
||||||
// char or numeric range
|
result = "(?:" + result + ")"
|
||||||
if len(s)-1 == int(s[len(s)-1]) - int(s[0]) {
|
}
|
||||||
result = fmt.Sprintf("[%c-%c]", s[0], s[len(s)-1])
|
// multiple edges, each length 1
|
||||||
// char or numeric set
|
} else if s := strings.Join(sequences, ""); len(s) == len(sequences) {
|
||||||
} else {
|
// char or numeric range
|
||||||
result = "[" + s + "]"
|
if len(s)-1 == int(s[len(s)-1])-int(s[0]) {
|
||||||
}
|
result = fmt.Sprintf("[%c-%c]", s[0], s[len(s)-1])
|
||||||
// multiple edges of different lengths
|
// char or numeric set
|
||||||
} else {
|
} else {
|
||||||
result = "(?:" + strings.Join(sequences, "|") + ")"
|
result = "[" + s + "]"
|
||||||
}
|
}
|
||||||
if _, ok := trie['*']; ok { result += "?"}
|
// multiple edges of different lengths
|
||||||
return result
|
} else {
|
||||||
}
|
result = "(?:" + strings.Join(sequences, "|") + ")"
|
||||||
return ""
|
}
|
||||||
}
|
if _, ok := trie['*']; ok {
|
||||||
return trieRegex(root, true)
|
result += "?"
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return trieRegex(root, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
|
func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
|
||||||
var config PrometheusDataRepositoryConfig
|
var config PrometheusDataRepositoryConfig
|
||||||
// parse config
|
// parse config
|
||||||
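
The nodeRegex function above compresses a hostname list into a compact regular expression via a rune trie. As a point of comparison, here is a self-contained naive variant (an illustration, not the cc-backend implementation) that simply alternates the quoted names; it matches the same strings but grows linearly with the node count, which is exactly what the trie construction avoids. For node01, node02, node10 the trie version would emit something like node(?:0[12]|10):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // naiveNodeRegex: quote each hostname and join with '|'.
    func naiveNodeRegex(nodes []string) string {
        quoted := make([]string, len(nodes))
        for i, n := range nodes {
            quoted[i] = regexp.QuoteMeta(n)
        }
        return strings.Join(quoted, "|")
    }

    func main() {
        nodes := []string{"node01", "node02", "node10"}
        re := regexp.MustCompile("^(?:" + naiveNodeRegex(nodes) + ")$")
        fmt.Println(re.MatchString("node02")) // true
        fmt.Println(re.MatchString("node99")) // false
    }
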
@@ -169,7 +174,7 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
     }
     // init client
     client, err := promapi.NewClient(promapi.Config{
         Address:      config.Url,
         RoundTripper: rt,
     })
     if err != nil {
@@ -194,9 +199,6 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
     return nil
 }
 
-
-
-
 // TODO: respect scope argument
 func (pdb *PrometheusDataRepository) FormatQuery(
     metric string,
@@ -226,42 +228,36 @@ func (pdb *PrometheusDataRepository) FormatQuery(
     }
 }
 
-
-
-
 // Convert PromAPI row to CC schema.Series
 func (pdb *PrometheusDataRepository) RowToSeries(
     from time.Time,
     step int64,
     steps int64,
-    row *promm.SampleStream) (schema.Series) {
+    row *promm.SampleStream) schema.Series {
     ts := from.Unix()
     hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)
     // init array of expected length with NaN
-    values := make([]schema.Float, steps + 1)
+    values := make([]schema.Float, steps+1)
     for i, _ := range values {
         values[i] = schema.NaN
     }
+
     // copy recorded values from prom sample pair
     for _, v := range row.Values {
         idx := (v.Timestamp.Unix() - ts) / step
         values[idx] = schema.Float(v.Value)
     }
     min, max, mean := MinMaxMean(values)
     // output struct
     return schema.Series{
         Hostname: hostname,
         Data:     values,
         Statistics: &schema.MetricStatistics{
             Avg: mean,
             Min: min,
             Max: max,
         },
     }
 }
 
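
The sample-placement arithmetic in RowToSeries deserves a worked example: each Prometheus sample lands at the slot given by its offset from the series start divided by the timestep. The values below are hypothetical, chosen only to show the arithmetic:

    package main

    import "fmt"

    func main() {
        ts := int64(1700000000)     // hypothetical series start (Unix seconds)
        step := int64(60)           // hypothetical metric timestep in seconds
        sample := int64(1700000180) // a sample taken 180s after the start

        idx := (sample - ts) / step
        fmt.Println(idx) // 3: the sample fills values[3]
    }
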
 func (pdb *PrometheusDataRepository) LoadData(
     job *schema.Job,
@@ -270,7 +266,7 @@ func (pdb *PrometheusDataRepository) LoadData(
     ctx context.Context) (schema.JobData, error) {
 
     // TODO respect requested scope
-    if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode){
+    if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
         scopes = append(scopes, schema.MetricScopeNode)
     }
 
@@ -285,7 +281,9 @@ func (pdb *PrometheusDataRepository) LoadData(
 
     for _, scope := range scopes {
         if scope != schema.MetricScopeNode {
-            logOnce.Do(func(){log.Notef("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
+            logOnce.Do(func() {
+                log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+            })
             continue
         }
 
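
The logOnce.Do call relies on sync.Once so that the unsupported-scope message is emitted a single time even though the loop can hit the branch once per scope. A reduced, runnable sketch of the same pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var logOnce sync.Once
        scopes := []string{"socket", "core", "node"}
        for _, scope := range scopes {
            if scope != "node" {
                // Fires only for "socket"; the "core" iteration stays silent.
                logOnce.Do(func() {
                    fmt.Printf("Scope '%s' requested, but not yet supported\n", scope)
                })
                continue
            }
            fmt.Println("handling node scope")
        }
    }
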
@@ -303,10 +301,10 @@ func (pdb *PrometheusDataRepository) LoadData(
 
             // ranged query over all job nodes
             r := promv1.Range{
                 Start: from,
                 End:   to,
                 Step:  time.Duration(metricConfig.Timestep * 1e9),
             }
             result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
 
             if err != nil {
@@ -340,16 +338,13 @@ func (pdb *PrometheusDataRepository) LoadData(
             }
             // sort by hostname to get uniform coloring
             sort.Slice(jobMetric.Series, func(i, j int) bool {
                 return (jobMetric.Series[i].Hostname < jobMetric.Series[j].Hostname)
             })
         }
     }
     return jobData, nil
 }
 
-
-
-
 // TODO change implementation to precomputed/cached stats
 func (pdb *PrometheusDataRepository) LoadStats(
     job *schema.Job,
@@ -374,9 +369,6 @@ func (pdb *PrometheusDataRepository) LoadStats(
     return stats, nil
 }
 
-
-
-
 func (pdb *PrometheusDataRepository) LoadNodeData(
     cluster string,
     metrics, nodes []string,
@@ -393,7 +385,9 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
     }
     for _, scope := range scopes {
         if scope != schema.MetricScopeNode {
-            logOnce.Do(func(){log.Notef("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)})
+            logOnce.Do(func() {
+                log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+            })
             continue
         }
         for _, metric := range metrics {
@@ -410,10 +404,10 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
 
             // ranged query over all nodes
             r := promv1.Range{
                 Start: from,
                 End:   to,
                 Step:  time.Duration(metricConfig.Timestep * 1e9),
             }
             result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
 
             if err != nil {
@@ -440,8 +434,8 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
                     Unit:     metricConfig.Unit,
                     Scope:    scope,
                     Timestep: metricConfig.Timestep,
                     Series:   []schema.Series{pdb.RowToSeries(from, step, steps, row)},
                 },
             )
         }
     }
@@ -339,7 +339,7 @@ func (r *JobRepository) CountGroupedJobs(ctx context.Context, aggreg model.Aggre
             count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
             runner = r.DB
         default:
-            log.Notef("CountGroupedJobs() Weight %v unknown.", *weight)
+            log.Infof("CountGroupedJobs() Weight %v unknown.", *weight)
         }
     }
 
@@ -407,7 +407,7 @@ func (r *JobRepository) MarkArchived(
         case "file_bw":
             stmt = stmt.Set("file_bw_avg", stats.Avg)
         default:
-            log.Notef("MarkArchived() Metric '%v' unknown", metric)
+            log.Infof("MarkArchived() Metric '%v' unknown", metric)
         }
     }
 
@@ -633,7 +633,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
     }
 
     if rowsAffected > 0 {
-        log.Notef("%d jobs have been marked as failed due to running too long", rowsAffected)
+        log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
     }
     return nil
 }
@@ -652,6 +652,7 @@ var groupBy2column = map[model.Aggregate]string{
 func (r *JobRepository) JobsStatistics(ctx context.Context,
     filter []*model.JobFilter,
     groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
+    start := time.Now()
 
     // In case `groupBy` is nil (not used), the model.JobsStatistics used is at the key '' (empty string)
     stats := map[string]*model.JobsStatistics{}
@@ -793,6 +794,7 @@ func (r *JobRepository) JobsStatistics(ctx context.Context,
         }
     }
 
+    log.Infof("Timer %s", time.Since(start))
     return res, nil
 }
 
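
The start := time.Now() / log.Infof("Timer %s", time.Since(start)) pair added here is the standard Go wall-clock timing idiom. A deferred variant of the same pattern (using the standard library logger rather than cc-backend's pkg/log, and a sleep as a stand-in for the real query work):

    package main

    import (
        "log"
        "time"
    )

    func jobsStatistics() {
        start := time.Now()
        // Deferring the log call times the whole function, including early returns.
        defer func() { log.Printf("Timer %s", time.Since(start)) }()

        time.Sleep(50 * time.Millisecond) // stand-in for the real statistics queries
    }

    func main() {
        jobsStatistics()
    }
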
@@ -19,7 +19,6 @@ import (
 
 var (
     DebugWriter io.Writer = os.Stderr
-    NoteWriter  io.Writer = os.Stderr
     InfoWriter  io.Writer = os.Stderr
     WarnWriter  io.Writer = os.Stderr
     ErrWriter   io.Writer = os.Stderr
@@ -29,19 +28,17 @@ var (
 var (
     DebugPrefix string = "<7>[DEBUG] "
     InfoPrefix  string = "<6>[INFO] "
-    NotePrefix  string = "<5>[NOTICE] "
     WarnPrefix  string = "<4>[WARNING] "
     ErrPrefix   string = "<3>[ERROR] "
     CritPrefix  string = "<2>[CRITICAL] "
 )
 
 var (
-    DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, 0)
-    InfoLog  *log.Logger = log.New(InfoWriter, InfoPrefix, 0)
-    NoteLog  *log.Logger = log.New(NoteWriter, NotePrefix, log.Lshortfile)
-    WarnLog  *log.Logger = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
-    ErrLog   *log.Logger = log.New(ErrWriter, ErrPrefix, log.Llongfile)
-    CritLog  *log.Logger = log.New(CritWriter, CritPrefix, log.Llongfile)
+    DebugLog *log.Logger
+    InfoLog  *log.Logger
+    WarnLog  *log.Logger
+    ErrLog   *log.Logger
+    CritLog  *log.Logger
 )
 
 /* CONFIG */
@@ -57,9 +54,6 @@ func Init(lvl string, logdate bool) {
     case "warn":
         InfoWriter = io.Discard
         fallthrough
-    case "notice":
-        NoteWriter = io.Discard
-        fallthrough
     case "info":
         DebugWriter = io.Discard
     case "debug":
@@ -72,15 +66,13 @@ func Init(lvl string, logdate bool) {
 
     if !logdate {
         DebugLog = log.New(DebugWriter, DebugPrefix, 0)
-        InfoLog = log.New(InfoWriter, InfoPrefix, 0)
-        NoteLog = log.New(NoteWriter, NotePrefix, log.Lshortfile)
+        InfoLog = log.New(InfoWriter, InfoPrefix, log.Lshortfile)
         WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
         ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
         CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
     } else {
         DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
-        InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags)
-        NoteLog = log.New(NoteWriter, NotePrefix, log.LstdFlags|log.Lshortfile)
+        InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
         WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
         ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
         CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
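
With the notice case deleted, Init's switch discards writers cumulatively through fallthrough: picking "warn" silences info and debug in one pass. A reduced, runnable sketch of the cascade (the case labels for the levels above "warn" are assumptions here, not copied from the file):

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    func writersFor(lvl string) (debugW, infoW, warnW io.Writer) {
        debugW, infoW, warnW = os.Stderr, os.Stderr, os.Stderr
        switch lvl {
        case "err", "fatal", "crit": // assumed labels for the upper levels
            warnW = io.Discard
            fallthrough
        case "warn":
            infoW = io.Discard
            fallthrough
        case "info":
            debugW = io.Discard
        case "debug":
            // keep everything enabled
        }
        return debugW, infoW, warnW
    }

    func main() {
        debugW, infoW, warnW := writersFor("warn")
        fmt.Println(debugW == io.Discard, infoW == io.Discard, warnW == io.Discard)
        // Output: true true false
    }
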
@@ -108,10 +100,6 @@ func Info(v ...interface{}) {
     InfoLog.Output(2, printStr(v...))
 }
 
-func Note(v ...interface{}) {
-    NoteLog.Output(2, printStr(v...))
-}
-
 func Warn(v ...interface{}) {
     WarnLog.Output(2, printStr(v...))
 }
@@ -157,10 +145,6 @@ func Infof(format string, v ...interface{}) {
     InfoLog.Output(2, printfStr(format, v...))
 }
 
-func Notef(format string, v ...interface{}) {
-    NoteLog.Output(2, printfStr(format, v...))
-}
-
 func Warnf(format string, v ...interface{}) {
     WarnLog.Output(2, printfStr(format, v...))
 }
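
Note and Notef are deleted outright, so any out-of-tree caller breaks at compile time. If a transition period were wanted, a hypothetical shim inside pkg/log (not part of this commit) could forward the old names to the info level that remains:

    package log

    // Hypothetical compatibility shim, not part of this commit: keeps old
    // call sites compiling by forwarding the removed notice level to info.

    // Note forwards to Info.
    func Note(v ...interface{}) {
        Info(v...)
    }

    // Notef forwards to Infof.
    func Notef(format string, v ...interface{}) {
        Infof(format, v...)
    }
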