Merge pull request #386 from ClusterCockpit/hotfix
Prepare re-release for v1.4.4
This commit is contained in: commit f65e122f8d
@@ -6,6 +6,20 @@ This is a bug fix release of `cc-backend`, the API backend and frontend
 implementation of ClusterCockpit.
 For release specific notes visit the [ClusterCockpit Documentation](https://clustercockpit.org/docs/release/).
 
+## Breaking changes
+
+The option `apiAllowedIPs` is now a required configuration attribute in
+`config.json`. This option restricts access to the admin API.
+
+To retain the previous behavior, in which the API is accessible from everywhere
+by default, set:
+
+```json
+"apiAllowedIPs": [
+  "*"
+]
+```
+
 ## Breaking changes for minor release 1.4.x
 
 - You need to perform a database migration. Depending on your database size the
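For illustration, a minimal standalone Go sketch of the kind of allowlist check that an `apiAllowedIPs` setting implies is shown below. This is not cc-backend's actual middleware: the handler names, route, listen address, and example IPs are invented, and the sketch only compares literal addresses (CIDR handling is not assumed).

```go
package main

import (
	"net"
	"net/http"
)

// allowedIPs plays the role of the apiAllowedIPs setting: "*" admits every
// client, anything else must match the client address exactly.
// The addresses here are example values, not defaults from cc-backend.
var allowedIPs = []string{"127.0.0.1", "::1"}

func ipAllowed(remoteAddr string) bool {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		host = remoteAddr // address came without a port
	}
	for _, a := range allowedIPs {
		if a == "*" || a == host {
			return true
		}
	}
	return false
}

// guard rejects clients that are not on the allowlist before the admin
// handler runs.
func guard(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !ipAllowed(r.RemoteAddr) {
			http.Error(w, "access denied", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	admin := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("admin API\n"))
	})
	http.Handle("/api/admin/", guard(admin))
	http.ListenAndServe("127.0.0.1:8080", nil)
}
```

With `"apiAllowedIPs": ["*"]` every check passes, which is why that value reproduces the pre-1.4.4 behavior.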
@@ -32,6 +32,18 @@ const configString = `
 	"jwts": {
 		"max-age": "2000h"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
+	"enable-resampling": {
+		"trigger": 30,
+		"resolutions": [
+			600,
+			300,
+			120,
+			60
+		]
+	},
 	"clusters": [
 	{
 		"name": "name",
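The embedded default configuration now ships both new blocks. As a sketch of their shape only, the following standalone Go snippet unmarshals them into illustrative structs; the type and field names are assumptions for this example, not cc-backend's real schema types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative types for the two new config blocks; the real cc-backend
// schema structs may use different names and nesting.
type resamplingConfig struct {
	Trigger     int   `json:"trigger"`
	Resolutions []int `json:"resolutions"`
}

type partialConfig struct {
	APIAllowedIPs []string          `json:"apiAllowedIPs"`
	Resampling    *resamplingConfig `json:"enable-resampling"`
}

func main() {
	// Same values as the defaults added above.
	raw := []byte(`{
		"apiAllowedIPs": ["*"],
		"enable-resampling": {
			"trigger": 30,
			"resolutions": [600, 300, 120, 60]
		}
	}`)

	var cfg partialConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.APIAllowedIPs, cfg.Resampling.Trigger, cfg.Resampling.Resolutions)
}
```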
@@ -17,6 +17,9 @@
 			60
 		]
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"emission-constant": 317,
 	"clusters": [
 		{
@@ -1,50 +1,62 @@
 {
     "addr": "0.0.0.0:443",
     "ldap": {
         "url": "ldaps://test",
         "user_base": "ou=people,ou=hpc,dc=test,dc=de",
         "search_dn": "cn=hpcmonitoring,ou=roadm,ou=profile,ou=hpc,dc=test,dc=de",
         "user_bind": "uid={username},ou=people,ou=hpc,dc=test,dc=de",
         "user_filter": "(&(objectclass=posixAccount))"
     },
     "https-cert-file": "/etc/letsencrypt/live/url/fullchain.pem",
     "https-key-file": "/etc/letsencrypt/live/url/privkey.pem",
     "user": "clustercockpit",
     "group": "clustercockpit",
     "archive": {
         "kind": "file",
         "path": "./var/job-archive"
     },
-    "validate": true,
+    "validate": false,
+    "apiAllowedIPs": [
+        "*"
+    ],
     "clusters": [
         {
             "name": "test",
             "metricDataRepository": {
                 "kind": "cc-metric-store",
                 "url": "http://localhost:8082",
                 "token": "eyJhbGciOiJF-E-pQBQ"
             },
             "filterRanges": {
                 "numNodes": {
                     "from": 1,
                     "to": 64
                 },
                 "duration": {
                     "from": 0,
                     "to": 86400
                 },
                 "startTime": {
                     "from": "2022-01-01T00:00:00Z",
                     "to": null
                 }
             }
         }
     ],
     "jwts": {
         "cookieName": "",
         "validateUser": false,
         "max-age": "2000h",
         "trustedIssuer": ""
     },
+    "enable-resampling": {
+        "trigger": 30,
+        "resolutions": [
+            600,
+            300,
+            120,
+            60
+        ]
+    },
     "short-running-jobs-duration": 300
 }
@@ -45,6 +45,9 @@ func setup(t *testing.T) *api.RestApi {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 	{
 		"name": "testcluster",
@@ -45,6 +45,9 @@ func setup(t *testing.T) *repository.JobRepository {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 	{
 		"name": "testcluster",
@@ -674,57 +674,32 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 		}
 	}
 
-	// log.Debugf("Metric %s, Peak %f, Unit %s, Aggregation %s", metric, peak, unit, aggreg)
-	// Make bins, see https://jereze.com/code/sql-histogram/
+	// log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
+	// Make bins, see https://jereze.com/code/sql-histogram/ (Modified here)
 	start := time.Now()
-	jm := fmt.Sprintf(`json_extract(footprint, "$.%s")`, (metric + "_" + footprintStat))
-	crossJoinQuery := sq.Select(
-		fmt.Sprintf(`max(%s) as max`, jm),
-		fmt.Sprintf(`min(%s) as min`, jm),
-	).From("job").Where(
-		"JSON_VALID(footprint)",
-	).Where(
-		fmt.Sprintf(`%s is not null`, jm),
-	).Where(
-		fmt.Sprintf(`%s <= %f`, jm, peak),
-	)
-
-	crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery)
-
-	if cjqerr != nil {
-		return nil, cjqerr
-	}
-
-	for _, f := range filters {
-		crossJoinQuery = BuildWhereClause(f, crossJoinQuery)
-	}
-
-	crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql()
-	if sqlerr != nil {
-		return nil, sqlerr
-	}
-
-	binQuery := fmt.Sprintf(`CAST( (case when %s = value.max
-		then value.max*0.999999999 else %s end - value.min) / (value.max -
-		value.min) * %v as INTEGER )`, jm, jm, *bins)
+	// Find Jobs' Value Bin Number: Divide Value by Peak, Multiply by RequestedBins, then CAST to INT: Gets Bin-Number of Job
+	binQuery := fmt.Sprintf(`CAST(
+		((case when json_extract(footprint, "$.%s") = %f then %f*0.999999999 else json_extract(footprint, "$.%s") end) / %f)
+		* %v as INTEGER )`,
+		(metric + "_" + footprintStat), peak, peak, (metric + "_" + footprintStat), peak, *bins)
 
 	mainQuery := sq.Select(
 		fmt.Sprintf(`%s + 1 as bin`, binQuery),
-		fmt.Sprintf(`count(%s) as count`, jm),
-		fmt.Sprintf(`CAST(((value.max / %d) * (%v )) as INTEGER ) as min`, *bins, binQuery),
-		fmt.Sprintf(`CAST(((value.max / %d) * (%v + 1 )) as INTEGER ) as max`, *bins, binQuery),
-	).From("job").CrossJoin(
-		fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
-	).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak))
+		fmt.Sprintf(`count(*) as count`),
+		// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * %s as min`, peak, *bins, binQuery),
+		// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * (%s + 1) as max`, peak, *bins, binQuery),
+	).From("job").Where(
+		"JSON_VALID(footprint)",
+	).Where(fmt.Sprintf(`json_extract(footprint, "$.%s") is not null and json_extract(footprint, "$.%s") <= %f`, (metric + "_" + footprintStat), (metric + "_" + footprintStat), peak))
 
 	// Only accessible Jobs...
 	mainQuery, qerr := SecurityCheck(ctx, mainQuery)
 
 	if qerr != nil {
 		return nil, qerr
 	}
 
 	// Filters...
 	for _, f := range filters {
 		mainQuery = BuildWhereClause(f, mainQuery)
 	}
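To make the rewritten binning arithmetic easier to follow, here is a small standalone Go sketch that mirrors the new SQL `binQuery` expression: divide the footprint value by the metric peak, multiply by the requested bin count, truncate, and shift to 1-based bins, nudging a value equal to the peak into the last bin. It illustrates the formula only and is not code from the repository; the peak and bin values are made up.

```go
package main

import "fmt"

// binNumber reproduces, in Go, the arithmetic of the new SQL binQuery:
// scale the footprint value by the metric peak, multiply by the requested
// number of bins, truncate to an integer and shift to 1-based bins. A value
// exactly equal to peak is nudged just below it so it falls into the last
// bin instead of a phantom bins+1 bin.
func binNumber(value, peak float64, bins int) int {
	if value == peak {
		value = peak * 0.999999999
	}
	return int((value/peak)*float64(bins)) + 1
}

func main() {
	peak, bins := 100.0, 10 // example values for illustration
	for _, v := range []float64{0, 9.9, 10, 55, 99.9, 100} {
		fmt.Printf("value %6.1f -> bin %d\n", v, binNumber(v, peak, bins))
	}
}
```

Compared with the old query, the known peak replaces the per-metric min/max that was previously computed in SQL, which is why the CROSS JOIN subquery could be dropped entirely.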
@@ -738,32 +713,34 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 		return nil, err
 	}
 
-	// Setup Array
+	// Setup Return Array With Bin-Numbers for Match and Min/Max based on Peak
 	points := make([]*model.MetricHistoPoint, 0)
+	binStep := int(peak) / *bins
 	for i := 1; i <= *bins; i++ {
-		binMax := ((int(peak) / *bins) * i)
-		binMin := ((int(peak) / *bins) * (i - 1))
-		point := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
-		points = append(points, &point)
+		binMin := (binStep * (i - 1))
+		binMax := (binStep * i)
+		epoint := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
+		points = append(points, &epoint)
 	}
 
-	for rows.Next() {
-		point := model.MetricHistoPoint{}
-		if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
-			log.Warnf("Error while scanning rows for %s", jm)
-			return nil, err // Totally bricks cc-backend if returned and if all metrics requested?
+	for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!)
+		rpoint := model.MetricHistoPoint{}
+		if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max
+			log.Warnf("Error while scanning rows for %s", metric)
+			return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested?
 		}
 
 		for _, e := range points {
-			if e.Bin != nil && point.Bin != nil {
-				if *e.Bin == *point.Bin {
-					e.Count = point.Count
-					if point.Min != nil {
-						e.Min = point.Min
-					}
-					if point.Max != nil {
-						e.Max = point.Max
-					}
+			if e.Bin != nil && rpoint.Bin != nil {
+				if *e.Bin == *rpoint.Bin {
+					e.Count = rpoint.Count
+					// Only Required For Debug: Check DB returned Min/Max against Backend Init above
+					// if rpoint.Min != nil {
+					// 	log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
+					// }
+					// if rpoint.Max != nil {
+					// 	log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
+					// }
 					break
 				}
 			}
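The second half of the change pre-initializes every bin with min/max derived from the peak and then only fills in counts for bins the database actually returned. Below is a simplified, hedged Go sketch of that pattern; the `histoPoint` struct stands in for `model.MetricHistoPoint`, and the row data is made up.

```go
package main

import "fmt"

// histoPoint is a simplified stand-in for model.MetricHistoPoint, for
// illustration only.
type histoPoint struct {
	Bin, Count, Min, Max int
}

func main() {
	peak, bins := 100, 10 // example values

	// Pre-initialize every bin with Min/Max derived from the peak, as the
	// new loop does with binStep; Count starts at zero.
	binStep := peak / bins
	points := make([]histoPoint, 0, bins)
	for i := 1; i <= bins; i++ {
		points = append(points, histoPoint{
			Bin: i, Min: binStep * (i - 1), Max: binStep * i,
		})
	}

	// Rows returned by the histogram query are sparse: only bins that
	// actually contain jobs show up. Match them by bin number and copy the
	// count over.
	dbRows := map[int]int{2: 14, 5: 3, 10: 1} // hypothetical bin -> count
	for bin, count := range dbRows {
		for i := range points {
			if points[i].Bin == bin {
				points[i].Count = count
				break
			}
		}
	}

	for _, p := range points {
		fmt.Printf("bin %2d [%3d, %3d): count %d\n", p.Bin, p.Min, p.Max, p.Count)
	}
}
```

Because the bins already carry their min/max, the new `rows.Scan` only needs the bin number and count, which is why the Min/Max columns were demoted to debug-only output.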
@@ -25,6 +25,9 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 	{
 		"name": "testcluster",
@@ -492,6 +492,7 @@
 		},
 		"required": [
 			"jwts",
-			"clusters"
+			"clusters",
+			"apiAllowedIPs"
 		]
 	}
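This schema change is what makes `apiAllowedIPs` mandatory at the top level of `config.json`. As a rough, stdlib-only illustration of the required-keys rule it enforces (not the project's actual schema validation code), a check could look like this:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// requiredKeys mirrors the updated "required" list in the schema hunk above.
var requiredKeys = []string{"jwts", "clusters", "apiAllowedIPs"}

// checkRequired is a minimal stand-in for what the schema now enforces:
// validation fails unless every one of these top-level keys is present.
func checkRequired(raw []byte) error {
	var doc map[string]json.RawMessage
	if err := json.Unmarshal(raw, &doc); err != nil {
		return err
	}
	for _, k := range requiredKeys {
		if _, ok := doc[k]; !ok {
			return fmt.Errorf("missing required config attribute %q", k)
		}
	}
	return nil
}

func main() {
	cfg := []byte(`{"jwts": {}, "clusters": []}`) // no apiAllowedIPs
	fmt.Println(checkRequired(cfg))               // reports the missing key
}
```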
@@ -14,17 +14,20 @@ func TestValidateConfig(t *testing.T) {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 	{
 		"name": "testcluster",
 		"metricDataRepository": {
 			"kind": "cc-metric-store",
 			"url": "localhost:8082"},
 		"filterRanges": {
 			"numNodes": { "from": 1, "to": 64 },
 			"duration": { "from": 0, "to": 86400 },
 			"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
 		}}]
 	}`)
 
 	if err := Validate(Config, bytes.NewReader(json)); err != nil {
@@ -33,7 +36,6 @@ func TestValidateConfig(t *testing.T) {
 	}
 
 func TestValidateJobMeta(t *testing.T) {
-
 }
 
 func TestValidateCluster(t *testing.T) {