Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2025-05-09 10:41:42 +02:00

Merge pull request #386 from ClusterCockpit/hotfix

Prepare re-release for v1.4.4

This commit is contained in commit f65e122f8d.
@@ -6,6 +6,20 @@ This is a bug fix release of `cc-backend`, the API backend and frontend
 implementation of ClusterCockpit.
 
 For release-specific notes visit the [ClusterCockpit Documentation](https://clustercockpit.org/docs/release/).
 
+## Breaking changes
+
+The option `apiAllowedIPs` is now a required configuration attribute in
+`config.json`. This option restricts access to the admin API.
+
+To retain the previous behavior, where the API is by default accessible from
+everywhere, set:
+
+```json
+"apiAllowedIPs": [
+    "*"
+]
+```
+
 ## Breaking changes for minor release 1.4.x
 
 - You need to perform a database migration. Depending on your database size the
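For orientation, here is a minimal sketch of the kind of check an `apiAllowedIPs` allow-list implies, written as a plain `net/http` middleware. The handler, route, and list contents are illustrative assumptions, not cc-backend's actual code; the `"*"` entry reproduces the unrestricted pre-1.4.4 behavior.

```go
package main

import (
	"fmt"
	"net"
	"net/http"
)

// allowedIPs stands in for the parsed "apiAllowedIPs" config value
// (assumption: exact-match entries plus the "*" wildcard).
var allowedIPs = []string{"127.0.0.1", "::1"}

// ipAllowed reports whether a request's remote address is on the list.
func ipAllowed(remoteAddr string) bool {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return false
	}
	for _, allowed := range allowedIPs {
		if allowed == "*" || allowed == host {
			return true
		}
	}
	return false
}

// adminOnly wraps a handler and rejects callers that are not allow-listed.
func adminOnly(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !ipAllowed(r.RemoteAddr) {
			http.Error(w, "forbidden", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	admin := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "admin API")
	})
	http.ListenAndServe(":8080", adminOnly(admin))
}
```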
@@ -32,6 +32,18 @@ const configString = `
 	"jwts": {
 		"max-age": "2000h"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
+	"enable-resampling": {
+		"trigger": 30,
+		"resolutions": [
+			600,
+			300,
+			120,
+			60
+		]
+	},
 	"clusters": [
 		{
 			"name": "name",
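The `enable-resampling` block added here pairs a `trigger` value with a list of data resolutions in seconds. As a hedged reading (the actual semantics live elsewhere in cc-backend), one plausible use is picking the finest resolution that keeps the number of returned points within a budget; everything in this sketch, including treating `trigger` as that budget, is an assumption.

```go
// pickResolution returns the finest resolution (smallest step in seconds)
// that yields at most maxPoints data points for a job of the given
// duration. resolutions is assumed sorted coarse-to-fine, as in the
// config above (600, 300, 120, 60).
func pickResolution(durationSec int, resolutions []int, maxPoints int) int {
	chosen := resolutions[0] // fall back to the coarsest resolution
	for _, res := range resolutions {
		if durationSec/res <= maxPoints {
			chosen = res // a finer resolution still fits the budget
		}
	}
	return chosen
}
```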
@@ -17,6 +17,9 @@
 			60
 		]
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"emission-constant": 317,
 	"clusters": [
 		{
@@ -1,50 +1,62 @@
 {
     "addr": "0.0.0.0:443",
     "ldap": {
         "url": "ldaps://test",
         "user_base": "ou=people,ou=hpc,dc=test,dc=de",
         "search_dn": "cn=hpcmonitoring,ou=roadm,ou=profile,ou=hpc,dc=test,dc=de",
         "user_bind": "uid={username},ou=people,ou=hpc,dc=test,dc=de",
         "user_filter": "(&(objectclass=posixAccount))"
     },
     "https-cert-file": "/etc/letsencrypt/live/url/fullchain.pem",
     "https-key-file": "/etc/letsencrypt/live/url/privkey.pem",
     "user": "clustercockpit",
     "group": "clustercockpit",
     "archive": {
         "kind": "file",
         "path": "./var/job-archive"
     },
-    "validate": true,
+    "validate": false,
+    "apiAllowedIPs": [
+        "*"
+    ],
     "clusters": [
         {
             "name": "test",
             "metricDataRepository": {
                 "kind": "cc-metric-store",
                 "url": "http://localhost:8082",
                 "token": "eyJhbGciOiJF-E-pQBQ"
             },
             "filterRanges": {
                 "numNodes": {
                     "from": 1,
                     "to": 64
                 },
                 "duration": {
                     "from": 0,
                     "to": 86400
                 },
                 "startTime": {
                     "from": "2022-01-01T00:00:00Z",
                     "to": null
                 }
             }
         }
     ],
     "jwts": {
         "cookieName": "",
         "validateUser": false,
         "max-age": "2000h",
         "trustedIssuer": ""
     },
+    "enable-resampling": {
+        "trigger": 30,
+        "resolutions": [
+            600,
+            300,
+            120,
+            60
+        ]
+    },
     "short-running-jobs-duration": 300
 }
@@ -45,6 +45,9 @@ func setup(t *testing.T) *api.RestApi {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 		{
 			"name": "testcluster",
@@ -45,6 +45,9 @@ func setup(t *testing.T) *repository.JobRepository {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 		{
 			"name": "testcluster",
@@ -674,57 +674,32 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 		}
 	}
 
-	// log.Debugf("Metric %s, Peak %f, Unit %s, Aggregation %s", metric, peak, unit, aggreg)
-	// Make bins, see https://jereze.com/code/sql-histogram/
+	// log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
+	// Make bins, see https://jereze.com/code/sql-histogram/ (Modified here)
 
 	start := time.Now()
-	jm := fmt.Sprintf(`json_extract(footprint, "$.%s")`, (metric + "_" + footprintStat))
 
-	crossJoinQuery := sq.Select(
-		fmt.Sprintf(`max(%s) as max`, jm),
-		fmt.Sprintf(`min(%s) as min`, jm),
-	).From("job").Where(
-		"JSON_VALID(footprint)",
-	).Where(
-		fmt.Sprintf(`%s is not null`, jm),
-	).Where(
-		fmt.Sprintf(`%s <= %f`, jm, peak),
-	)
-
-	crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery)
-
-	if cjqerr != nil {
-		return nil, cjqerr
-	}
-
-	for _, f := range filters {
-		crossJoinQuery = BuildWhereClause(f, crossJoinQuery)
-	}
-
-	crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql()
-	if sqlerr != nil {
-		return nil, sqlerr
-	}
-
-	binQuery := fmt.Sprintf(`CAST( (case when %s = value.max
-		then value.max*0.999999999 else %s end - value.min) / (value.max -
-		value.min) * %v as INTEGER )`, jm, jm, *bins)
+	// Find Jobs' Value Bin Number: Divide Value by Peak, Multiply by RequestedBins, then CAST to INT: Gets Bin-Number of Job
+	binQuery := fmt.Sprintf(`CAST(
+		((case when json_extract(footprint, "$.%s") = %f then %f*0.999999999 else json_extract(footprint, "$.%s") end) / %f)
+		* %v as INTEGER )`,
+		(metric + "_" + footprintStat), peak, peak, (metric + "_" + footprintStat), peak, *bins)
 
 	mainQuery := sq.Select(
 		fmt.Sprintf(`%s + 1 as bin`, binQuery),
-		fmt.Sprintf(`count(%s) as count`, jm),
-		fmt.Sprintf(`CAST(((value.max / %d) * (%v )) as INTEGER ) as min`, *bins, binQuery),
-		fmt.Sprintf(`CAST(((value.max / %d) * (%v + 1 )) as INTEGER ) as max`, *bins, binQuery),
-	).From("job").CrossJoin(
-		fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
-	).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak))
+		fmt.Sprintf(`count(*) as count`),
+		// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * %s as min`, peak, *bins, binQuery),
+		// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * (%s + 1) as max`, peak, *bins, binQuery),
+	).From("job").Where(
+		"JSON_VALID(footprint)",
+	).Where(fmt.Sprintf(`json_extract(footprint, "$.%s") is not null and json_extract(footprint, "$.%s") <= %f`, (metric + "_" + footprintStat), (metric + "_" + footprintStat), peak))
 
+	// Only accessible Jobs...
 	mainQuery, qerr := SecurityCheck(ctx, mainQuery)
 
 	if qerr != nil {
 		return nil, qerr
 	}
 
+	// Filters...
 	for _, f := range filters {
 		mainQuery = BuildWhereClause(f, mainQuery)
 	}
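To see what the new `binQuery` computes, here is the same binning rule rewritten as plain Go, a sketch assuming a scalar footprint value and example parameters (peak = 100, bins = 10) that are not taken from the diff. The `0.999999999` factor keeps a value exactly at `peak` inside the top bin rather than spilling into bin `bins+1`.

```go
package main

import "fmt"

// binNumber mirrors the SQL binQuery above:
// CAST((value / peak) * bins AS INTEGER) + 1, with values equal to peak
// nudged down by *0.999999999 so they stay in the last bin.
func binNumber(value, peak float64, bins int) int {
	if value == peak {
		value = peak * 0.999999999
	}
	return int((value/peak)*float64(bins)) + 1
}

func main() {
	fmt.Println(binNumber(0, 100, 10))    // 1: bin 1 covers [0, 10)
	fmt.Println(binNumber(99.9, 100, 10)) // 10: top bin
	fmt.Println(binNumber(100, 100, 10))  // 10, not 11, thanks to the nudge
}
```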
@@ -738,32 +713,34 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 		return nil, err
 	}
 
-	// Setup Array
+	// Setup Return Array With Bin-Numbers for Match and Min/Max based on Peak
 	points := make([]*model.MetricHistoPoint, 0)
+	binStep := int(peak) / *bins
 	for i := 1; i <= *bins; i++ {
-		binMax := ((int(peak) / *bins) * i)
-		binMin := ((int(peak) / *bins) * (i - 1))
-		point := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
-		points = append(points, &point)
+		binMin := (binStep * (i - 1))
+		binMax := (binStep * i)
+		epoint := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
+		points = append(points, &epoint)
 	}
 
-	for rows.Next() {
-		point := model.MetricHistoPoint{}
-		if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
-			log.Warnf("Error while scanning rows for %s", jm)
-			return nil, err // Totally bricks cc-backend if returned and if all metrics requested?
+	for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!)
+		rpoint := model.MetricHistoPoint{}
+		if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max
+			log.Warnf("Error while scanning rows for %s", metric)
+			return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested?
 		}
 
 		for _, e := range points {
-			if e.Bin != nil && point.Bin != nil {
-				if *e.Bin == *point.Bin {
-					e.Count = point.Count
-					if point.Min != nil {
-						e.Min = point.Min
-					}
-					if point.Max != nil {
-						e.Max = point.Max
-					}
+			if e.Bin != nil && rpoint.Bin != nil {
+				if *e.Bin == *rpoint.Bin {
+					e.Count = rpoint.Count
+					// Only Required For Debug: Check DB returned Min/Max against Backend Init above
+					// if rpoint.Min != nil {
+					// 	log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
+					// }
+					// if rpoint.Max != nil {
+					// 	log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
+					// }
 					break
 				}
 			}
 		}
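The pre-filled `points` array is what lets bins that never occur in the database still appear as empty in the histogram. A minimal standalone sketch of the init loop's output, again assuming peak = 100 and `*bins` = 10 for illustration:

```go
package main

import "fmt"

func main() {
	// Mirrors the init loop above: every bin exists up front with
	// Count = 0; rows from mainQuery later overwrite Count only for
	// the bins that actually occur in the database.
	peak, bins := 100, 10
	binStep := peak / bins // == 10
	for i := 1; i <= bins; i++ {
		binMin, binMax := binStep*(i-1), binStep*i
		fmt.Printf("bin %2d: [%3d, %3d] count=0\n", i, binMin, binMax)
	}
}
```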
@@ -25,6 +25,9 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 		{
 			"name": "testcluster",
@@ -492,6 +492,7 @@
     },
     "required": [
         "jwts",
-        "clusters"
+        "clusters",
+        "apiAllowedIPs"
     ]
 }
@@ -14,17 +14,20 @@ func TestValidateConfig(t *testing.T) {
 	"jwts": {
 		"max-age": "2m"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
 	"clusters": [
 	{
 		"name": "testcluster",
 		"metricDataRepository": {
 			"kind": "cc-metric-store",
 			"url": "localhost:8082"},
 		"filterRanges": {
 			"numNodes": { "from": 1, "to": 64 },
 			"duration": { "from": 0, "to": 86400 },
 			"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
 	}}]
 }`)
 
 	if err := Validate(Config, bytes.NewReader(json)); err != nil {
@@ -33,7 +36,6 @@ func TestValidateConfig(t *testing.T) {
 }
 
 func TestValidateJobMeta(t *testing.T) {
 
 }
 
 func TestValidateCluster(t *testing.T) {
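Given the schema change that makes `apiAllowedIPs` required, a config without it should now fail validation. A hedged sketch of a regression test in the style of `TestValidateConfig` above; the `Validate` and `Config` identifiers and the config shape are taken from the diff, while the test name and exact failure mode are assumptions:

```go
func TestValidateConfigMissingApiAllowedIPs(t *testing.T) {
	// Same shape as the config in TestValidateConfig, but without the
	// now-required "apiAllowedIPs" attribute: Validate should reject it.
	json := []byte(`{
	"jwts": { "max-age": "2m" },
	"clusters": [{
		"name": "testcluster",
		"metricDataRepository": {
			"kind": "cc-metric-store",
			"url": "localhost:8082"},
		"filterRanges": {
			"numNodes": { "from": 1, "to": 64 },
			"duration": { "from": 0, "to": 86400 },
			"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
		}}]
	}`)

	if err := Validate(Config, bytes.NewReader(json)); err == nil {
		t.Fatal("expected a validation error for missing apiAllowedIPs")
	}
}
```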