Mirror of https://github.com/ClusterCockpit/cc-backend, synced 2025-07-22 20:41:40 +02:00
Port to cc-lib. Extend legal header.
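In short, the port swaps the in-tree pkg/log, pkg/lrucache, and pkg/schema packages for their cc-lib counterparts; call sites keep their signatures and only the import path (plus the cclog alias) changes. A minimal before/after sketch of the logging part, assuming only what the diff below shows (the helper function is made up for illustration):

package repository

// Before: logging went through the in-tree package.
//   import "github.com/ClusterCockpit/cc-backend/pkg/log"
//   log.Warnf("Error while scanning rows: %v", err)

// After: the identical call goes through the cc-lib logger under the cclog alias.
import (
	"errors"

	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

// scanExample is a hypothetical helper, only here to show the renamed call site.
func scanExample() {
	err := errors.New("row scan failed")
	cclog.Warnf("Error while scanning rows: %v", err) // same signature, new import path
}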
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,7 +9,7 @@ import (
"sync"
"time"

-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"github.com/qustavo/sqlhooks/v2"
@@ -53,7 +53,7 @@ func Connect(driver string, db string) {
// - Enable foreign key checks
opts.URL += "?_journal=WAL&_timeout=5000&_fk=true"

-if log.Loglevel() == "debug" {
+if cclog.Loglevel() == "debug" {
sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
dbHandle, err = sqlx.Open("sqlite3WithHooks", opts.URL)
} else {
@@ -63,11 +63,11 @@ func Connect(driver string, db string) {
opts.URL += "?multiStatements=true"
dbHandle, err = sqlx.Open("mysql", opts.URL)
default:
-log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
+cclog.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
}

if err != nil {
-log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
+cclog.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
}

dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
@@ -78,14 +78,14 @@ func Connect(driver string, db string) {
dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
err = checkDBVersion(driver, dbHandle.DB)
if err != nil {
-log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
+cclog.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
}
})
}

func GetConnection() *DBConnection {
if dbConnInstance == nil {
-log.Fatalf("Database connection not initialized!")
+cclog.Fatalf("Database connection not initialized!")
}

return dbConnInstance

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -8,7 +8,7 @@ import (
"context"
"time"

-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
)

// Hooks satisfies the sqlhook.Hooks interface
@@ -16,13 +16,13 @@ type Hooks struct{}

// Before hook will print the query with it's args and return the context with the timestamp
func (h *Hooks) Before(ctx context.Context, query string, args ...any) (context.Context, error) {
-log.Debugf("SQL query %s %q", query, args)
+cclog.Debugf("SQL query %s %q", query, args)
return context.WithValue(ctx, "begin", time.Now()), nil
}

// After hook will get the timestamp registered on the Before hook and print the elapsed time
func (h *Hooks) After(ctx context.Context, query string, args ...any) (context.Context, error) {
begin := ctx.Value("begin").(time.Time)
-log.Debugf("Took: %s\n", time.Since(begin))
+cclog.Debugf("Took: %s\n", time.Since(begin))
return ctx, nil
}

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -16,9 +16,9 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/pkg/archive"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
-"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+"github.com/ClusterCockpit/cc-lib/lrucache"
+"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
@@ -76,18 +76,18 @@ func scanJob(row interface{ Scan(...any) error }) (*schema.Job, error) {
&job.StartTime, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads,
&job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
&job.Duration, &job.Walltime, &job.RawResources, &job.RawFootprint, &job.Energy); err != nil {
-log.Warnf("Error while scanning rows (Job): %v", err)
+cclog.Warnf("Error while scanning rows (Job): %v", err)
return nil, err
}

if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil {
-log.Warn("Error while unmarshaling raw resources json")
+cclog.Warn("Error while unmarshaling raw resources json")
return nil, err
}
job.RawResources = nil

if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
-log.Warnf("Error while unmarshaling raw footprint json: %v", err)
+cclog.Warnf("Error while unmarshaling raw footprint json: %v", err)
return nil, err
}
job.RawFootprint = nil
@@ -109,7 +109,7 @@ func (r *JobRepository) Optimize() error {
return err
}
case "mysql":
-log.Info("Optimize currently not supported for mysql driver")
+cclog.Info("Optimize currently not supported for mysql driver")
}

return nil
@@ -160,7 +160,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error

if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
-log.Warn("Error while scanning for job metadata")
+cclog.Warn("Error while scanning for job metadata")
return nil, err
}

@@ -169,12 +169,12 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
}

if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
-log.Warn("Error while unmarshaling raw metadata json")
+cclog.Warn("Error while unmarshaling raw metadata json")
return nil, err
}

r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
-log.Debugf("Timer FetchMetadata %s", time.Since(start))
+cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
return job.MetaData, nil
}

@@ -183,7 +183,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
r.cache.Del(cachekey)
if job.MetaData == nil {
if _, err = r.FetchMetadata(job); err != nil {
-log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
+cclog.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
return err
}
}
@@ -198,7 +198,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
}

if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil {
-log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
+cclog.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
return err
}

@@ -206,7 +206,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
Set("meta_data", job.RawMetaData).
Where("job.id = ?", job.ID).
RunWith(r.stmtCache).Exec(); err != nil {
-log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
+cclog.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
return err
}

@@ -219,7 +219,7 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err

if err := sq.Select("job.footprint").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawFootprint); err != nil {
-log.Warn("Error while scanning for job footprint")
+cclog.Warn("Error while scanning for job footprint")
return nil, err
}

@@ -228,11 +228,11 @@ func (r *JobRepository) FetchFootprint(job *schema.Job) (map[string]float64, err
}

if err := json.Unmarshal(job.RawFootprint, &job.Footprint); err != nil {
-log.Warn("Error while unmarshaling raw footprint json")
+cclog.Warn("Error while unmarshaling raw footprint json")
return nil, err
}

-log.Debugf("Timer FetchFootprint %s", time.Since(start))
+cclog.Debugf("Timer FetchFootprint %s", time.Since(start))
return job.Footprint, nil
}

@@ -246,7 +246,7 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6

if err := sq.Select("job.energy_footprint").From("job").Where("job.id = ?", job.ID).
RunWith(r.stmtCache).QueryRow().Scan(&job.RawEnergyFootprint); err != nil {
-log.Warn("Error while scanning for job energy_footprint")
+cclog.Warn("Error while scanning for job energy_footprint")
return nil, err
}

@@ -255,12 +255,12 @@ func (r *JobRepository) FetchEnergyFootprint(job *schema.Job) (map[string]float6
}

if err := json.Unmarshal(job.RawEnergyFootprint, &job.EnergyFootprint); err != nil {
-log.Warn("Error while unmarshaling raw energy footprint json")
+cclog.Warn("Error while unmarshaling raw energy footprint json")
return nil, err
}

r.cache.Put(cachekey, job.EnergyFootprint, len(job.EnergyFootprint), 24*time.Hour)
-log.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
+cclog.Debugf("Timer FetchEnergyFootprint %s", time.Since(start))
return job.EnergyFootprint, nil
}

@@ -273,9 +273,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {

if err != nil {
s, _, _ := qd.ToSql()
-log.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
+cclog.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
} else {
-log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
+cclog.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
}
return cnt, err
}
@@ -286,9 +286,9 @@ func (r *JobRepository) DeleteJobById(id int64) error {

if err != nil {
s, _, _ := qd.ToSql()
-log.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
+cclog.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err)
} else {
-log.Debugf("DeleteJobById(%d): Success", id)
+cclog.Debugf("DeleteJobById(%d): Success", id)
}
return err
}
@@ -351,7 +351,7 @@ func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, ta
}
return "", ErrNotFound
} else {
-log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
return "", ErrForbidden
}
}
@@ -370,7 +370,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
err := rows.Scan(&result)
if err != nil {
rows.Close()
-log.Warnf("Error while scanning rows: %v", err)
+cclog.Warnf("Error while scanning rows: %v", err)
return emptyResult, err
}
results = append(results, result)
@@ -380,7 +380,7 @@ func (r *JobRepository) FindColumnValues(user *schema.User, query string, table
return emptyResult, ErrNotFound

} else {
-log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+cclog.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
return emptyResult, ErrForbidden
}
}
@@ -399,7 +399,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
if err != nil {
return nil, err
}
-log.Debugf("Timer Partitions %s", time.Since(start))
+cclog.Debugf("Timer Partitions %s", time.Since(start))
return partitions.([]string), nil
}

@@ -413,7 +413,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
Where("job.cluster = ?", cluster).
RunWith(r.stmtCache).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}

@@ -424,11 +424,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
var resources []*schema.Resource
var subcluster string
if err := rows.Scan(&raw, &subcluster); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
if err := json.Unmarshal(raw, &resources); err != nil {
-log.Warn("Error while unmarshaling raw resources json")
+cclog.Warn("Error while unmarshaling raw resources json")
return nil, err
}

@@ -443,7 +443,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
}
}

-log.Debugf("Timer AllocatedNodes %s", time.Since(start))
+cclog.Debugf("Timer AllocatedNodes %s", time.Since(start))
return subclusters, nil
}

@@ -459,20 +459,20 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)).
RunWith(r.DB).Exec()
if err != nil {
-log.Warn("Error while stopping jobs exceeding walltime")
+cclog.Warn("Error while stopping jobs exceeding walltime")
return err
}

rowsAffected, err := res.RowsAffected()
if err != nil {
-log.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
+cclog.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
return err
}

if rowsAffected > 0 {
-log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
+cclog.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
}
-log.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
+cclog.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
return nil
}

@@ -482,7 +482,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) {
Where(sq.Eq{"jobtag.tag_id": tagId}).Distinct()
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}
jobIds := make([]int64, 0, 100)
@@ -492,7 +492,7 @@ func (r *JobRepository) FindJobIdsByTag(tagId int64) ([]int64, error) {

if err := rows.Scan(&jobId); err != nil {
rows.Close()
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

@@ -511,7 +511,7 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {

rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}

@@ -520,13 +520,13 @@ func (r *JobRepository) FindRunningJobs(cluster string) ([]*schema.Job, error) {
job, err := scanJob(rows)
if err != nil {
rows.Close()
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}

-log.Infof("Return job count %d", len(jobs))
+cclog.Infof("Return job count %d", len(jobs))
return jobs, nil
}

@@ -551,18 +551,18 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
}

if startTimeBegin == 0 {
-log.Infof("Find jobs before %d", startTimeEnd)
+cclog.Infof("Find jobs before %d", startTimeEnd)
query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time < %d", startTimeEnd))
} else {
-log.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
+cclog.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
"job.start_time BETWEEN %d AND %d", startTimeBegin, startTimeEnd))
}

rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}

@@ -571,13 +571,13 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
job, err := scanJob(rows)
if err != nil {
rows.Close()
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}

-log.Infof("Return job count %d", len(jobs))
+cclog.Infof("Return job count %d", len(jobs))
return jobs, nil
}

@@ -612,7 +612,7 @@ func (r *JobRepository) UpdateEnergy(
/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
-log.Errorf("cannot get subcluster: %s", err.Error())
+cclog.Errorf("cannot get subcluster: %s", err.Error())
return stmt, err
}
energyFootprint := make(map[string]float64)
@@ -625,7 +625,7 @@ func (r *JobRepository) UpdateEnergy(
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh)
-log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
+cclog.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Energy: Power (in Watts) * Time (in Seconds)
@@ -637,18 +637,18 @@ func (r *JobRepository) UpdateEnergy(
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
}
} else {
-log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
+cclog.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
}

energyFootprint[fp] = metricEnergy
totalEnergy += metricEnergy

-// log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
+// cclog.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), energy, jobMeta.JobID, totalEnergy)
}

var rawFootprint []byte
if rawFootprint, err = json.Marshal(energyFootprint); err != nil {
-log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+cclog.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return stmt, err
}

@@ -662,7 +662,7 @@ func (r *JobRepository) UpdateFootprint(
/* Note: Only Called for Running Jobs during Intermediate Update or on Archiving */
sc, err := archive.GetSubCluster(jobMeta.Cluster, jobMeta.SubCluster)
if err != nil {
-log.Errorf("cannot get subcluster: %s", err.Error())
+cclog.Errorf("cannot get subcluster: %s", err.Error())
return stmt, err
}
footprint := make(map[string]float64)
@@ -676,7 +676,7 @@ func (r *JobRepository) UpdateFootprint(
}

if statType != "avg" && statType != "min" && statType != "max" {
-log.Warnf("unknown statType for footprint update: %s", statType)
+cclog.Warnf("unknown statType for footprint update: %s", statType)
return stmt, fmt.Errorf("unknown statType for footprint update: %s", statType)
}

@@ -690,7 +690,7 @@ func (r *JobRepository) UpdateFootprint(

var rawFootprint []byte
if rawFootprint, err = json.Marshal(footprint); err != nil {
-log.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
+cclog.Warnf("Error while marshaling footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return stmt, err
}

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -8,8 +8,8 @@ import (
"encoding/json"
"fmt"

-"github.com/ClusterCockpit/cc-backend/pkg/log"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)

@@ -34,12 +34,12 @@ func (r *JobRepository) InsertJob(job *schema.Job) (int64, error) {
res, err := r.DB.NamedExec(NamedJobCacheInsert, job)
r.Mutex.Unlock()
if err != nil {
-log.Warn("Error while NamedJobInsert")
+cclog.Warn("Error while NamedJobInsert")
return 0, err
}
id, err := res.LastInsertId()
if err != nil {
-log.Warn("Error while getting last insert ID")
+cclog.Warn("Error while getting last insert ID")
return 0, err
}

@@ -54,7 +54,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {

rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
-log.Errorf("Error while running query %v", err)
+cclog.Errorf("Error while running query %v", err)
return nil, err
}

@@ -63,7 +63,7 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
job, err := scanJob(rows)
if err != nil {
rows.Close()
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
@@ -72,13 +72,13 @@ func (r *JobRepository) SyncJobs() ([]*schema.Job, error) {
_, err = r.DB.Exec(
"INSERT INTO job (job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data) SELECT job_id, cluster, subcluster, start_time, hpc_user, project, cluster_partition, array_job_id, num_nodes, num_hwthreads, num_acc, exclusive, monitoring_status, smt, job_state, duration, walltime, footprint, energy, energy_footprint, resources, meta_data FROM job_cache")
if err != nil {
-log.Warnf("Error while Job sync: %v", err)
+cclog.Warnf("Error while Job sync: %v", err)
return nil, err
}

_, err = r.DB.Exec("DELETE FROM job_cache")
if err != nil {
-log.Warnf("Error while Job cache clean: %v", err)
+cclog.Warnf("Error while Job cache clean: %v", err)
return nil, err
}

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -11,8 +11,8 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)

@@ -39,7 +39,7 @@ func (r *JobRepository) Find(

q = q.OrderBy("job.id DESC") // always use newest matching job by db id if more than one match

-log.Debugf("Timer Find %s", time.Since(start))
+cclog.Debugf("Timer Find %s", time.Since(start))
return scanJob(q.RunWith(r.stmtCache).QueryRow())
}

@@ -86,7 +86,7 @@ func (r *JobRepository) FindAll(

rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}

@@ -94,12 +94,12 @@ func (r *JobRepository) FindAll(
for rows.Next() {
job, err := scanJob(rows)
if err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
jobs = append(jobs, job)
}
-log.Debugf("Timer FindAll %s", time.Since(start))
+cclog.Debugf("Timer FindAll %s", time.Since(start))
return jobs, nil
}

@@ -112,7 +112,7 @@ func (r *JobRepository) GetJobList() ([]int64, error) {

rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}

@@ -122,13 +122,13 @@ func (r *JobRepository) GetJobList() ([]int64, error) {
err := rows.Scan(&id)
if err != nil {
rows.Close()
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
jl = append(jl, id)
}

-log.Infof("Return job count %d", len(jl))
+cclog.Infof("Return job count %d", len(jl))
return jl, nil
}

@@ -253,7 +253,7 @@ func (r *JobRepository) FindConcurrentJobs(

rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
-log.Errorf("Error while running query: %v", err)
+cclog.Errorf("Error while running query: %v", err)
return nil, err
}

@@ -264,7 +264,7 @@ func (r *JobRepository) FindConcurrentJobs(
var id, jobId, startTime sql.NullInt64

if err = rows.Scan(&id, &jobId, &startTime); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

@@ -280,7 +280,7 @@ func (r *JobRepository) FindConcurrentJobs(

rows, err = queryRunning.RunWith(r.stmtCache).Query()
if err != nil {
-log.Errorf("Error while running query: %v", err)
+cclog.Errorf("Error while running query: %v", err)
return nil, err
}

@@ -288,7 +288,7 @@ func (r *JobRepository) FindConcurrentJobs(
var id, jobId, startTime sql.NullInt64

if err := rows.Scan(&id, &jobId, &startTime); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -7,7 +7,7 @@ package repository
import (
"sync"

-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
)

type JobHook interface {

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -13,8 +13,8 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)

@@ -68,7 +68,7 @@ func (r *JobRepository) QueryJobs(
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
-log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}

@@ -77,7 +77,7 @@ func (r *JobRepository) QueryJobs(
job, err := scanJob(rows)
if err != nil {
rows.Close()
-log.Warn("Error while scanning rows (Jobs)")
+cclog.Warn("Error while scanning rows (Jobs)")
return nil, err
}
jobs = append(jobs, job)
@@ -123,7 +123,7 @@ func SecurityCheckWithUser(user *schema.User, query sq.SelectBuilder) (sq.Select
if len(user.Projects) != 0 {
return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.hpc_user": user.Username}}), nil
} else {
-log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
+cclog.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
return query.Where("job.hpc_user = ?", user.Username), nil
}
case user.HasRole(schema.RoleUser): // User : Only personal jobs
@@ -244,7 +244,7 @@ func buildTimeCondition(field string, cond *schema.TimeRange, query sq.SelectBui
case "last30d":
then = now - (60 * 60 * 24 * 30)
default:
-log.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
+cclog.Debugf("No known named timeRange: startTime.range = %s", cond.Range)
return query
}
return query.Where(field+" BETWEEN ? AND ?", then, now)
@@ -335,7 +335,7 @@ var (
func toSnakeCase(str string) string {
for _, c := range str {
if c == '\'' || c == '\\' {
-log.Panic("toSnakeCase() attack vector!")
+cclog.Panic("toSnakeCase() attack vector!")
}
}

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,7 +9,7 @@ import (
"fmt"
"testing"

-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+"github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,7 +9,7 @@ import (
"embed"
"fmt"

-"github.com/ClusterCockpit/cc-backend/pkg/log"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/mysql"
"github.com/golang-migrate/migrate/v4/database/sqlite3"
@@ -54,13 +54,13 @@ func checkDBVersion(backend string, db *sql.DB) error {
return err
}
default:
-log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
+cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}

v, dirty, err := m.Version()
if err != nil {
if err == migrate.ErrNilVersion {
-log.Warn("Legacy database without version or missing database file!")
+cclog.Warn("Legacy database without version or missing database file!")
} else {
return err
}
@@ -84,7 +84,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
case "sqlite3":
d, err := iofs.New(migrationFiles, "migrations/sqlite3")
if err != nil {
-log.Fatal(err)
+cclog.Fatal(err)
}

m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
@@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
return m, err
}
default:
-log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
+cclog.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
}

return m, nil
@@ -117,14 +117,14 @@ func MigrateDB(backend string, db string) error {
v, dirty, err := m.Version()
if err != nil {
if err == migrate.ErrNilVersion {
-log.Warn("Legacy database without version or missing database file!")
+cclog.Warn("Legacy database without version or missing database file!")
} else {
return err
}
}

if v < Version {
-log.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
+cclog.Infof("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
}

if dirty {
@@ -133,7 +133,7 @@ func MigrateDB(backend string, db string) error {

if err := m.Up(); err != nil {
if err == migrate.ErrNoChange {
-log.Info("DB already up to date!")
+cclog.Info("DB already up to date!")
} else {
return err
}
@@ -151,7 +151,7 @@ func RevertDB(backend string, db string) error {

if err := m.Migrate(Version - 1); err != nil {
if err == migrate.ErrNoChange {
-log.Info("DB already up to date!")
+cclog.Info("DB already up to date!")
} else {
return err
}

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -15,9 +15,9 @@ import (

"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
-"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+"github.com/ClusterCockpit/cc-lib/lrucache"
+"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
@@ -59,7 +59,7 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er

if err := sq.Select("node.meta_data").From("node").Where("node.id = ?", node.ID).
RunWith(r.stmtCache).QueryRow().Scan(&node.RawMetaData); err != nil {
-log.Warn("Error while scanning for node metadata")
+cclog.Warn("Error while scanning for node metadata")
return nil, err
}

@@ -68,12 +68,12 @@ func (r *NodeRepository) FetchMetadata(node *schema.Node) (map[string]string, er
}

if err := json.Unmarshal(node.RawMetaData, &node.MetaData); err != nil {
-log.Warn("Error while unmarshaling raw metadata json")
+cclog.Warn("Error while unmarshaling raw metadata json")
return nil, err
}

r.cache.Put(cachekey, node.MetaData, len(node.RawMetaData), 24*time.Hour)
-log.Debugf("Timer FetchMetadata %s", time.Since(start))
+cclog.Debugf("Timer FetchMetadata %s", time.Since(start))
return node.MetaData, nil
}

@@ -82,7 +82,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
r.cache.Del(cachekey)
if node.MetaData == nil {
if _, err = r.FetchMetadata(node); err != nil {
-log.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID)
+cclog.Warnf("Error while fetching metadata for node, DB ID '%v'", node.ID)
return err
}
}
@@ -97,7 +97,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
}

if node.RawMetaData, err = json.Marshal(node.MetaData); err != nil {
-log.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID)
+cclog.Warnf("Error while marshaling metadata for node, DB ID '%v'", node.ID)
return err
}

@@ -105,7 +105,7 @@ func (r *NodeRepository) UpdateMetadata(node *schema.Node, key, val string) (err
Set("meta_data", node.RawMetaData).
Where("node.id = ?", node.ID).
RunWith(r.stmtCache).Exec(); err != nil {
-log.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID)
+cclog.Warnf("Error while updating metadata for node, DB ID '%v'", node.ID)
return err
}

@@ -120,7 +120,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error)
Where("node.id = ?", id).RunWith(r.DB).
QueryRow().Scan(&node.ID, &node.Hostname, &node.Cluster, &node.SubCluster, &node.NodeState,
&node.HealthState); err != nil {
-log.Warnf("Error while querying node '%v' from database", id)
+cclog.Warnf("Error while querying node '%v' from database", id)
return nil, err
}

@@ -128,7 +128,7 @@ func (r *NodeRepository) GetNode(id int64, withMeta bool) (*schema.Node, error)
var err error
var meta map[string]string
if meta, err = r.FetchMetadata(node); err != nil {
-log.Warnf("Error while fetching metadata for node '%v'", id)
+cclog.Warnf("Error while fetching metadata for node '%v'", id)
return nil, err
}
node.MetaData = meta
@@ -146,12 +146,12 @@ func (r *NodeRepository) AddNode(node *schema.Node) (int64, error) {

res, err := r.DB.NamedExec(NamedNodeInsert, node)
if err != nil {
-log.Errorf("Error while adding node '%v' to database", node.Hostname)
+cclog.Errorf("Error while adding node '%v' to database", node.Hostname)
return 0, err
}
node.ID, err = res.LastInsertId()
if err != nil {
-log.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname)
+cclog.Errorf("Error while getting last insert id for node '%v' from database", node.Hostname)
return 0, err
}

@@ -166,7 +166,7 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
if err == sql.ErrNoRows {
subcluster, err := archive.GetSubClusterByNode(cluster, hostname)
if err != nil {
-log.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err)
+cclog.Errorf("Error while getting subcluster for node '%s' in cluster '%s': %v", hostname, cluster, err)
return err
}
node := schema.Node{
@@ -175,29 +175,29 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
}
_, err = r.AddNode(&node)
if err != nil {
-log.Errorf("Error while adding node '%s' to database: %v", hostname, err)
+cclog.Errorf("Error while adding node '%s' to database: %v", hostname, err)
return err
}

-log.Infof("Added node '%s' to database", hostname)
+cclog.Infof("Added node '%s' to database", hostname)
return nil
} else {
-log.Warnf("Error while querying node '%v' from database", id)
+cclog.Warnf("Error while querying node '%v' from database", id)
return err
}
}

if _, err := sq.Update("node").Set("node_state", nodeState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
-log.Errorf("error while updating node '%s'", hostname)
+cclog.Errorf("error while updating node '%s'", hostname)
return err
}
-log.Infof("Updated node '%s' in database", hostname)
+cclog.Infof("Updated node '%s' in database", hostname)
return nil
}

// func (r *NodeRepository) UpdateHealthState(hostname string, healthState *schema.MonitoringState) error {
// if _, err := sq.Update("node").Set("health_state", healthState).Where("node.id = ?", id).RunWith(r.DB).Exec(); err != nil {
-// log.Errorf("error while updating node '%d'", id)
+// cclog.Errorf("error while updating node '%d'", id)
// return err
// }
//
@@ -207,10 +207,10 @@ func (r *NodeRepository) UpdateNodeState(hostname string, cluster string, nodeSt
func (r *NodeRepository) DeleteNode(id int64) error {
_, err := r.DB.Exec(`DELETE FROM node WHERE node.id = ?`, id)
if err != nil {
-log.Errorf("Error while deleting node '%d' from DB", id)
+cclog.Errorf("Error while deleting node '%d' from DB", id)
return err
}
-log.Infof("deleted node '%d' from DB", id)
+cclog.Infof("deleted node '%d' from DB", id)
return nil
}

@@ -243,7 +243,7 @@ func (r *NodeRepository) QueryNodes(
rows, err := query.RunWith(r.stmtCache).Query()
if err != nil {
queryString, queryVars, _ := query.ToSql()
-log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
+cclog.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
return nil, err
}

@@ -254,7 +254,7 @@ func (r *NodeRepository) QueryNodes(
if err := rows.Scan(&node.Hostname, &node.Cluster, &node.SubCluster,
&node.NodeState, &node.HealthState); err != nil {
rows.Close()
-log.Warn("Error while scanning rows (Nodes)")
+cclog.Warn("Error while scanning rows (Nodes)")
return nil, err
}
nodes = append(nodes, &node)
@@ -269,7 +269,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {

rows, err := q.RunWith(r.DB).Query()
if err != nil {
-log.Warn("Error while querying user list")
+cclog.Warn("Error while querying user list")
return nil, err
}
nodeList := make([]*schema.Node, 0, 100)
@@ -278,7 +278,7 @@ func (r *NodeRepository) ListNodes(cluster string) ([]*schema.Node, error) {
node := &schema.Node{}
if err := rows.Scan(&node.Hostname, &node.Cluster,
&node.SubCluster, &node.NodeState, &node.HealthState); err != nil {
-log.Warn("Error while scanning node list")
+cclog.Warn("Error while scanning node list")
return nil, err
}

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,8 +9,8 @@ import (
"testing"

"github.com/ClusterCockpit/cc-backend/internal/graph/model"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+"github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)

@@ -65,7 +65,7 @@ func BenchmarkDB_FindJobById(b *testing.B) {
func BenchmarkDB_FindJob(b *testing.B) {
var jobId int64 = 107266
var startTime int64 = 1657557241
-var cluster = "fritz"
+cluster := "fritz"

b.Run("FindJob", func(b *testing.B) {
db := setup(b)
@@ -147,7 +147,7 @@ func getContext(tb testing.TB) context.Context {

func setup(tb testing.TB) *JobRepository {
tb.Helper()
-log.Init("warn", true)
+cclog.Init("warn", true)
dbfile := "testdata/job.db"
err := MigrateDB("sqlite3", dbfile)
noErr(tb, err)

@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
+// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -14,8 +14,8 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/internal/metricDataDispatcher"
"github.com/ClusterCockpit/cc-backend/pkg/archive"
-"github.com/ClusterCockpit/cc-backend/pkg/log"
-"github.com/ClusterCockpit/cc-backend/pkg/schema"
+cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
+"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)

@@ -158,7 +158,7 @@ func (r *JobRepository) JobsStatsGrouped(

rows, err := query.RunWith(r.DB).Query()
if err != nil {
-log.Warn("Error while querying DB for job statistics")
+cclog.Warn("Error while querying DB for job statistics")
return nil, err
}

@@ -169,7 +169,7 @@ func (r *JobRepository) JobsStatsGrouped(
var name sql.NullString
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := rows.Scan(&id, &jobs, &name, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

@@ -241,7 +241,7 @@ func (r *JobRepository) JobsStatsGrouped(
}
}

-log.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
+cclog.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
return stats, nil
}

@@ -261,7 +261,7 @@ func (r *JobRepository) JobsStats(

var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

@@ -287,7 +287,7 @@ func (r *JobRepository) JobsStats(
})
}

-log.Debugf("Timer JobStats %s", time.Since(start))
+cclog.Debugf("Timer JobStats %s", time.Since(start))
return stats, nil
}

@@ -301,7 +301,7 @@ func LoadJobStat(job *schema.Job, metric string, statType string) float64 {
case "min":
return stats.Min
default:
-log.Errorf("Unknown stat type %s", statType)
+cclog.Errorf("Unknown stat type %s", statType)
}
}

@@ -322,7 +322,7 @@ func (r *JobRepository) JobCountGrouped(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
-log.Warn("Error while querying DB for job statistics")
+cclog.Warn("Error while querying DB for job statistics")
return nil, err
}

@@ -332,7 +332,7 @@ func (r *JobRepository) JobCountGrouped(
var id sql.NullString
var cnt sql.NullInt64
if err := rows.Scan(&id, &cnt); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
@@ -344,7 +344,7 @@ func (r *JobRepository) JobCountGrouped(
}
}

-log.Debugf("Timer JobCountGrouped %s", time.Since(start))
+cclog.Debugf("Timer JobCountGrouped %s", time.Since(start))
return stats, nil
}

@@ -364,7 +364,7 @@ func (r *JobRepository) AddJobCountGrouped(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
-log.Warn("Error while querying DB for job statistics")
+cclog.Warn("Error while querying DB for job statistics")
return nil, err
}

@@ -374,7 +374,7 @@ func (r *JobRepository) AddJobCountGrouped(
var id sql.NullString
var cnt sql.NullInt64
if err := rows.Scan(&id, &cnt); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
@@ -393,7 +393,7 @@ func (r *JobRepository) AddJobCountGrouped(
}
}

-log.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
+cclog.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
return stats, nil
}

@@ -411,7 +411,7 @@ func (r *JobRepository) AddJobCount(
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
-log.Warn("Error while querying DB for job statistics")
+cclog.Warn("Error while querying DB for job statistics")
return nil, err
}

@@ -420,7 +420,7 @@ func (r *JobRepository) AddJobCount(
for rows.Next() {
var cnt sql.NullInt64
if err := rows.Scan(&cnt); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

@@ -438,7 +438,7 @@ func (r *JobRepository) AddJobCount(
}
}

-log.Debugf("Timer AddJobCount %s", time.Since(start))
+cclog.Debugf("Timer AddJobCount %s", time.Since(start))
return stats, nil
}

@@ -479,29 +479,29 @@ func (r *JobRepository) AddHistograms(
value := fmt.Sprintf(`CAST(ROUND(((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / %d) + 1) as %s) as value`, time.Now().Unix(), targetBinSize, castType)
stat.HistDuration, err = r.jobsDurationStatisticsHistogram(ctx, value, filter, targetBinSize, &targetBinCount)
if err != nil {
-log.Warn("Error while loading job statistics histogram: job duration")
+cclog.Warn("Error while loading job statistics histogram: job duration")
return nil, err
}

stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter)
if err != nil {
-log.Warn("Error while loading job statistics histogram: num nodes")
+cclog.Warn("Error while loading job statistics histogram: num nodes")
return nil, err
}

stat.HistNumCores, err = r.jobsStatisticsHistogram(ctx, "job.num_hwthreads as value", filter)
if err != nil {
-log.Warn("Error while loading job statistics histogram: num hwthreads")
+cclog.Warn("Error while loading job statistics histogram: num hwthreads")
return nil, err
}

stat.HistNumAccs, err = r.jobsStatisticsHistogram(ctx, "job.num_acc as value", filter)
if err != nil {
-log.Warn("Error while loading job statistics histogram: num acc")
+cclog.Warn("Error while loading job statistics histogram: num acc")
return nil, err
}

-log.Debugf("Timer AddHistograms %s", time.Since(start))
+cclog.Debugf("Timer AddHistograms %s", time.Since(start))
return stat, nil
}

@@ -520,7 +520,7 @@ func (r *JobRepository) AddMetricHistograms(
if f.State != nil {
if len(f.State) == 1 && f.State[0] == "running" {
stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter, targetBinCount)
-log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}
}
@@ -530,13 +530,13 @@ func (r *JobRepository) AddMetricHistograms(
for _, m := range metrics {
metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter, targetBinCount)
if err != nil {
-log.Warnf("Error while loading job metric statistics histogram: %s", m)
+cclog.Warnf("Error while loading job metric statistics histogram: %s", m)
continue
}
stat.HistMetrics = append(stat.HistMetrics, metricHisto)
}

-log.Debugf("Timer AddMetricHistograms %s", time.Since(start))
+cclog.Debugf("Timer AddMetricHistograms %s", time.Since(start))
return stat, nil
}

@@ -560,7 +560,7 @@ func (r *JobRepository) jobsStatisticsHistogram(

rows, err := query.GroupBy("value").RunWith(r.DB).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}

@@ -569,13 +569,13 @@ func (r *JobRepository) jobsStatisticsHistogram(
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

points = append(points, &point)
}
-log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}

@@ -607,7 +607,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(

rows, err := query.GroupBy("value").RunWith(r.DB).Query()
if err != nil {
-log.Error("Error while running query")
+cclog.Error("Error while running query")
return nil, err
}

@@ -615,7 +615,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
for rows.Next() {
point := model.HistoPoint{}
if err := rows.Scan(&point.Value, &point.Count); err != nil {
-log.Warn("Error while scanning rows")
+cclog.Warn("Error while scanning rows")
return nil, err
}

@@ -630,7 +630,7 @@ func (r *JobRepository) jobsDurationStatisticsHistogram(
}
}

-log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return points, nil
}

@@ -652,7 +652,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
peak = metricConfig.Peak
unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base
footprintStat = metricConfig.Footprint
-log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
+cclog.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric)
}
}

@@ -674,7 +674,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
}
}

-// log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
+// cclog.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
// Make bins, see https://jereze.com/code/sql-histogram/ (Modified here)
start := time.Now()

@@ -709,7 +709,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(

rows, err := mainQuery.RunWith(r.DB).Query()
if err != nil {
-log.Errorf("Error while running mainQuery: %s", err)
+cclog.Errorf("Error while running mainQuery: %s", err)
return nil, err
}

@@ -726,7 +726,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!)
rpoint := model.MetricHistoPoint{}
if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max
-log.Warnf("Error while scanning rows for %s", metric)
+cclog.Warnf("Error while scanning rows for %s", metric)
return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested?
}

@@ -736,10 +736,10 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
e.Count = rpoint.Count
// Only Required For Debug: Check DB returned Min/Max against Backend Init above
// if rpoint.Min != nil {
-// log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
+// cclog.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
// }
// if rpoint.Max != nil {
-// log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
+// cclog.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
// }
break
}
@@ -749,7 +749,7 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(

result := model.MetricHistoPoints{Metric: metric, Unit: unit, Stat: &footprintStat, Data: points}

-log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
+cclog.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
return &result, nil
}

@@ -762,11 +762,11 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
// Get Jobs
jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil)
if err != nil {
-log.Errorf("Error while querying jobs for footprint: %s", err)
+cclog.Errorf("Error while querying jobs for footprint: %s", err)
return nil
}
if len(jobs) > 500 {
-log.Errorf("too many jobs matched (max: %d)", 500)
+cclog.Errorf("too many jobs matched (max: %d)", 500)
return nil
}

@@ -782,7 +782,7 @@ func (r *JobRepository) runningJobsMetricStatisticsHistogram(
}

if err := metricDataDispatcher.LoadAverages(job, metrics, avgs, ctx); err != nil {
-log.Errorf("Error while loading averages for histogram: %s", err)
|
||||
cclog.Errorf("Error while loading averages for histogram: %s", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
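The jobsMetricStatisticsHistogram hunks above bin one footprint value per job into targetBinCount equal-width bins between zero and the metric's configured peak (see the sql-histogram link in the code comment). A minimal Go sketch of that binning idea; the function name, the clamping of out-of-range values, and the example numbers are assumptions for illustration, not the SQL the backend actually generates:

package main

import "fmt"

// binIndex maps value into one of binCount equal-width bins over [0, peak).
// Out-of-range values are clamped to the first or last bin; this clamping is
// part of the sketch, not necessarily the backend's behaviour.
func binIndex(value, peak float64, binCount int) int {
	if peak <= 0 || binCount <= 0 {
		return 0
	}
	bin := int(value / (peak / float64(binCount)))
	if bin < 0 {
		bin = 0
	}
	if bin >= binCount {
		bin = binCount - 1
	}
	return bin
}

func main() {
	// With a peak of 100 and 10 bins, a value of 37.5 falls into bin 3.
	fmt.Println(binIndex(37.5, 100, 10))
}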
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -19,7 +19,6 @@ func TestBuildJobStatsQuery(t *testing.T) {
noErr(t, err)

fmt.Printf("SQL: %s\n", sql)

}

func TestJobStats(t *testing.T) {
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -9,8 +9,8 @@ import (
"strings"

"github.com/ClusterCockpit/cc-backend/pkg/archive"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
)

@@ -18,7 +18,7 @@ import (
func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdWithUser(user, job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}

@@ -26,19 +26,19 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche

if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error adding tag with %s: %v", s, err)
cclog.Errorf("Error adding tag with %s: %v", s, err)
return nil, err
}

tags, err := r.GetTags(user, &job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

@@ -48,7 +48,7 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche
func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdDirect(job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}

@@ -56,19 +56,19 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error

if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error adding tag with %s: %v", s, err)
cclog.Errorf("Error adding tag with %s: %v", s, err)
return nil, err
}

tags, err := r.GetTagsDirect(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

@@ -80,7 +80,7 @@ func (r *JobRepository) AddTagDirect(job int64, tag int64) ([]*schema.Tag, error
func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) {
j, err := r.FindByIdWithUser(user, job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}

@@ -88,19 +88,19 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.

if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error removing tag with %s: %v", s, err)
cclog.Errorf("Error removing tag with %s: %v", s, err)
return nil, err
}

tags, err := r.GetTags(user, &job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

@@ -113,14 +113,14 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
return nil, fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}

// Get Job
j, err := r.FindByIdWithUser(user, job)
if err != nil {
log.Warn("Error while finding job by id")
cclog.Warn("Error while finding job by id")
return nil, err
}

@@ -129,19 +129,19 @@ func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagT

if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return nil, err
}

tags, err := r.GetTags(user, &job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
cclog.Warn("Error while getting tags for job")
return nil, err
}

@@ -152,13 +152,13 @@ func (r *JobRepository) removeTagFromArchiveJobs(jobIds []int64) {
for _, j := range jobIds {
tags, err := r.getArchiveTags(&j)
if err != nil {
log.Warnf("Error while getting tags for job %d", j)
cclog.Warnf("Error while getting tags for job %d", j)
continue
}

job, err := r.FindByIdDirect(j)
if err != nil {
log.Warnf("Error while getting job %d", j)
cclog.Warnf("Error while getting job %d", j)
continue
}

@@ -172,7 +172,7 @@ func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagSc
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
cclog.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
return fmt.Errorf("tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}

@@ -192,7 +192,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error {

if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qJobTag.ToSql()
log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
cclog.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return err
}

@@ -201,7 +201,7 @@ func (r *JobRepository) RemoveTagById(tagID int64) error {

if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qTag.ToSql()
log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
cclog.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
return err
}

@@ -223,7 +223,7 @@ func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope strin
res, err := q.RunWith(r.stmtCache).Exec()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error inserting tag with %s: %v", s, err)
cclog.Errorf("Error inserting tag with %s: %v", s, err)
return 0, err
}

@@ -272,7 +272,7 @@ func (r *JobRepository) CountTags(user *schema.User) (tags []schema.Tag, counts

// Handle Job Ownership
if user != nil && user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) { // ADMIN || SUPPORT: Count all jobs
// log.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
// cclog.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
// Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case
} else if user != nil && user.HasRole(schema.RoleManager) { // MANAGER: Count own jobs plus project's jobs
// Build ("project1", "project2", ...) list of variable length directly in SQL string
@@ -396,7 +396,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error get tags with %s: %v", s, err)
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}

@@ -404,7 +404,7 @@ func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, e
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
// Handle Scope Filtering: Tag Scope is Global, Private (== Username) or User is auth'd to view Admin Tags
@@ -429,7 +429,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error get tags with %s: %v", s, err)
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}

@@ -437,7 +437,7 @@ func (r *JobRepository) GetTagsDirect(job *int64) ([]*schema.Tag, error) {
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
tags = append(tags, tag)
@@ -456,7 +456,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
rows, err := q.RunWith(r.stmtCache).Query()
if err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error get tags with %s: %v", s, err)
cclog.Errorf("Error get tags with %s: %v", s, err)
return nil, err
}

@@ -464,7 +464,7 @@ func (r *JobRepository) getArchiveTags(job *int64) ([]*schema.Tag, error) {
for rows.Next() {
tag := &schema.Tag{}
if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name, &tag.Scope); err != nil {
log.Warn("Error while scanning rows")
cclog.Warn("Error while scanning rows")
return nil, err
}
tags = append(tags, tag)
@@ -488,7 +488,7 @@ func (r *JobRepository) ImportTag(jobId int64, tagType string, tagName string, t

if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error adding tag on import with %s: %v", s, err)
cclog.Errorf("Error adding tag on import with %s: %v", s, err)
return err
}
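The import hunk above shows the pattern repeated across all of these files: the cc-backend pkg/log and pkg/schema imports give way to the cc-lib packages, with the logger bound to the cclog alias so call sites only swap their package prefix. A minimal sketch of the resulting import block and one call site; the helper function is invented for illustration and is not part of cc-backend:

package repository

import (
	cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
	"github.com/ClusterCockpit/cc-lib/schema"
)

// describeTag is an illustrative helper showing how a former log.Debugf call
// reads after the port; the schema.Tag fields match those scanned in tags.go.
func describeTag(t *schema.Tag) {
	cclog.Debugf("tag %s (type %s) has scope %s", t.Name, t.Type, t.Scope)
}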
@@ -1,11 +1,11 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository

import (
"github.com/ClusterCockpit/cc-backend/pkg/log"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/jmoiron/sqlx"
)

@@ -20,7 +20,7 @@ func (r *JobRepository) TransactionInit() (*Transaction, error) {

t.tx, err = r.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions")
cclog.Warn("Error while bundling transactions")
return nil, err
}
return t, nil
@@ -30,14 +30,14 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {
var err error
if t.tx != nil {
if err = t.tx.Commit(); err != nil {
log.Warn("Error while committing transactions")
cclog.Warn("Error while committing transactions")
return err
}
}

t.tx, err = r.DB.Beginx()
if err != nil {
log.Warn("Error while bundling transactions")
cclog.Warn("Error while bundling transactions")
return err
}

@@ -46,7 +46,7 @@ func (r *JobRepository) TransactionCommit(t *Transaction) error {

func (r *JobRepository) TransactionEnd(t *Transaction) error {
if err := t.tx.Commit(); err != nil {
log.Warn("Error while committing SQL transactions")
cclog.Warn("Error while committing SQL transactions")
return err
}
return nil
@@ -59,13 +59,13 @@ func (r *JobRepository) TransactionAddNamed(
) (int64, error) {
res, err := t.tx.NamedExec(query, args)
if err != nil {
log.Errorf("Named Exec failed: %v", err)
cclog.Errorf("Named Exec failed: %v", err)
return 0, err
}

id, err := res.LastInsertId()
if err != nil {
log.Errorf("repository initDB(): %v", err)
cclog.Errorf("repository initDB(): %v", err)
return 0, err
}

@@ -73,16 +73,15 @@ func (r *JobRepository) TransactionAddNamed(
}

func (r *JobRepository) TransactionAdd(t *Transaction, query string, args ...interface{}) (int64, error) {

res, err := t.tx.Exec(query, args...)
if err != nil {
log.Errorf("TransactionAdd(), Exec() Error: %v", err)
cclog.Errorf("TransactionAdd(), Exec() Error: %v", err)
return 0, err
}

id, err := res.LastInsertId()
if err != nil {
log.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
cclog.Errorf("TransactionAdd(), LastInsertId() Error: %v", err)
return 0, err
}
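The transaction helpers touched above wrap a single sqlx transaction: TransactionInit opens it, TransactionAdd runs a statement and returns the last insert id, TransactionCommit flushes and reopens, and TransactionEnd commits. A rough usage sketch built only from the signatures visible in this diff; the INSERT statement and its column names are placeholders, not code from the repository:

package repository

import cclog "github.com/ClusterCockpit/cc-lib/ccLogger"

// insertWithTransaction is an illustrative sketch, not part of cc-backend.
func insertWithTransaction(r *JobRepository) error {
	t, err := r.TransactionInit()
	if err != nil {
		return err
	}
	// Placeholder statement; any number of TransactionAdd calls could follow.
	id, err := r.TransactionAdd(t, "INSERT INTO tag (tag_type, tag_name, tag_scope) VALUES (?, ?, ?)", "region", "europe", "global")
	if err != nil {
		return err
	}
	cclog.Debugf("inserted row with id %d", id)
	return r.TransactionEnd(t)
}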
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -13,13 +13,13 @@ import (
"strings"
"sync"

"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
sq "github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
"golang.org/x/crypto/bcrypt"
"github.com/ClusterCockpit/cc-backend/internal/config"
)

var (
@@ -50,7 +50,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("hpc_user").
Where("hpc_user.username = ?", username).RunWith(r.DB).
QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil {
log.Warnf("Error while querying user '%v' from database", username)
cclog.Warnf("Error while querying user '%v' from database", username)
return nil, err
}

@@ -59,7 +59,7 @@ func (r *UserRepository) GetUser(username string) (*schema.User, error) {
user.Email = email.String
if rawRoles.Valid {
if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil {
log.Warn("Error while unmarshaling raw roles from DB")
cclog.Warn("Error while unmarshaling raw roles from DB")
return nil, err
}
}
@@ -76,14 +76,14 @@ func (r *UserRepository) GetLdapUsernames() ([]string, error) {
var users []string
rows, err := r.DB.Query(`SELECT username FROM hpc_user WHERE hpc_user.ldap = 1`)
if err != nil {
log.Warn("Error while querying usernames")
cclog.Warn("Error while querying usernames")
return nil, err
}

for rows.Next() {
var username string
if err := rows.Scan(&username); err != nil {
log.Warnf("Error while scanning for user '%s'", username)
cclog.Warnf("Error while scanning for user '%s'", username)
return nil, err
}

@@ -111,7 +111,7 @@ func (r *UserRepository) AddUser(user *schema.User) error {
if user.Password != "" {
password, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
if err != nil {
log.Error("Error while encrypting new user password")
cclog.Error("Error while encrypting new user password")
return err
}
cols = append(cols, "password")
@@ -123,21 +123,21 @@ func (r *UserRepository) AddUser(user *schema.User) error {
}

if _, err := sq.Insert("hpc_user").Columns(cols...).Values(vals...).RunWith(r.DB).Exec(); err != nil {
log.Errorf("Error while inserting new user '%v' into DB", user.Username)
cclog.Errorf("Error while inserting new user '%v' into DB", user.Username)
return err
}

log.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)
cclog.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)

defaultMetricsCfg, err := config.LoadDefaultMetricsConfig()
if err != nil {
log.Errorf("Error loading default metrics config: %v", err)
cclog.Errorf("Error loading default metrics config: %v", err)
} else if defaultMetricsCfg != nil {
for _, cluster := range defaultMetricsCfg.Clusters {
metricsArray := config.ParseMetricsString(cluster.DefaultMetrics)
metricsJSON, err := json.Marshal(metricsArray)
if err != nil {
log.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
cclog.Errorf("Error marshaling default metrics for cluster %s: %v", cluster.Name, err)
continue
}
confKey := "job_view_selectedMetrics:" + cluster.Name
@@ -145,9 +145,9 @@ func (r *UserRepository) AddUser(user *schema.User) error {
Columns("username", "confkey", "value").
Values(user.Username, confKey, string(metricsJSON)).
RunWith(r.DB).Exec(); err != nil {
log.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
cclog.Errorf("Error inserting default job view metrics for user %s and cluster %s: %v", user.Username, cluster.Name, err)
} else {
log.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
cclog.Infof("Default job view metrics for user %s and cluster %s set to %s", user.Username, cluster.Name, string(metricsJSON))
}
}
}
@@ -160,7 +160,7 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
// TODO: Discuss updatable fields
if dbUser.Name != user.Name {
if _, err := sq.Update("hpc_user").Set("name", user.Name).Where("hpc_user.username = ?", dbUser.Username).RunWith(r.DB).Exec(); err != nil {
log.Errorf("error while updating name of user '%s'", user.Username)
cclog.Errorf("error while updating name of user '%s'", user.Username)
return err
}
}
@@ -179,10 +179,10 @@ func (r *UserRepository) UpdateUser(dbUser *schema.User, user *schema.User) erro
func (r *UserRepository) DelUser(username string) error {
_, err := r.DB.Exec(`DELETE FROM hpc_user WHERE hpc_user.username = ?`, username)
if err != nil {
log.Errorf("Error while deleting user '%s' from DB", username)
cclog.Errorf("Error while deleting user '%s' from DB", username)
return err
}
log.Infof("deleted user '%s' from DB", username)
cclog.Infof("deleted user '%s' from DB", username)
return nil
}

@@ -194,7 +194,7 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {

rows, err := q.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying user list")
cclog.Warn("Error while querying user list")
return nil, err
}

@@ -206,12 +206,12 @@ func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {
user := &schema.User{}
var name, email sql.NullString
if err := rows.Scan(&user.Username, &name, &email, &rawroles, &rawprojects); err != nil {
log.Warn("Error while scanning user list")
cclog.Warn("Error while scanning user list")
return nil, err
}

if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil {
log.Warn("Error while unmarshaling raw role list")
cclog.Warn("Error while unmarshaling raw role list")
return nil, err
}

@@ -234,7 +234,7 @@ func (r *UserRepository) AddRole(
newRole := strings.ToLower(queryrole)
user, err := r.GetUser(username)
if err != nil {
log.Warnf("Could not load user '%s'", username)
cclog.Warnf("Could not load user '%s'", username)
return err
}

@@ -249,7 +249,7 @@ func (r *UserRepository) AddRole(

roles, _ := json.Marshal(append(user.Roles, newRole))
if _, err := sq.Update("hpc_user").Set("roles", roles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
log.Errorf("error while adding new role for user '%s'", user.Username)
cclog.Errorf("error while adding new role for user '%s'", user.Username)
return err
}
return nil
@@ -259,7 +259,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr
oldRole := strings.ToLower(queryrole)
user, err := r.GetUser(username)
if err != nil {
log.Warnf("Could not load user '%s'", username)
cclog.Warnf("Could not load user '%s'", username)
return err
}

@@ -285,7 +285,7 @@ func (r *UserRepository) RemoveRole(ctx context.Context, username string, queryr

mroles, _ := json.Marshal(newroles)
if _, err := sq.Update("hpc_user").Set("roles", mroles).Where("hpc_user.username = ?", username).RunWith(r.DB).Exec(); err != nil {
log.Errorf("Error while removing role for user '%s'", user.Username)
cclog.Errorf("Error while removing role for user '%s'", user.Username)
return err
}
return nil
@@ -364,10 +364,10 @@ const ContextUserKey ContextKey = "user"
func GetUserFromContext(ctx context.Context) *schema.User {
x := ctx.Value(ContextUserKey)
if x == nil {
log.Warnf("no user retrieved from context")
cclog.Warnf("no user retrieved from context")
return nil
}
// log.Infof("user retrieved from context: %v", x.(*schema.User))
// cclog.Infof("user retrieved from context: %v", x.(*schema.User))
return x.(*schema.User)
}

@@ -385,11 +385,11 @@ func (r *UserRepository) FetchUserInCtx(ctx context.Context, username string) (*
if err == sql.ErrNoRows {
/* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */
/* since FetchUser will be called to retrieve full name and mail for every job in query/list */
// log.Warnf("User '%s' Not found in DB", username)
// cclog.Warnf("User '%s' Not found in DB", username)
return nil, nil
}

log.Warnf("Error while fetching user '%s'", username)
cclog.Warnf("Error while fetching user '%s'", username)
return nil, err
}
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -10,9 +10,9 @@ import (
"time"

"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/lrucache"
"github.com/ClusterCockpit/cc-lib/schema"
"github.com/jmoiron/sqlx"
)

@@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo {

lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
if err != nil {
log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
cclog.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
}

userCfgRepoInstance = &UserCfgRepo{
@@ -70,7 +70,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},

rows, err := uCfg.Lookup.Query(user.Username)
if err != nil {
log.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
cclog.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
return err, 0, 0
}

@@ -79,13 +79,13 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
for rows.Next() {
var key, rawval string
if err := rows.Scan(&key, &rawval); err != nil {
log.Warn("Error while scanning user uiconfig values")
cclog.Warn("Error while scanning user uiconfig values")
return err, 0, 0
}

var val interface{}
if err := json.Unmarshal([]byte(rawval), &val); err != nil {
log.Warn("Error while unmarshaling raw user uiconfig json")
cclog.Warn("Error while unmarshaling raw user uiconfig json")
return err, 0, 0
}

@@ -100,7 +100,7 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *schema.User) (map[string]interface{},
return uiconfig, 24 * time.Hour, size
})
if err, ok := data.(error); ok {
log.Error("Error in returned dataset")
cclog.Error("Error in returned dataset")
return nil, err
}

@@ -117,7 +117,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
if user == nil {
var val interface{}
if err := json.Unmarshal([]byte(value), &val); err != nil {
log.Warn("Error while unmarshaling raw user config json")
cclog.Warn("Error while unmarshaling raw user config json")
return err
}

@@ -128,7 +128,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
}

if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`, user.Username, key, value); err != nil {
log.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
cclog.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
return err
}
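GetUIConfig above resolves a user's UI settings through an in-memory LRU cache: the closure passed to the cache computes the configuration on a miss and hands it back together with a 24 hour time-to-live and an approximate size. A sketch of that get-or-compute pattern, assuming cc-lib's lrucache keeps the Get(key, computeValue) shape this closure relies on; the cache key format and the payload are made up for illustration:

package repository

import (
	"time"

	"github.com/ClusterCockpit/cc-lib/lrucache"
)

// cachedUIConfig is an illustrative sketch of the caching pattern, not the
// actual GetUIConfig implementation.
func cachedUIConfig(cache *lrucache.Cache, username string) map[string]interface{} {
	data := cache.Get("ui-config:"+username, func() (interface{}, time.Duration, int) {
		cfg := map[string]interface{}{"plot_general_colorscheme": "default"} // placeholder payload
		return cfg, 24 * time.Hour, 128                                      // value, time-to-live, approximate size
	})
	if cfg, ok := data.(map[string]interface{}); ok {
		return cfg
	}
	return nil
}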
@@ -1,5 +1,5 @@
// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
// All rights reserved.
// All rights reserved. This file is part of cc-backend.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repository
@@ -10,8 +10,8 @@ import (
"testing"

"github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
cclog "github.com/ClusterCockpit/cc-lib/ccLogger"
"github.com/ClusterCockpit/cc-lib/schema"
_ "github.com/mattn/go-sqlite3"
)

@@ -39,7 +39,7 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
} } ]
}`

log.Init("info", true)
cclog.Init("info", true)
dbfilepath := "testdata/job.db"
err := MigrateDB("sqlite3", dbfilepath)
if err != nil {