Merge pull request #320 from ClusterCockpit/hotfix
Fixes for Bugfix Release 1.4.2
Commit 9489ebc7d6

Makefile:
@@ -2,7 +2,7 @@ TARGET = ./cc-backend
 VAR = ./var
 CFG = config.json .env
 FRONTEND = ./web/frontend
-VERSION = 1.4.1
+VERSION = 1.4.2
 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
 CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S")
 LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}'
@@ -1,4 +1,4 @@
-# `cc-backend` version 1.4.1
+# `cc-backend` version 1.4.2

 Supports job archive version 2 and database version 8.

@@ -12,7 +12,8 @@ For release specific notes visit the [ClusterCockpit Documentation](https://clus
   migration might require several hours!
 - You need to adapt the `cluster.json` configuration files in the job-archive,
   add new required attributes to the metric list and after that edit
-  `./job-archive/version.txt` to version 2.
+  `./job-archive/version.txt` to version 2. Only metrics that have the footprint
+  attribute set can be filtered and show up in the footprint UI and polar plot.
 - Continuous scrolling is default now in all job lists. You can change this back
   to paging globally, also every user can configure to use paging or continuous
   scrolling individually.
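For illustration, a metric entry in one of the `cluster.json` files might look as follows after this migration step. This is only a sketch: the `footprint` attribute is the new field described above, while all other field values are placeholders and may differ from your setup.

{
  "name": "flops_any",
  "unit": { "base": "Flops/s", "prefix": "G" },
  "scope": "node",
  "timestep": 60,
  "aggregation": "avg",
  "peak": 9216,
  "normal": 1024,
  "caution": 200,
  "alert": 50,
  "footprint": "avg"
}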
@@ -112,7 +112,7 @@ func main() {

 	if flagInit {
 		initEnv()
-		fmt.Print("Succesfully setup environment!\n")
+		fmt.Print("Successfully setup environment!\n")
 		fmt.Print("Please review config.json and .env and adjust it to your needs.\n")
 		fmt.Print("Add your job-archive at ./var/job-archive.\n")
 		os.Exit(0)
@@ -25,7 +25,6 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/generated"
-	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/internal/routerConfig"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
@@ -314,9 +313,6 @@ func serverShutdown() {
 	// First shut down the server gracefully (waiting for all ongoing requests)
 	server.Shutdown(context.Background())

-	// Then, wait for any async jobStarts still pending...
-	repository.WaitForJobStart()
-
 	// Then, wait for any async archivings still pending...
 	archiver.WaitForArchiving()
 }
@@ -1,5 +1,5 @@
 [Unit]
-Description=ClusterCockpit Web Server (Go edition)
+Description=ClusterCockpit Web Server
 Documentation=https://github.com/ClusterCockpit/cc-backend
 Wants=network-online.target
 After=network-online.target
@@ -249,9 +249,6 @@ func TestRestApi(t *testing.T) {
 		if response.StatusCode != http.StatusCreated {
 			t.Fatal(response.Status, recorder.Body.String())
 		}
-
-		time.Sleep(1 * time.Second)
-
 		resolver := graph.GetResolverInstance()
 		job, err := restapi.JobRepository.Find(&TestJobId, &TestClusterName, &TestStartTime)
 		if err != nil {
@@ -123,18 +123,8 @@ func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) {
 	}
 }

-// StartJobApiResponse model
-type StartJobApiResponse struct {
-	Message string `json:"msg"`
-}
-
-// DeleteJobApiResponse model
-type DeleteJobApiResponse struct {
-	Message string `json:"msg"`
-}
-
-// UpdateUserApiResponse model
-type UpdateUserApiResponse struct {
+// DefaultApiResponse model
+type DefaultJobApiResponse struct {
 	Message string `json:"msg"`
 }

@@ -341,7 +331,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
 	withMetadata := false
 	filter := &model.JobFilter{}
 	page := &model.PageRequest{ItemsPerPage: 25, Page: 1}
-	order := &model.OrderByInput{Field: "startTime", Order: model.SortDirectionEnumDesc}
+	order := &model.OrderByInput{Field: "startTime", Type: "col", Order: model.SortDirectionEnumDesc}

 	for key, vals := range r.URL.Query() {
 		switch key {
@@ -790,6 +780,11 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 		return
 	}

+	// aquire lock to avoid race condition between API calls
+	var unlockOnce sync.Once
+	api.RepositoryMutex.Lock()
+	defer unlockOnce.Do(api.RepositoryMutex.Unlock)
+
 	// Check if combination of (job_id, cluster_id, start_time) already exists:
 	jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil)
 	if err != nil && err != sql.ErrNoRows {
@@ -804,12 +799,27 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
 		}
 	}

-	repository.TriggerJobStart(repository.JobWithUser{Job: &req, User: repository.GetUserFromContext(r.Context())})
+	id, err := api.JobRepository.Start(&req)
+	if err != nil {
+		handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw)
+		return
+	}
+	// unlock here, adding Tags can be async
+	unlockOnce.Do(api.RepositoryMutex.Unlock)
+
+	for _, tag := range req.Tags {
+		if _, err := api.JobRepository.AddTagOrCreate(repository.GetUserFromContext(r.Context()), id, tag.Type, tag.Name, tag.Scope); err != nil {
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+			handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw)
+			return
+		}
+	}
+
+	log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime)
 	rw.Header().Add("Content-Type", "application/json")
 	rw.WriteHeader(http.StatusCreated)
-	json.NewEncoder(rw).Encode(StartJobApiResponse{
-		Message: fmt.Sprintf("Successfully triggered job start"),
+	json.NewEncoder(rw).Encode(DefaultJobApiResponse{
+		Message: "success",
 	})
 }

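The two hunks above replace the asynchronous job-start worker with a synchronous insert guarded by api.RepositoryMutex. The sync.Once around the unlock lets the hot path release the mutex as soon as the duplicate check and insert are done, while the deferred call still covers every early-return path. A minimal, self-contained sketch of that pattern (names are illustrative, not taken from cc-backend):

package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex

// insertThenTag demonstrates the unlock-once pattern: the deferred
// unlock covers every early return, while the explicit Do releases
// the mutex as soon as the critical section ends. sync.Once makes
// sure Unlock runs exactly once on every path.
func insertThenTag() error {
	var unlockOnce sync.Once
	mu.Lock()
	defer unlockOnce.Do(mu.Unlock)

	// critical section: duplicate check and database insert would go here

	// unlock early; the follow-up work below does not need the lock
	unlockOnce.Do(mu.Unlock)

	// slower follow-up work (e.g. creating tags) runs without the lock
	fmt.Println("tags created outside the critical section")
	return nil
}

func main() {
	if err := insertThenTag(); err != nil {
		fmt.Println(err)
	}
}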
@@ -892,7 +902,7 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
 	}
 	rw.Header().Add("Content-Type", "application/json")
 	rw.WriteHeader(http.StatusOK)
-	json.NewEncoder(rw).Encode(DeleteJobApiResponse{
+	json.NewEncoder(rw).Encode(DefaultJobApiResponse{
 		Message: fmt.Sprintf("Successfully deleted job %s", id),
 	})
 }
@@ -943,7 +953,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)

 	rw.Header().Add("Content-Type", "application/json")
 	rw.WriteHeader(http.StatusOK)
-	json.NewEncoder(rw).Encode(DeleteJobApiResponse{
+	json.NewEncoder(rw).Encode(DefaultJobApiResponse{
 		Message: fmt.Sprintf("Successfully deleted job %d", job.ID),
 	})
 }
@@ -987,7 +997,7 @@ func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {

 	rw.Header().Add("Content-Type", "application/json")
 	rw.WriteHeader(http.StatusOK)
-	json.NewEncoder(rw).Encode(DeleteJobApiResponse{
+	json.NewEncoder(rw).Encode(DefaultJobApiResponse{
 		Message: fmt.Sprintf("Successfully deleted %d jobs", cnt),
 	})
 }
@@ -36,10 +36,7 @@ func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag,

 // ConcurrentJobs is the resolver for the concurrentJobs field.
 func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) {
-	if obj.State == schema.JobStateRunning {
-		obj.Duration = int32(time.Now().Unix() - obj.StartTimeUnix)
-	}
-
+	// FIXME: Make the hardcoded duration configurable
 	if obj.Exclusive != 1 && obj.Duration > 600 {
 		return r.Repo.FindConcurrentJobs(ctx, obj)
 	}
@@ -82,8 +82,6 @@ func Connect(driver string, db string) {
 		if err != nil {
 			log.Fatal(err)
 		}
-
-		startJobStartWorker()
 	})
 }

@@ -79,12 +79,9 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
 	}
 	job.RawFootprint = nil

-	// if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
-	// 	return nil, err
-	// }
-
 	job.StartTime = time.Unix(job.StartTimeUnix, 0)
-	if job.Duration == 0 && job.State == schema.JobStateRunning {
+	// Always ensure accurate duration for running jobs
+	if job.State == schema.JobStateRunning {
 		job.Duration = int32(time.Since(job.StartTime).Seconds())
 	}

@@ -457,6 +454,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
 	return subclusters, nil
 }

+// FIXME: Set duration to requested walltime?
 func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
 	start := time.Now()
 	res, err := sq.Update("job").
@@ -170,8 +170,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 		query = buildTimeCondition("job.start_time", filter.StartTime, query)
 	}
 	if filter.Duration != nil {
-		now := time.Now().Unix() // There does not seam to be a portable way to get the current unix timestamp accross different DBs.
-		query = query.Where("(CASE WHEN job.job_state = 'running' THEN (? - job.start_time) ELSE job.duration END) BETWEEN ? AND ?", now, filter.Duration.From, filter.Duration.To)
+		query = buildIntCondition("job.duration", filter.Duration, query)
 	}
 	if filter.MinRunningFor != nil {
 		now := time.Now().Unix() // There does not seam to be a portable way to get the current unix timestamp accross different DBs.
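The dropped CASE expression computed a running job's duration inside SQL; since scanJob (see the hunk at @@ -79,12 +79,9 @@ above) now refreshes the duration of running jobs on every scan, a plain integer range condition on the stored column suffices. As a rough sketch of what a helper like buildIntCondition could do with squirrel — the actual implementation in cc-backend may differ:

package repository

import (
	sq "github.com/Masterminds/squirrel"
)

// IntRange is a hypothetical stand-in for the filter model's integer range.
type IntRange struct {
	From int64
	To   int64
}

// buildIntCondition translates an integer range filter into a portable
// BETWEEN clause. Sketch only; the real helper may differ.
func buildIntCondition(field string, cond *IntRange, query sq.SelectBuilder) sq.SelectBuilder {
	return query.Where(field+" BETWEEN ? AND ?", cond.From, cond.To)
}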
@@ -1,83 +0,0 @@
-// Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
-// All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-package repository
-
-import (
-	"sync"
-	"time"
-
-	"github.com/ClusterCockpit/cc-backend/pkg/log"
-	"github.com/ClusterCockpit/cc-backend/pkg/schema"
-)
-
-type JobWithUser struct {
-	Job  *schema.JobMeta
-	User *schema.User
-}
-
-var (
-	jobStartPending sync.WaitGroup
-	jobStartChannel chan JobWithUser
-)
-
-func startJobStartWorker() {
-	jobStartChannel = make(chan JobWithUser, 128)
-
-	go jobStartWorker()
-}
-
-// Archiving worker thread
-func jobStartWorker() {
-	for {
-		select {
-		case req, ok := <-jobStartChannel:
-			if !ok {
-				break
-			}
-			jobRepo := GetJobRepository()
-			var id int64
-
-			for i := 0; i < 5; i++ {
-				var err error
-
-				id, err = jobRepo.Start(req.Job)
-				if err != nil {
-					log.Errorf("Attempt %d: insert into database failed: %v", i, err)
-				} else {
-					break
-				}
-				time.Sleep(1 * time.Second)
-			}
-
-			for _, tag := range req.Job.Tags {
-				if _, err := jobRepo.AddTagOrCreate(req.User, id,
-					tag.Type, tag.Name, tag.Scope); err != nil {
-					log.Errorf("adding tag to new job %d failed: %v", id, err)
-				}
-			}
-
-			log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d",
-				id, req.Job.Cluster, req.Job.JobID, req.Job.User, req.Job.StartTime)
-
-			jobStartPending.Done()
-		}
-	}
-}
-
-// Trigger async archiving
-func TriggerJobStart(req JobWithUser) {
-	if jobStartChannel == nil {
-		log.Fatal("Cannot start Job without jobStart channel. Did you Start the worker?")
-	}
-
-	jobStartPending.Add(1)
-	jobStartChannel <- req
-}
-
-// Wait for background thread to finish pending archiving operations
-func WaitForJobStart() {
-	// close channel and wait for worker to process remaining jobs
-	jobStartPending.Wait()
-}
@@ -111,7 +111,7 @@ func BenchmarkDB_QueryJobs(b *testing.B) {
 	user := "mppi133h"
 	filter.User = &model.StringInput{Eq: &user}
 	page := &model.PageRequest{ItemsPerPage: 50, Page: 1}
-	order := &model.OrderByInput{Field: "startTime", Order: model.SortDirectionEnumDesc}
+	order := &model.OrderByInput{Field: "startTime", Type: "col", Order: model.SortDirectionEnumDesc}

 	b.Run("QueryJobs", func(b *testing.B) {
 		db := setup(b)
@@ -182,6 +182,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
 	return i
 }

+// FIXME: Lots of redundant code. Needs refactoring
 func buildFilterPresets(query url.Values) map[string]interface{} {
 	filterPresets := map[string]interface{}{}

@@ -9,12 +9,11 @@
 -->

 <script context="module">
-  function findJobThresholds(job, metricConfig) {
-    if (!job || !metricConfig) {
+  function findJobThresholds(job, stat, metricConfig) {
+    if (!job || !metricConfig || !stat) {
       console.warn("Argument missing for findJobThresholds!");
       return null;
     }

     // metricConfig is on subCluster-Level
     const defaultThresholds = {
       peak: metricConfig.peak,
|
|||||||
caution: metricConfig.caution,
|
caution: metricConfig.caution,
|
||||||
alert: metricConfig.alert
|
alert: metricConfig.alert
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
NEW: Footprints should be comparable: Always use Unchanged Single Node Thresholds, except for shared jobs.
|
Footprints should be comparable:
|
||||||
HW Clocks, HW Temperatures and File/Net IO Thresholds will be scaled down too, even if they are independent.
|
Always use unchanged single node thresholds for exclusive jobs and "avg" Footprints.
|
||||||
'jf.stats' is one of: avg, min, max -> Always relative to one nodes' thresholds as configured.
|
For shared jobs, scale thresholds by the fraction of the job's HWThreads to the node's HWThreads.
|
||||||
|
'stat' is one of: avg, min, max
|
||||||
*/
|
*/
|
||||||
if (job.exclusive === 1) {
|
if (job.exclusive === 1 || stat === "avg") {
|
||||||
return defaultThresholds
|
return defaultThresholds
|
||||||
} else {
|
} else {
|
||||||
const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster)
|
const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster)
|
||||||
@ -40,29 +39,6 @@
|
|||||||
alert: round(defaultThresholds.alert * jobFraction, 0),
|
alert: round(defaultThresholds.alert * jobFraction, 0),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/* OLD: Based on Metric Aggregation Setting
|
|
||||||
// Job_Exclusivity does not matter, only aggregation
|
|
||||||
if (metricConfig.aggregation === "avg") {
|
|
||||||
return defaultThresholds;
|
|
||||||
} else if (metricConfig.aggregation === "sum") {
|
|
||||||
const topol = getContext("getHardwareTopology")(job.cluster, job.subCluster)
|
|
||||||
const jobFraction = job.numHWThreads / topol.node.length;
|
|
||||||
|
|
||||||
return {
|
|
||||||
peak: round(defaultThresholds.peak * jobFraction, 0),
|
|
||||||
normal: round(defaultThresholds.normal * jobFraction, 0),
|
|
||||||
caution: round(defaultThresholds.caution * jobFraction, 0),
|
|
||||||
alert: round(defaultThresholds.alert * jobFraction, 0),
|
|
||||||
};
|
|
||||||
} else {
|
|
||||||
console.warn(
|
|
||||||
"Missing or unkown aggregation mode (sum/avg) for metric:",
|
|
||||||
metricConfig,
|
|
||||||
);
|
|
||||||
return defaultThresholds;
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
}
|
}
|
||||||
</script>
|
</script>
|
||||||
|
|
||||||
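To make the threshold scaling concrete, with illustrative numbers: on a node with 72 HWThreads and a configured peak of 360, a shared job using 18 HWThreads has jobFraction = 18 / 72 = 0.25, so its displayed peak threshold becomes round(360 * 0.25, 0) = 90. An exclusive job, or any footprint with stat "avg", keeps the unscaled 360.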
@@ -93,7 +69,7 @@
       const unit = (fmc?.unit?.prefix ? fmc.unit.prefix : "") + (fmc?.unit?.base ? fmc.unit.base : "")

       // Threshold / -Differences
-      const fmt = findJobThresholds(job, fmc);
+      const fmt = findJobThresholds(job, jf.stat, fmc);
       if (jf.name === "flops_any") fmt.peak = round(fmt.peak * 0.85, 0);

       // Define basic data -> Value: Use as Provided
@@ -7,7 +7,7 @@
 -->

 <script>
-  import { Badge, Button, Icon } from "@sveltestrap/sveltestrap";
+  import { Badge, Button, Icon, Tooltip } from "@sveltestrap/sveltestrap";
   import { scrambleNames, scramble } from "../utils.js";
   import Tag from "../helper/Tag.svelte";
   import TagManagement from "../helper/TagManagement.svelte";
@@ -42,12 +42,30 @@
   let displayCheck = false;
   function clipJobId(jid) {
     displayCheck = true;
+    // Navigator clipboard api needs a secure context (https)
+    if (navigator.clipboard && window.isSecureContext) {
       navigator.clipboard
         .writeText(jid)
         .catch((reason) => console.error(reason));
+    } else {
+      // Workaround: Create, Fill, And Copy Content of Textarea
+      const textArea = document.createElement("textarea");
+      textArea.value = jid;
+      textArea.style.position = "absolute";
+      textArea.style.left = "-999999px";
+      document.body.prepend(textArea);
+      textArea.select();
+      try {
+        document.execCommand('copy');
+      } catch (error) {
+        console.error(error);
+      } finally {
+        textArea.remove();
+      }
+    }
     setTimeout(function () {
       displayCheck = false;
-    }, 1500);
+    }, 1000);
   }
 </script>

@@ -58,13 +76,18 @@
       <a href="/monitoring/job/{job.id}" target="_blank">{job.jobId}</a>
       ({job.cluster})
     </span>
-    <Button outline color="secondary" size="sm" title="Copy JobID to Clipboard" on:click={clipJobId(job.jobId)} >
+    <Button id={`${job.cluster}-${job.jobId}-clipboard`} outline color="secondary" size="sm" on:click={clipJobId(job.jobId)} >
       {#if displayCheck}
-        <Icon name="clipboard2-check-fill"/> Copied
+        <Icon name="clipboard2-check-fill"/>
       {:else}
-        <Icon name="clipboard2"/> Job ID
+        <Icon name="clipboard2"/>
       {/if}
     </Button>
+    <Tooltip
+      target={`${job.cluster}-${job.jobId}-clipboard`}
+      placement="right">
+      { displayCheck ? 'Copied!' : 'Copy Job ID to Clipboard' }
+    </Tooltip>
   </span>
   {#if job.metaData?.jobName}
     {#if job.metaData?.jobName.length <= 25}