Mirror of https://github.com/ClusterCockpit/cc-backend
Synced 2025-04-09 05:35:55 +02:00

Compare commits (28 commits)

96977c6183
73d83164fc
1064f5e4a8
5be98c7087
0d689c7dff
1f24ed46a0
92b4159f9e
5817b41e29
d6b132e3a6
318f70f34c
e41525d40a
a102220e52
e9a214c5b2
c53f5eb144
9ed64e0388
93040d4629
0144ad43f5
8da2fc30c3
0e27ae7795
33c6cdb9fe
f5f36427a4
c964f09a4f
bd0cc69668
84fffac264
fc0c76bd77
d209547968
0191bc3821
d61bf212f5
.github/workflows/Release.yml (vendored): file deleted, 331 lines
@@ -1,331 +0,0 @@

# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions

# Workflow name
name: Release

# Run on tag push
on:
  push:
    tags:
    - '**'

jobs:

  #
  # Build on AlmaLinux 8.5 using golang-1.18.2
  #
  AlmaLinux-RPM-build:
    runs-on: ubuntu-latest
    # See: https://hub.docker.com/_/almalinux
    container: almalinux:8.5
    # The job outputs link to the outputs of the 'rpmrename' step
    # Only job outputs can be used in child jobs
    outputs:
      rpm : ${{steps.rpmrename.outputs.RPM}}
      srpm : ${{steps.rpmrename.outputs.SRPM}}
    steps:

    # Use dnf to install development packages
    - name: Install development packages
      run: |
        dnf --assumeyes group install "Development Tools" "RPM Development Tools"
        dnf --assumeyes install wget openssl-devel diffutils delve which npm
        dnf --assumeyes install 'dnf-command(builddep)'

    # Checkout git repository and submodules
    # fetch-depth must be 0 to use git describe
    # See: https://github.com/marketplace/actions/checkout
    - name: Checkout
      uses: actions/checkout@v2
      with:
        submodules: recursive
        fetch-depth: 0

    # Use dnf to install build dependencies
    - name: Install build dependencies
      run: |
        wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
        rpm -i go*.rpm
        npm install --global yarn rollup svelte rollup-plugin-svelte
        #dnf --assumeyes builddep build/package/cc-backend.spec

    - name: RPM build ClusterCockpit
      id: rpmbuild
      run: make RPM

    # AlmaLinux 8.5 is a derivate of RedHat Enterprise Linux 8 (UBI8),
    # so the created RPM both contain the substring 'el8' in the RPM file names
    # This step replaces the substring 'el8' to 'alma85'. It uses the move operation
    # because it is unclear whether the default AlmaLinux 8.5 container contains the
    # 'rename' command. This way we also get the new names for output.
    - name: Rename RPMs (s/el8/alma85/)
      id: rpmrename
      run: |
        OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
        OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
        NEW_RPM="${OLD_RPM/el8/alma85}"
        NEW_SRPM=${OLD_SRPM/el8/alma85}
        mv "${OLD_RPM}" "${NEW_RPM}"
        mv "${OLD_SRPM}" "${NEW_SRPM}"
        echo "::set-output name=SRPM::${NEW_SRPM}"
        echo "::set-output name=RPM::${NEW_RPM}"

    # See: https://github.com/actions/upload-artifact
    - name: Save RPM as artifact
      uses: actions/upload-artifact@v2
      with:
        name: cc-backend RPM for AlmaLinux 8.5
        path: ${{ steps.rpmrename.outputs.RPM }}
    - name: Save SRPM as artifact
      uses: actions/upload-artifact@v2
      with:
        name: cc-backend SRPM for AlmaLinux 8.5
        path: ${{ steps.rpmrename.outputs.SRPM }}

  #
  # Build on UBI 8 using golang-1.18.2
  #
  UBI-8-RPM-build:
    runs-on: ubuntu-latest
    # See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
    container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065
    # The job outputs link to the outputs of the 'rpmbuild' step
    outputs:
      rpm : ${{steps.rpmbuild.outputs.RPM}}
      srpm : ${{steps.rpmbuild.outputs.SRPM}}
    steps:

    # Use dnf to install development packages
    - name: Install development packages
      run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which

    # Checkout git repository and submodules
    # fetch-depth must be 0 to use git describe
    # See: https://github.com/marketplace/actions/checkout
    - name: Checkout
      uses: actions/checkout@v2
      with:
        submodules: recursive
        fetch-depth: 0

    # Use dnf to install build dependencies
    - name: Install build dependencies
      run: |
        wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
            http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
        rpm -i go*.rpm
        dnf --assumeyes --disableplugin=subscription-manager install npm
        npm install --global yarn rollup svelte rollup-plugin-svelte
        #dnf --assumeyes builddep build/package/cc-backend.spec

    - name: RPM build ClusterCockpit
      id: rpmbuild
      run: make RPM

    # See: https://github.com/actions/upload-artifact
    - name: Save RPM as artifact
      uses: actions/upload-artifact@v2
      with:
        name: cc-backend RPM for UBI 8
        path: ${{ steps.rpmbuild.outputs.RPM }}
    - name: Save SRPM as artifact
      uses: actions/upload-artifact@v2
      with:
        name: cc-backend SRPM for UBI 8
        path: ${{ steps.rpmbuild.outputs.SRPM }}

  #
  # Build on Ubuntu 20.04 using official go 1.19.1 package
  #
  Ubuntu-focal-build:
    runs-on: ubuntu-latest
    container: ubuntu:20.04
    # The job outputs link to the outputs of the 'debrename' step
    # Only job outputs can be used in child jobs
    outputs:
      deb : ${{steps.debrename.outputs.DEB}}
    steps:
    # Use apt to install development packages
    - name: Install development packages
      run: |
        apt update && apt --assume-yes upgrade
        apt --assume-yes install build-essential sed git wget bash
        apt --assume-yes install npm
        npm install --global yarn rollup svelte rollup-plugin-svelte
    # Checkout git repository and submodules
    # fetch-depth must be 0 to use git describe
    # See: https://github.com/marketplace/actions/checkout
    - name: Checkout
      uses: actions/checkout@v2
      with:
        submodules: recursive
        fetch-depth: 0
    # Use official golang package
    - name: Install Golang
      run: |
        wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz
        tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz
        export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
        go version
    - name: DEB build ClusterCockpit
      id: dpkg-build
      run: |
        ls -la
        pwd
        env
        export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
        git config --global --add safe.directory $(pwd)
        make DEB
    - name: Rename DEB (add '_ubuntu20.04')
      id: debrename
      run: |
        OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
        NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu20.04.deb"
        mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
        echo "::set-output name=DEB::${NEW_DEB_FILE}"
    # See: https://github.com/actions/upload-artifact
    - name: Save DEB as artifact
      uses: actions/upload-artifact@v2
      with:
        name: cc-backend DEB for Ubuntu 20.04
        path: ${{ steps.debrename.outputs.DEB }}

  #
  # Build on Ubuntu 20.04 using official go 1.19.1 package
  #
  Ubuntu-jammy-build:
    runs-on: ubuntu-latest
    container: ubuntu:22.04
    # The job outputs link to the outputs of the 'debrename' step
    # Only job outputs can be used in child jobs
    outputs:
      deb : ${{steps.debrename.outputs.DEB}}
    steps:
    # Use apt to install development packages
    - name: Install development packages
      run: |
        apt update && apt --assume-yes upgrade
        apt --assume-yes install build-essential sed git wget bash npm
        npm install --global yarn rollup svelte rollup-plugin-svelte
    # Checkout git repository and submodules
    # fetch-depth must be 0 to use git describe
    # See: https://github.com/marketplace/actions/checkout
    - name: Checkout
      uses: actions/checkout@v2
      with:
        submodules: recursive
        fetch-depth: 0
    # Use official golang package
    - name: Install Golang
      run: |
        wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz
        tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz
        export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
        go version
    - name: DEB build ClusterCockpit
      id: dpkg-build
      run: |
        ls -la
        pwd
        env
        export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
        git config --global --add safe.directory $(pwd)
        make DEB
    - name: Rename DEB (add '_ubuntu22.04')
      id: debrename
      run: |
        OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
        NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb"
        mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
        echo "::set-output name=DEB::${NEW_DEB_FILE}"
    # See: https://github.com/actions/upload-artifact
    - name: Save DEB as artifact
      uses: actions/upload-artifact@v2
      with:
        name: cc-backend DEB for Ubuntu 22.04
        path: ${{ steps.debrename.outputs.DEB }}

  #
  # Create release with fresh RPMs
  #
  Release:
    runs-on: ubuntu-latest
    # We need the RPMs, so add dependency
    needs: [AlmaLinux-RPM-build, UBI-8-RPM-build, Ubuntu-focal-build, Ubuntu-jammy-build]

    steps:
    # See: https://github.com/actions/download-artifact
    - name: Download AlmaLinux 8.5 RPM
      uses: actions/download-artifact@v2
      with:
        name: cc-backend RPM for AlmaLinux 8.5
    - name: Download AlmaLinux 8.5 SRPM
      uses: actions/download-artifact@v2
      with:
        name: cc-backend SRPM for AlmaLinux 8.5

    - name: Download UBI 8 RPM
      uses: actions/download-artifact@v2
      with:
        name: cc-backend RPM for UBI 8
    - name: Download UBI 8 SRPM
      uses: actions/download-artifact@v2
      with:
        name: cc-backend SRPM for UBI 8

    - name: Download Ubuntu 20.04 DEB
      uses: actions/download-artifact@v2
      with:
        name: cc-backend DEB for Ubuntu 20.04

    - name: Download Ubuntu 22.04 DEB
      uses: actions/download-artifact@v2
      with:
        name: cc-backend DEB for Ubuntu 22.04

    # The download actions do not publish the name of the downloaded file,
    # so we re-use the job outputs of the parent jobs. The files are all
    # downloaded to the current folder.
    # The gh-release action afterwards does not accept file lists but all
    # files have to be listed at 'files'. The step creates one output per
    # RPM package (2 per distro)
    - name: Set RPM variables
      id: files
      run: |
        ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}")
        ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}")
        UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
        UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
        U_2004_DEB=$(basename "${{ needs.Ubuntu-focal-build.outputs.deb}}")
        U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}")
        echo "ALMA_85_RPM::${ALMA_85_RPM}"
        echo "ALMA_85_SRPM::${ALMA_85_SRPM}"
        echo "UBI_8_RPM::${UBI_8_RPM}"
        echo "UBI_8_SRPM::${UBI_8_SRPM}"
        echo "U_2004_DEB::${U_2004_DEB}"
        echo "U_2204_DEB::${U_2204_DEB}"
        echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}"
        echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}"
        echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}"
        echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}"
        echo "::set-output name=U_2004_DEB::${U_2004_DEB}"
        echo "::set-output name=U_2204_DEB::${U_2204_DEB}"

    # See: https://github.com/softprops/action-gh-release
    - name: Release
      uses: softprops/action-gh-release@v1
      if: startsWith(github.ref, 'refs/tags/')
      with:
        name: cc-backend-${{github.ref_name}}
        files: |
          ${{ steps.files.outputs.ALMA_85_RPM }}
          ${{ steps.files.outputs.ALMA_85_SRPM }}
          ${{ steps.files.outputs.UBI_8_RPM }}
          ${{ steps.files.outputs.UBI_8_SRPM }}
          ${{ steps.files.outputs.U_2004_DEB }}
          ${{ steps.files.outputs.U_2204_DEB }}
.github/workflows/test.yml (vendored): 2 changed lines
@@ -7,7 +7,7 @@ jobs:
     - name: Install Go
       uses: actions/setup-go@v4
       with:
-        go-version: 1.22.x
+        go-version: 1.24.x
     - name: Checkout code
       uses: actions/checkout@v3
     - name: Build, Vet & Test
@@ -137,11 +137,6 @@ type JobMetricWithName {
   metric: JobMetric!
 }
 
-type JobMetricStatWithName {
-  name: String!
-  stats: MetricStatistics!
-}
-
 type JobMetric {
   unit: Unit
   timestep: Int!
@@ -156,6 +151,30 @@ type Series {
   data: [NullableFloat!]!
 }
 
+type StatsSeries {
+  mean: [NullableFloat!]!
+  median: [NullableFloat!]!
+  min: [NullableFloat!]!
+  max: [NullableFloat!]!
+}
+
+type JobStatsWithScope {
+  name: String!
+  scope: MetricScope!
+  stats: [ScopedStats!]!
+}
+
+type ScopedStats {
+  hostname: String!
+  id: String
+  data: MetricStatistics!
+}
+
+type JobStats {
+  name: String!
+  stats: MetricStatistics!
+}
+
 type Unit {
   base: String!
   prefix: String
@@ -167,13 +186,6 @@ type MetricStatistics {
   max: Float!
 }
 
-type StatsSeries {
-  mean: [NullableFloat!]!
-  median: [NullableFloat!]!
-  min: [NullableFloat!]!
-  max: [NullableFloat!]!
-}
-
 type MetricFootprints {
   metric: String!
   data: [NullableFloat!]!
@@ -247,7 +259,8 @@ type Query {
 
   job(id: ID!): Job
   jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]!
-  jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]!
+  jobStats(id: ID!, metrics: [String!]): [JobStats!]!
+  scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [JobStatsWithScope!]!
   jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
 
   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
@@ -12,7 +12,7 @@ var (
 )
 
 func cliInit() {
-    flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize swlite database file, config.json and .env")
+    flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize sqlite database file, config.json and .env")
     flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
     flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'hpc_user' table with ldap")
     flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling")
@@ -24,10 +24,10 @@ func cliInit() {
     flag.BoolVar(&flagForceDB, "force-db", false, "Force database version, clear dirty flag and exit")
     flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
     flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
-    flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `<username>:[admin,support,manager,api,user]:<password>`")
-    flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
+    flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: <username>:[admin,support,manager,api,user]:<password>")
+    flag.StringVar(&flagDelUser, "del-user", "", "Remove a existing user. Argument format: <username>")
     flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
     flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `<path-to-meta.json>:<path-to-data.json>,...`")
-    flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
+    flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug, info (default), warn, err, crit]`")
     flag.Parse()
 }
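A side note on the help-text changes above: Go's flag package treats the first back-quoted word in a usage string as the flag's argument placeholder and strips the quotes in -h output (see flag.UnquoteUsage), so a long back-quoted format string becomes the placeholder itself. That is plausibly why the backquotes were dropped from 'add-user' and 'del-user'. A minimal, self-contained illustration (hypothetical demo, not cc-backend code):

package main

import (
    "flag"
    "os"
)

func main() {
    // The back-quoted `username` below is extracted as the placeholder for -jwt,
    // while the plain string for -del-user falls back to the generic "string".
    var jwtUser, delUser string
    flag.StringVar(&jwtUser, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
    flag.StringVar(&delUser, "del-user", "", "Remove a existing user. Argument format: <username>")
    flag.CommandLine.SetOutput(os.Stdout)
    flag.PrintDefaults()
    // Prints:
    //   -del-user string
    //         Remove a existing user. Argument format: <username>
    //   -jwt username
    //         Generate and print a JWT for the user specified by its username
}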
@@ -5,7 +5,6 @@
 package main
 
 import (
-    "fmt"
     "os"
 
     "github.com/ClusterCockpit/cc-backend/internal/repository"
@@ -62,24 +61,23 @@ const configString = `
 
 func initEnv() {
     if util.CheckFileExists("var") {
-        fmt.Print("Directory ./var already exists. Exiting!\n")
-        os.Exit(0)
+        log.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
     }
 
     if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil {
-        log.Fatalf("Writing config.json failed: %s", err.Error())
+        log.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
     }
 
     if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil {
-        log.Fatalf("Writing .env failed: %s", err.Error())
+        log.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
    }
 
     if err := os.Mkdir("var", 0o777); err != nil {
-        log.Fatalf("Mkdir var failed: %s", err.Error())
+        log.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
     }
 
     err := repository.MigrateDB("sqlite3", "./var/job.db")
     if err != nil {
-        log.Fatalf("Initialize job.db failed: %s", err.Error())
+        log.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
     }
 }
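The log.Exit and log.Abortf calls introduced above come from cc-backend's pkg/log; their implementation is not part of this diff. A minimal sketch of the assumed semantics (Exit for a clean stop, Abortf for a failure; the exact exit codes are an assumption):

package main

import (
    "fmt"
    "os"
)

// Exit logs a final message and stops with a success status (assumed code 0).
func Exit(msg string) {
    fmt.Fprintln(os.Stderr, msg)
    os.Exit(0)
}

// Abortf logs a formatted error message and stops with a failure status (assumed code 1).
func Abortf(format string, args ...any) {
    fmt.Fprintf(os.Stderr, format, args...)
    os.Exit(1)
}

func main() {
    Exit("Directory ./var already exists. Cautiously exiting application initialization.")
}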
@@ -61,15 +61,23 @@ func main() {
     // Apply config flags for pkg/log
     log.Init(flagLogLevel, flagLogDateTime)
 
+    // If init flag set, run tasks here before any file dependencies cause errors
+    if flagInit {
+        initEnv()
+        log.Exit("Successfully setup environment!\n" +
+            "Please review config.json and .env and adjust it to your needs.\n" +
+            "Add your job-archive at ./var/job-archive.")
+    }
+
     // See https://github.com/google/gops (Runtime overhead is almost zero)
     if flagGops {
         if err := agent.Listen(agent.Options{}); err != nil {
-            log.Fatalf("gops/agent.Listen failed: %s", err.Error())
+            log.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
         }
     }
 
     if err := runtimeEnv.LoadEnv("./.env"); err != nil && !os.IsNotExist(err) {
-        log.Fatalf("parsing './.env' file failed: %s", err.Error())
+        log.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
     }
 
     // Initialize sub-modules and handle command line flags.
@@ -87,37 +95,29 @@ func main() {
     if flagMigrateDB {
         err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
         if err != nil {
-            log.Fatal(err)
+            log.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
         }
-        os.Exit(0)
+        log.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
     }
 
     if flagRevertDB {
         err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB)
         if err != nil {
-            log.Fatal(err)
+            log.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
         }
-        os.Exit(0)
+        log.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
     }
 
     if flagForceDB {
         err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB)
         if err != nil {
-            log.Fatal(err)
+            log.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
         }
-        os.Exit(0)
+        log.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
     }
 
     repository.Connect(config.Keys.DBDriver, config.Keys.DB)
 
-    if flagInit {
-        initEnv()
-        fmt.Print("Successfully setup environment!\n")
-        fmt.Print("Please review config.json and .env and adjust it to your needs.\n")
-        fmt.Print("Add your job-archive at ./var/job-archive.\n")
-        os.Exit(0)
-    }
-
     if !config.Keys.DisableAuthentication {
 
         auth.Init()
@@ -125,20 +125,27 @@ func main() {
         if flagNewUser != "" {
             parts := strings.SplitN(flagNewUser, ":", 3)
             if len(parts) != 3 || len(parts[0]) == 0 {
-                log.Fatal("invalid argument format for user creation")
+                log.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
+                    "Want: <username>:[admin,support,manager,api,user]:<password>\n"+
+                    "Have: %s\n", flagNewUser)
             }
 
             ur := repository.GetUserRepository()
             if err := ur.AddUser(&schema.User{
                 Username: parts[0], Projects: make([]string, 0), Password: parts[2], Roles: strings.Split(parts[1], ","),
             }); err != nil {
-                log.Fatalf("adding '%s' user authentication failed: %v", parts[0], err)
+                log.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
+            } else {
+                log.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
             }
         }
 
         if flagDelUser != "" {
             ur := repository.GetUserRepository()
             if err := ur.DelUser(flagDelUser); err != nil {
-                log.Fatalf("deleting user failed: %v", err)
+                log.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error())
+            } else {
+                log.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser)
             }
         }
 
@@ -146,60 +153,64 @@ func main() {
 
         if flagSyncLDAP {
             if authHandle.LdapAuth == nil {
-                log.Fatal("cannot sync: LDAP authentication is not configured")
+                log.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.")
             }
 
             if err := authHandle.LdapAuth.Sync(); err != nil {
-                log.Fatalf("LDAP sync failed: %v", err)
+                log.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error())
             }
-            log.Info("LDAP sync successfull")
+            log.Print("Sync LDAP: LDAP synchronization successfull.")
         }
 
         if flagGenJWT != "" {
             ur := repository.GetUserRepository()
             user, err := ur.GetUser(flagGenJWT)
             if err != nil {
-                log.Fatalf("could not get user from JWT: %v", err)
+                log.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error())
             }
 
             if !user.HasRole(schema.RoleApi) {
-                log.Warnf("user '%s' does not have the API role", user.Username)
+                log.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username)
             }
 
             jwt, err := authHandle.JwtAuth.ProvideJWT(user)
             if err != nil {
-                log.Fatalf("failed to provide JWT to user '%s': %v", user.Username, err)
+                log.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error())
             }
 
-            fmt.Printf("MAIN > JWT for '%s': %s\n", user.Username, jwt)
+            log.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt)
         }
 
     } else if flagNewUser != "" || flagDelUser != "" {
-        log.Fatal("arguments --add-user and --del-user can only be used if authentication is enabled")
+        log.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.")
     }
 
     if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil {
-        log.Fatalf("failed to initialize archive: %s", err.Error())
+        log.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error())
     }
 
     if err := metricdata.Init(); err != nil {
-        log.Fatalf("failed to initialize metricdata repository: %s", err.Error())
+        log.Abortf("Init: Failed to initialize metricdata repository.\nError %s\n", err.Error())
     }
 
     if flagReinitDB {
         if err := importer.InitDB(); err != nil {
-            log.Fatalf("failed to re-initialize repository DB: %s", err.Error())
+            log.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error())
+        } else {
+            log.Print("Init DB: Sucessfully re-initialized repository DB.")
         }
     }
 
     if flagImportJob != "" {
         if err := importer.HandleImportFlag(flagImportJob); err != nil {
-            log.Fatalf("job import failed: %s", err.Error())
+            log.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error())
+        } else {
+            log.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob)
         }
     }
 
     if !flagServer {
-        return
+        log.Exit("No errors, server flag not set. Exiting cc-backend.")
     }
 
     archiver.Start(repository.GetJobRepository())
@@ -64,7 +64,7 @@ func serverInit() {
         case string:
             return fmt.Errorf("MAIN > Panic: %s", e)
         case error:
-            return fmt.Errorf("MAIN > Panic caused by: %w", e)
+            return fmt.Errorf("MAIN > Panic caused by: %s", e.Error())
         }
 
         return errors.New("MAIN > Internal server error (panic)")
@@ -268,7 +268,7 @@ func serverStart() {
     // Start http or https server
     listener, err := net.Listen("tcp", config.Keys.Addr)
     if err != nil {
-        log.Fatalf("starting http listener failed: %v", err)
+        log.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
     }
 
     if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
@@ -281,7 +281,7 @@ func serverStart() {
         cert, err := tls.LoadX509KeyPair(
             config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
         if err != nil {
-            log.Fatalf("loading X509 keypair failed: %v", err)
+            log.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
         }
         listener = tls.NewListener(listener, &tls.Config{
             Certificates: []tls.Certificate{cert},
@@ -292,20 +292,20 @@ func serverStart() {
             MinVersion:               tls.VersionTLS12,
             PreferServerCipherSuites: true,
         })
-        fmt.Printf("HTTPS server listening at %s...", config.Keys.Addr)
+        log.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
     } else {
-        fmt.Printf("HTTP server listening at %s...", config.Keys.Addr)
+        log.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
     }
     //
     // Because this program will want to bind to a privileged port (like 80), the listener must
     // be established first, then the user can be changed, and after that,
     // the actual http server can be started.
     if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
-        log.Fatalf("error while preparing server start: %s", err.Error())
+        log.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
     }
 
     if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
-        log.Fatalf("starting server failed: %v", err)
+        log.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
     }
 }
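The comment above spells out the privileged-port ordering. Here it is in miniature, as a Linux-only sketch (cc-backend's runtimeEnv.DropPrivileges does the real work; uid/gid 65534, often "nobody", is an assumed unprivileged account):

package main

import (
    "fmt"
    "net"
    "net/http"
    "os"
    "syscall"
)

func main() {
    // 1. Bind first: ports below 1024 require elevated privileges.
    listener, err := net.Listen("tcp", ":80")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    // 2. Drop privileges; the already-bound listener remains usable.
    if err := syscall.Setgid(65534); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    if err := syscall.Setuid(65534); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    // 3. Only now start serving on the pre-bound listener.
    _ = http.Serve(listener, http.NotFoundHandler())
}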
go.mod: 5 changed lines
@@ -1,6 +1,7 @@
 module github.com/ClusterCockpit/cc-backend
 
 go 1.23.5
+toolchain go1.24.1
 
 require (
     github.com/99designs/gqlgen v0.17.66
@@ -10,7 +11,7 @@ require (
     github.com/go-co-op/gocron/v2 v2.16.0
     github.com/go-ldap/ldap/v3 v3.4.10
     github.com/go-sql-driver/mysql v1.9.0
-    github.com/golang-jwt/jwt/v5 v5.2.1
+    github.com/golang-jwt/jwt/v5 v5.2.2
     github.com/golang-migrate/migrate/v4 v4.18.2
     github.com/google/gops v0.3.28
     github.com/gorilla/handlers v1.5.2
@@ -78,7 +79,7 @@ require (
     github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
     go.uber.org/atomic v1.11.0 // indirect
     golang.org/x/mod v0.23.0 // indirect
-    golang.org/x/net v0.35.0 // indirect
+    golang.org/x/net v0.36.0 // indirect
     golang.org/x/sync v0.11.0 // indirect
     golang.org/x/sys v0.30.0 // indirect
     golang.org/x/text v0.22.0 // indirect
go.sum: 8 changed lines
@@ -83,8 +83,8 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx
 github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8=
 github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -279,8 +279,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
-golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
 golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
 golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1423,8 +1423,6 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
     rw.Header().Set("Content-Type", "text/plain")
     key, value := r.FormValue("key"), r.FormValue("value")
 
-    // fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value)
-
     if err := repository.GetUserCfgRepo().UpdateConfig(key, value, repository.GetUserFromContext(r.Context())); err != nil {
         http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
         return
@@ -7,9 +7,9 @@ package config
 import (
     "bytes"
     "encoding/json"
-    "log"
     "os"
 
+    "github.com/ClusterCockpit/cc-backend/pkg/log"
     "github.com/ClusterCockpit/cc-backend/pkg/schema"
 )
 
@@ -53,20 +53,20 @@ func Init(flagConfigFile string) {
     raw, err := os.ReadFile(flagConfigFile)
     if err != nil {
         if !os.IsNotExist(err) {
-            log.Fatalf("CONFIG ERROR: %v", err)
+            log.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
         }
     } else {
         if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
-            log.Fatalf("Validate config: %v\n", err)
+            log.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
         }
         dec := json.NewDecoder(bytes.NewReader(raw))
         dec.DisallowUnknownFields()
         if err := dec.Decode(&Keys); err != nil {
-            log.Fatalf("could not decode: %v", err)
+            log.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
         }
 
         if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
-            log.Fatal("At least one cluster required in config!")
+            log.Abort("Config Init: At least one cluster required in config. Exited with error.")
         }
     }
 }
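A quick illustration of why the dec.DisallowUnknownFields() call above matters: a misspelled key in config.json becomes a hard error instead of being silently ignored. The types here are hypothetical stand-ins, not cc-backend code:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

// ProgramConfig is a hypothetical stand-in for the decoded config struct.
type ProgramConfig struct {
    Addr string `json:"addr"`
}

func main() {
    // "adr" is a typo for "addr"; with DisallowUnknownFields the decoder
    // rejects it instead of leaving Addr silently empty.
    dec := json.NewDecoder(bytes.NewReader([]byte(`{"adr": "0.0.0.0:443"}`)))
    dec.DisallowUnknownFields()
    var cfg ProgramConfig
    fmt.Println(dec.Decode(&cfg)) // json: unknown field "adr"
}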
File diff suppressed because it is too large
@@ -81,11 +81,6 @@ type JobLinkResultList struct {
     Count *int       `json:"count,omitempty"`
 }
 
-type JobMetricStatWithName struct {
-    Name  string                   `json:"name"`
-    Stats *schema.MetricStatistics `json:"stats"`
-}
-
 type JobMetricWithName struct {
     Name   string             `json:"name"`
     Scope  schema.MetricScope `json:"scope"`
@@ -100,6 +95,17 @@ type JobResultList struct {
     HasNextPage *bool `json:"hasNextPage,omitempty"`
 }
 
+type JobStats struct {
+    Name  string                   `json:"name"`
+    Stats *schema.MetricStatistics `json:"stats"`
+}
+
+type JobStatsWithScope struct {
+    Name  string             `json:"name"`
+    Scope schema.MetricScope `json:"scope"`
+    Stats []*ScopedStats     `json:"stats"`
+}
+
 type JobsStatistics struct {
     ID   string `json:"id"`
     Name string `json:"name"`
@@ -173,6 +179,12 @@ type PageRequest struct {
     Page int `json:"page"`
 }
 
+type ScopedStats struct {
+    Hostname string                   `json:"hostname"`
+    ID       *string                  `json:"id,omitempty"`
+    Data     *schema.MetricStatistics `json:"data"`
+}
+
 type StringInput struct {
     Eq  *string `json:"eq,omitempty"`
     Neq *string `json:"neq,omitempty"`
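How the new ScopedStats shape serializes, sketched with local stand-ins (the real field points at schema.MetricStatistics, whose JSON tags are assumed here): a nil ID is dropped by omitempty, matching the nullable id: String in the GraphQL schema.

package main

import (
    "encoding/json"
    "fmt"
)

// Local stand-ins mirroring the generated types above; the json tags on
// MetricStatistics are an assumption for this sketch.
type MetricStatistics struct {
    Avg float64 `json:"avg"`
    Min float64 `json:"min"`
    Max float64 `json:"max"`
}

type ScopedStats struct {
    Hostname string            `json:"hostname"`
    ID       *string           `json:"id,omitempty"`
    Data     *MetricStatistics `json:"data"`
}

func main() {
    // Node-scope stats carry no sub-node id, so ID stays nil and omitempty
    // removes the field entirely from the serialized output.
    out, _ := json.Marshal(ScopedStats{
        Hostname: "node001",
        Data:     &MetricStatistics{Avg: 1.5, Min: 0.5, Max: 2.5},
    })
    fmt.Println(string(out)) // {"hostname":"node001","data":{"avg":1.5,"min":0.5,"max":2.5}}
}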
@@ -301,24 +301,23 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
     return res, err
 }
 
-// JobMetricStats is the resolver for the jobMetricStats field.
-func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) {
-
+// JobStats is the resolver for the jobStats field.
+func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.JobStats, error) {
     job, err := r.Query().Job(ctx, id)
     if err != nil {
-        log.Warn("Error while querying job for metrics")
+        log.Warnf("Error while querying job %s for metadata", id)
         return nil, err
     }
 
-    data, err := metricDataDispatcher.LoadStatData(job, metrics, ctx)
+    data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
     if err != nil {
-        log.Warn("Error while loading job stat data")
+        log.Warnf("Error while loading jobStats data for job id %s", id)
         return nil, err
     }
 
-    res := []*model.JobMetricStatWithName{}
+    res := []*model.JobStats{}
     for name, md := range data {
-        res = append(res, &model.JobMetricStatWithName{
+        res = append(res, &model.JobStats{
             Name:  name,
             Stats: &md,
         })
@@ -327,6 +326,44 @@ func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics [
     return res, err
 }
 
+// ScopedJobStats is the resolver for the scopedJobStats field.
+func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobStatsWithScope, error) {
+    job, err := r.Query().Job(ctx, id)
+    if err != nil {
+        log.Warnf("Error while querying job %s for metadata", id)
+        return nil, err
+    }
+
+    data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
+    if err != nil {
+        log.Warnf("Error while loading scopedJobStats data for job id %s", id)
+        return nil, err
+    }
+
+    res := make([]*model.JobStatsWithScope, 0)
+    for name, scoped := range data {
+        for scope, stats := range scoped {
+
+            mdlStats := make([]*model.ScopedStats, 0)
+            for _, stat := range stats {
+                mdlStats = append(mdlStats, &model.ScopedStats{
+                    Hostname: stat.Hostname,
+                    ID:       stat.Id,
+                    Data:     stat.Data,
+                })
+            }
+
+            res = append(res, &model.JobStatsWithScope{
+                Name:  name,
+                Scope: scope,
+                Stats: mdlStats,
+            })
+        }
+    }
+
+    return res, nil
+}
+
 // JobsFootprints is the resolver for the jobsFootprints field.
 func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
     // NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column!
@@ -354,30 +391,28 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
         return nil, err
     }
 
-    if !config.Keys.UiDefaults["job_list_usePaging"].(bool) {
-        hasNextPage := false
-        // page.Page += 1 : Simple, but expensive
-        // Example Page 4 @ 10 IpP : Does item 41 exist?
-        // Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists.
-        nextPage := &model.PageRequest{
-            ItemsPerPage: 1,
-            Page:         ((page.Page * page.ItemsPerPage) + 1),
-        }
-
-        nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
-        if err != nil {
-            log.Warn("Error while querying next jobs")
-            return nil, err
-        }
-
-        if len(nextJobs) == 1 {
-            hasNextPage = true
-        }
-
-        return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
-    } else {
-        return &model.JobResultList{Items: jobs, Count: &count}, nil
-    }
+    // Note: Even if App-Default 'config.Keys.UiDefaults["job_list_usePaging"]' is set, always return hasNextPage boolean.
+    // Users can decide in frontend to use continuous scroll, even if app-default is paging!
+    /*
+       Example Page 4 @ 10 IpP : Does item 41 exist?
+       Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists.
+    */
+    nextPage := &model.PageRequest{
+        ItemsPerPage: 1,
+        Page:         ((page.Page * page.ItemsPerPage) + 1),
+    }
+    nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
+    if err != nil {
+        log.Warn("Error while querying next jobs")
+        return nil, err
+    }
+
+    hasNextPage := false
+    if len(nextJobs) == 1 {
+        hasNextPage = true
+    }
+
+    return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
 }
 
 // JobsStatistics is the resolver for the jobsStatistics field.
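The hasNextPage probe above avoids fetching a whole second page: it asks for exactly one item placed just past the current page and checks whether anything comes back. The arithmetic, runnable in isolation:

package main

import "fmt"

// probePage returns the 1-based "page" to request at one item per page in
// order to test whether anything exists beyond the current page.
func probePage(page, itemsPerPage int) int {
    return page*itemsPerPage + 1
}

func main() {
    // Page 4 @ 10 items per page covers items 31..40. The probe asks for
    // "page 41 @ 1 item per page", i.e. item 41; if exactly one row comes
    // back, page 5 exists and hasNextPage is true.
    fmt.Println(probePage(4, 10)) // 41
}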
@@ -224,8 +224,34 @@ func LoadAverages(
     return nil
 }
 
-// Used for polar plots in frontend
-func LoadStatData(
+// Used for statsTable in frontend: Return scoped statistics by metric.
+func LoadScopedJobStats(
+    job *schema.Job,
+    metrics []string,
+    scopes []schema.MetricScope,
+    ctx context.Context,
+) (schema.ScopedJobStats, error) {
+
+    if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
+        return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
+    }
+
+    repo, err := metricdata.GetMetricDataRepo(job.Cluster)
+    if err != nil {
+        return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
+    }
+
+    scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
+    if err != nil {
+        log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
+        return nil, err
+    }
+
+    return scopedStats, nil
+}
+
+// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric.
+func LoadJobStats(
     job *schema.Job,
     metrics []string,
     ctx context.Context,
@@ -237,12 +263,12 @@ func LoadStatData(
     data := make(map[string]schema.MetricStatistics, len(metrics))
     repo, err := metricdata.GetMetricDataRepo(job.Cluster)
     if err != nil {
-        return data, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
+        return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
     }
 
     stats, err := repo.LoadStats(job, metrics, ctx)
     if err != nil {
-        log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
+        log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
         return data, err
     }
 
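The schema.ScopedJobStats type returned above is not itself shown in this diff; from how the cc-metric-store code below builds it, the shape is a nested map from metric name to scope to per-host (and optionally per-hwthread or per-accelerator id) statistics. A sketch with local stand-ins:

package main

import "fmt"

// Stand-ins for the pkg/schema types; names taken from the diff.
type MetricScope string

type MetricStatistics struct{ Avg, Min, Max float64 }

type ScopedStats struct {
    Hostname string
    Id       *string
    Data     *MetricStatistics
}

// ScopedJobStats: metric name -> scope -> list of per-host statistics.
type ScopedJobStats map[string]map[MetricScope][]*ScopedStats

func main() {
    stats := ScopedJobStats{
        "mem_bw": {
            "node": {{Hostname: "node001", Data: &MetricStatistics{Avg: 42, Min: 10, Max: 80}}},
        },
    }
    for metric, byScope := range stats {
        for scope, rows := range byScope {
            fmt.Println(metric, scope, len(rows)) // mem_bw node 1
        }
    }
}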
@@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest(
 ) (*ApiQueryResponse, error) {
     buf := &bytes.Buffer{}
     if err := json.NewEncoder(buf).Encode(body); err != nil {
-        log.Warn("Error while encoding request body")
+        log.Errorf("Error while encoding request body: %s", err.Error())
         return nil, err
     }
 
     req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf)
     if err != nil {
-        log.Warn("Error while building request body")
+        log.Errorf("Error while building request body: %s", err.Error())
         return nil, err
     }
     if ccms.jwt != "" {
@@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest(
 
     res, err := ccms.client.Do(req)
     if err != nil {
-        log.Error("Error while performing request")
+        log.Errorf("Error while performing request: %s", err.Error())
         return nil, err
     }
 
@@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest(
 
     var resBody ApiQueryResponse
     if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
-        log.Warn("Error while decoding result body")
+        log.Errorf("Error while decoding result body: %s", err.Error())
         return nil, err
     }
 
@@ -177,7 +177,7 @@ func (ccms *CCMetricStore) LoadData(
 ) (schema.JobData, error) {
     queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution)
     if err != nil {
-        log.Warn("Error while building queries")
+        log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
         return nil, err
     }
 
@@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData(
 
     resBody, err := ccms.doRequest(ctx, &req)
     if err != nil {
-        log.Error("Error while performing request")
+        log.Errorf("Error while performing request: %s", err.Error())
         return nil, err
     }
 
@@ -557,16 +557,9 @@ func (ccms *CCMetricStore) LoadStats(
     ctx context.Context,
 ) (map[string]map[string]schema.MetricStatistics, error) {
 
-    // metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
-    // resolution := 9000
-
-    // for _, mc := range metricConfigs {
-    //     resolution = min(resolution, mc.Timestep)
-    // }
-
     queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope shere for analysis view accelerator normalization?
     if err != nil {
-        log.Warn("Error while building query")
+        log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
         return nil, err
     }
 
@@ -581,7 +574,7 @@ func (ccms *CCMetricStore) LoadStats(
 
     resBody, err := ccms.doRequest(ctx, &req)
     if err != nil {
-        log.Error("Error while performing request")
+        log.Errorf("Error while performing request: %s", err.Error())
         return nil, err
     }
 
@@ -591,9 +584,8 @@ func (ccms *CCMetricStore) LoadStats(
         metric := ccms.toLocalName(query.Metric)
         data := res[0]
         if data.Error != nil {
-            log.Infof("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
+            log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
             continue
-            // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
         }
 
         metricdata, ok := stats[metric]
@@ -603,9 +595,8 @@ func (ccms *CCMetricStore) LoadStats(
         }
 
         if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
-            log.Infof("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
+            log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
             continue
-            // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
         }
 
         metricdata[query.Hostname] = schema.MetricStatistics{
@@ -618,7 +609,98 @@ func (ccms *CCMetricStore) LoadStats(
     return stats, nil
 }
 
+// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known!
+// Used for Job-View Statistics Table
+func (ccms *CCMetricStore) LoadScopedStats(
+    job *schema.Job,
+    metrics []string,
+    scopes []schema.MetricScope,
+    ctx context.Context,
+) (schema.ScopedJobStats, error) {
+    queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
+    if err != nil {
+        log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
+        return nil, err
+    }
+
+    req := ApiQueryRequest{
+        Cluster:   job.Cluster,
+        From:      job.StartTime.Unix(),
+        To:        job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(),
+        Queries:   queries,
+        WithStats: true,
+        WithData:  false,
+    }
+
+    resBody, err := ccms.doRequest(ctx, &req)
+    if err != nil {
+        log.Errorf("Error while performing request: %s", err.Error())
+        return nil, err
+    }
+
+    var errors []string
+    scopedJobStats := make(schema.ScopedJobStats)
+
+    for i, row := range resBody.Results {
+        query := req.Queries[i]
+        metric := ccms.toLocalName(query.Metric)
+        scope := assignedScope[i]
+
+        if _, ok := scopedJobStats[metric]; !ok {
+            scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
+        }
+
+        if _, ok := scopedJobStats[metric][scope]; !ok {
+            scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
+        }
+
+        for ndx, res := range row {
+            if res.Error != nil {
+                /* Build list for "partial errors", if any */
+                errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
+                continue
+            }
+
+            id := (*string)(nil)
+            if query.Type != nil {
+                id = new(string)
+                *id = query.TypeIds[ndx]
+            }
+
+            if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() {
+                // "schema.Float()" because regular float64 can not be JSONed when NaN.
+                res.Avg = schema.Float(0)
+                res.Min = schema.Float(0)
+                res.Max = schema.Float(0)
+            }
+
+            scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
+                Hostname: query.Hostname,
+                Id:       id,
+                Data: &schema.MetricStatistics{
+                    Avg: float64(res.Avg),
+                    Min: float64(res.Min),
+                    Max: float64(res.Max),
+                },
+            })
+        }
+
+        // So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty
+        if len(scopedJobStats[metric][scope]) == 0 {
+            delete(scopedJobStats[metric], scope)
+            if len(scopedJobStats[metric]) == 0 {
+                delete(scopedJobStats, metric)
+            }
+        }
+    }
+
+    if len(errors) != 0 {
+        /* Returns list for "partial errors" */
+        return scopedJobStats, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
+    }
+    return scopedJobStats, nil
+}
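On the NaN handling inside LoadScopedStats above: encoding/json refuses NaN and infinite float values, so leaving a NaN Avg/Min/Max in place would make the whole response unmarshalable. A minimal demonstration:

package main

import (
    "encoding/json"
    "fmt"
    "math"
)

func main() {
    // This is why the code above zeroes Avg/Min/Max when any of them is NaN.
    _, err := json.Marshal(math.NaN())
    fmt.Println(err) // json: unsupported value: NaN
}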
+// Used for Systems-View Node-Overview
 func (ccms *CCMetricStore) LoadNodeData(
     cluster string,
     metrics, nodes []string,
@@ -652,7 +734,7 @@ func (ccms *CCMetricStore) LoadNodeData(
 
     resBody, err := ccms.doRequest(ctx, &req)
     if err != nil {
-        log.Error(fmt.Sprintf("Error while performing request %#v\n", err))
+        log.Errorf("Error while performing request: %s", err.Error())
         return nil, err
     }
 
@@ -710,6 +792,7 @@ func (ccms *CCMetricStore) LoadNodeData(
     return data, nil
 }
 
+// Used for Systems-View Node-List
 func (ccms *CCMetricStore) LoadNodeListData(
     cluster, subCluster, nodeFilter string,
     metrics []string,
@@ -768,7 +851,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
 
     queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
     if err != nil {
-        log.Warn("Error while building queries")
+        log.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
         return nil, totalNodes, hasNextPage, err
     }
 
@@ -783,7 +866,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
 
     resBody, err := ccms.doRequest(ctx, &req)
     if err != nil {
-        log.Error(fmt.Sprintf("Error while performing request %#v\n", err))
+        log.Errorf("Error while performing request: %s", err.Error())
         return nil, totalNodes, hasNextPage, err
     }
 
@@ -888,7 +971,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
     if subCluster != "" {
         subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster)
         if scterr != nil {
-            // TODO: Log
+            log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
             return nil, nil, scterr
         }
     }
@@ -898,7 +981,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
         mc := archive.GetMetricConfig(cluster, metric)
         if mc == nil {
             // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
-            log.Infof("metric '%s' is not specified for cluster '%s'", metric, cluster)
+            log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
             continue
         }
 
@ -10,6 +10,8 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -64,6 +66,8 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
||||
ctx context.Context,
|
||||
resolution int) (schema.JobData, error) {
|
||||
|
||||
log.Infof("InfluxDB 2 Backend: Resolution Scaling not Implemented, will return default timestep. Requested Resolution %d", resolution)
|
||||
|
||||
measurementsConds := make([]string, 0, len(metrics))
|
||||
for _, m := range metrics {
|
||||
measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
|
||||
@ -86,7 +90,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
||||
query := ""
|
||||
switch scope {
|
||||
case "node":
|
||||
// Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows
|
||||
// Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows <-- Resolution could be added here?
|
||||
// log.Info("Scope 'node' requested. ")
|
||||
query = fmt.Sprintf(`
|
||||
from(bucket: "%s")
|
||||
@ -116,6 +120,12 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
||||
// idb.bucket,
|
||||
// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
|
||||
// measurementsCond, hostsCond)
|
||||
case "hwthread":
|
||||
log.Info(" Scope 'hwthread' requested, but not yet supported: Will return 'node' scope only. ")
|
||||
continue
|
||||
case "accelerator":
|
||||
log.Info(" Scope 'accelerator' requested, but not yet supported: Will return 'node' scope only. ")
|
||||
continue
|
||||
default:
|
||||
log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
|
||||
continue
|
||||
@ -173,6 +183,11 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
||||
}
|
||||
case "socket":
|
||||
continue
|
||||
case "accelerator":
|
||||
continue
|
||||
case "hwthread":
|
||||
// See below @ core
|
||||
continue
|
||||
case "core":
|
||||
continue
|
||||
// Include Series.Id in hostSeries
|
||||
@ -301,6 +316,53 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// Used in Job-View StatsTable
// UNTESTED
func (idb *InfluxDBv2DataRepository) LoadScopedStats(
	job *schema.Job,
	metrics []string,
	scopes []schema.MetricScope,
	ctx context.Context) (schema.ScopedJobStats, error) {

	// Assumption: idb.loadData() only returns series node-scope - use node scope for statsTable
	scopedJobStats := make(schema.ScopedJobStats)
	data, err := idb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
	if err != nil {
		log.Warn("Error while loading job for scopedJobStats")
		return nil, err
	}

	for metric, metricData := range data {
		for _, scope := range scopes {
			if scope != schema.MetricScopeNode {
				logOnce.Do(func() {
					log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
				})
				continue
			}

			if _, ok := scopedJobStats[metric]; !ok {
				scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
			}

			if _, ok := scopedJobStats[metric][scope]; !ok {
				scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
			}

			for _, series := range metricData[scope].Series {
				scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
					Hostname: series.Hostname,
					Data:     &series.Statistics,
				})
			}
		}
	}

	return scopedJobStats, nil
}
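
For orientation, the nested map built above is keyed metric -> scope -> per-host entries. Below is a minimal, self-contained sketch of how a consumer might walk it; the types are local stand-ins whose shapes are taken from this diff, not imports from the real cc-backend module.

package main

import "fmt"

// Local stand-ins for the cc-backend schema types (assumed shapes).
type MetricScope string
type MetricStatistics struct{ Avg, Min, Max float64 }
type ScopedStats struct {
	Hostname string
	Id       *string
	Data     *MetricStatistics
}
type ScopedJobStats map[string]map[MetricScope][]*ScopedStats

func main() {
	stats := ScopedJobStats{
		"flops_any": {
			"node": {
				{Hostname: "node001", Data: &MetricStatistics{Avg: 42.0, Min: 10.0, Max: 80.0}},
			},
		},
	}
	// Walk metric -> scope -> per-host entries, as the stats table does.
	for metric, scopes := range stats {
		for scope, entries := range scopes {
			for _, e := range entries {
				fmt.Printf("%s/%s on %s: avg=%.1f\n", metric, scope, e.Hostname, e.Data.Avg)
			}
		}
	}
}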

// Used in Systems-View @ Node-Overview
// UNTESTED
func (idb *InfluxDBv2DataRepository) LoadNodeData(
	cluster string,
	metrics, nodes []string,
@@ -308,12 +370,123 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData(
	from, to time.Time,
	ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {

-	// TODO : Implement to be used in Analysis- and System/Node-View
-	log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)
-	return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
+	// Note: scopes[] Array will be ignored, only return node scope

	// CONVERT ARGS TO INFLUX
	measurementsConds := make([]string, 0)
	for _, m := range metrics {
		measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
	}
	measurementsCond := strings.Join(measurementsConds, " or ")

	hostsConds := make([]string, 0)
	if nodes == nil {
		var allNodes []string
		subClusterNodeLists := archive.NodeLists[cluster]
		for _, nodeList := range subClusterNodeLists {
			allNodes = append(allNodes, nodeList.PrintList()...) // fixed: appending to 'nodes' here would drop previously collected entries
		}
		for _, node := range allNodes {
			nodes = append(nodes, node)
			hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node))
		}
	} else {
		for _, node := range nodes {
			hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node))
		}
	}
	hostsCond := strings.Join(hostsConds, " or ")

	// BUILD AND PERFORM QUERY
	query := fmt.Sprintf(`
		from(bucket: "%s")
		|> range(start: %s, stop: %s)
		|> filter(fn: (r) => (%s) and (%s) )
		|> drop(columns: ["_start", "_stop"])
		|> group(columns: ["hostname", "_measurement"])
		|> aggregateWindow(every: 60s, fn: mean)
		|> drop(columns: ["_time"])`,
		idb.bucket,
		idb.formatTime(from), idb.formatTime(to),
		measurementsCond, hostsCond)

	rows, err := idb.queryClient.Query(ctx, query)
	if err != nil {
		log.Error("Error while performing query")
		return nil, err
	}

	// HANDLE QUERY RETURN
	// Collect Float Arrays for Node@Metric -> No Scope Handling!
	influxData := make(map[string]map[string][]schema.Float)
	for rows.Next() {
		row := rows.Record()
		host, field := row.ValueByKey("hostname").(string), row.Measurement()

		influxHostData, ok := influxData[host]
		if !ok {
			influxHostData = make(map[string][]schema.Float)
			influxData[host] = influxHostData
		}

		influxFieldData, ok := influxData[host][field]
		if !ok {
			influxFieldData = make([]schema.Float, 0)
			influxData[host][field] = influxFieldData
		}

		val, ok := row.Value().(float64)
		if ok {
			influxData[host][field] = append(influxData[host][field], schema.Float(val))
		} else {
			influxData[host][field] = append(influxData[host][field], schema.Float(0))
		}
	}

	// BUILD FUNCTION RETURN
	data := make(map[string]map[string][]*schema.JobMetric)
	for node, metricData := range influxData {

		nodeData, ok := data[node]
		if !ok {
			nodeData = make(map[string][]*schema.JobMetric)
			data[node] = nodeData
		}

		for metric, floatArray := range metricData {
			// fixed: starting min/max at 0.0 would clamp results for all-positive or all-negative series
			avg, min, max := 0.0, math.Inf(1), math.Inf(-1)
			for _, val := range floatArray {
				avg += float64(val)
				min = math.Min(min, float64(val))
				max = math.Max(max, float64(val))
			}

			stats := schema.MetricStatistics{
				Avg: (math.Round((avg/float64(len(floatArray)))*100) / 100),
				Min: (math.Round(min*100) / 100),
				Max: (math.Round(max*100) / 100),
			}

			mc := archive.GetMetricConfig(cluster, metric)
			nodeData[metric] = append(nodeData[metric], &schema.JobMetric{
				Unit:     mc.Unit,
				Timestep: mc.Timestep,
				Series: []schema.Series{
					{
						Hostname:   node,
						Statistics: stats,
						Data:       floatArray,
					},
				},
			})
		}
	}

	return data, nil
}
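
One standalone takeaway from the statistics fold above: starting min and max at 0.0 silently clamps results, which is why the reconstruction initializes them at plus/minus infinity. A hedged sketch of the corrected fold, with a local Float alias standing in for schema.Float:

package main

import (
	"fmt"
	"math"
)

type Float float64

// aggregate folds a series into avg/min/max; min and max start at +/-Inf so
// all-positive or all-negative series are handled correctly.
func aggregate(series []Float) (avg, min, max float64) {
	min, max = math.Inf(1), math.Inf(-1)
	for _, v := range series {
		avg += float64(v)
		min = math.Min(min, float64(v))
		max = math.Max(max, float64(v))
	}
	if n := len(series); n > 0 {
		avg /= float64(n)
	}
	// Round to two decimals, as the repository code does.
	return math.Round(avg*100) / 100, math.Round(min*100) / 100, math.Round(max*100) / 100
}

func main() {
	fmt.Println(aggregate([]Float{3.2, 1.4, 7.9})) // 4.17 1.4 7.9
}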
// Used in Systems-View @ Node-List
// UNTESTED
func (idb *InfluxDBv2DataRepository) LoadNodeListData(
	cluster, subCluster, nodeFilter string,
	metrics []string,
@@ -324,10 +497,79 @@ func (idb *InfluxDBv2DataRepository) LoadNodeListData(
	ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {

	// Assumption: idb.loadData() only returns series node-scope - use node scope for NodeList

	// 0) Init additional vars
	var totalNodes int = 0
	var hasNextPage bool = false
-	// TODO : Implement to be used in NodeList-View
-	log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
-	return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")

	// 1) Get list of all nodes
	var nodes []string
	if subCluster != "" {
		scNodes := archive.NodeLists[cluster][subCluster]
		nodes = scNodes.PrintList()
	} else {
		subClusterNodeLists := archive.NodeLists[cluster]
		for _, nodeList := range subClusterNodeLists {
			nodes = append(nodes, nodeList.PrintList()...)
		}
	}

	// 2) Filter nodes
	if nodeFilter != "" {
		filteredNodes := []string{}
		for _, node := range nodes {
			if strings.Contains(node, nodeFilter) {
				filteredNodes = append(filteredNodes, node)
			}
		}
		nodes = filteredNodes
	}

	// 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ...
	totalNodes = len(nodes)
	sort.Strings(nodes)

	// 3) Apply paging
	if len(nodes) > page.ItemsPerPage {
		start := (page.Page - 1) * page.ItemsPerPage
		end := start + page.ItemsPerPage
		if end > len(nodes) {
			end = len(nodes)
			hasNextPage = false
		} else {
			hasNextPage = true
		}
		nodes = nodes[start:end]
	}
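
Step 3 above is plain offset paging. The same arithmetic as a small free-standing helper (names are illustrative, not from the repository); note that, unlike the snippet above, it treats end == len(items) as the last page:

package main

import "fmt"

// paginate slices items for a 1-based page of the given size and reports
// whether another page follows.
func paginate(items []string, page, perPage int) ([]string, bool) {
	if len(items) <= perPage {
		return items, false
	}
	start := (page - 1) * perPage
	if start >= len(items) {
		return nil, false
	}
	end := start + perPage
	if end >= len(items) {
		return items[start:], false
	}
	return items[start:end], true
}

func main() {
	nodes := []string{"n01", "n02", "n03", "n04", "n05"}
	pageItems, more := paginate(nodes, 2, 2)
	fmt.Println(pageItems, more) // [n03 n04] true
}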
	// 4) Fetch And Convert Data, use idb.LoadNodeData() for query

	rawNodeData, err := idb.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
	if err != nil {
		log.Error(fmt.Sprintf("Error while loading influx nodeData for nodeListData %#v\n", err))
		return nil, totalNodes, hasNextPage, err
	}

	data := make(map[string]schema.JobData)
	for node, nodeData := range rawNodeData {
		// Init Nested Map Data Structures If Not Found
		hostData, ok := data[node]
		if !ok {
			hostData = make(schema.JobData)
			data[node] = hostData
		}

		for metric, nodeMetricData := range nodeData {
			metricData, ok := hostData[metric]
			if !ok {
				metricData = make(map[schema.MetricScope]*schema.JobMetric)
				data[node][metric] = metricData
			}

			data[node][metric][schema.MetricScopeNode] = nodeMetricData[0] // Only Node Scope Returned from loadNodeData
		}
	}

	return data, totalNodes, hasNextPage, nil
}
@@ -24,9 +24,12 @@ type MetricDataRepository interface {
 	// Return the JobData for the given job, only with the requested metrics.
 	LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error)

-	// Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now.
+	// Return a map of metrics to a map of nodes to the metric statistics of the job. node scope only.
 	LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error)

+	// Return a map of metrics to a map of scopes to the scoped metric statistics of the job.
+	LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error)
+
 	// Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node.
 	LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
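
Any new metric data backend has to satisfy this contract. A compile-time sketch with local stand-in types (the real code would import schema and implement against *schema.Job and friends):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// Stand-ins for the cc-backend schema types (assumed shapes, not the real module).
type (
	Job              struct{ ID int64 }
	MetricScope      string
	JobMetric        struct{ Timestep int }
	JobData          map[string]map[MetricScope]*JobMetric
	MetricStatistics struct{ Avg, Min, Max float64 }
	ScopedStats      struct{ Hostname string }
	ScopedJobStats   map[string]map[MetricScope][]*ScopedStats
)

// The contract from the hunk above, restated against the stand-in types.
type MetricDataRepository interface {
	LoadData(job *Job, metrics []string, scopes []MetricScope, ctx context.Context, resolution int) (JobData, error)
	LoadStats(job *Job, metrics []string, ctx context.Context) (map[string]map[string]MetricStatistics, error)
	LoadScopedStats(job *Job, metrics []string, scopes []MetricScope, ctx context.Context) (ScopedJobStats, error)
	LoadNodeData(cluster string, metrics, nodes []string, scopes []MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*JobMetric, error)
}

// noopRepo answers every call with an error, much like TestMetricDataRepository's panics.
type noopRepo struct{}

func (noopRepo) LoadData(*Job, []string, []MetricScope, context.Context, int) (JobData, error) {
	return nil, errors.New("unimplemented")
}

func (noopRepo) LoadStats(*Job, []string, context.Context) (map[string]map[string]MetricStatistics, error) {
	return nil, errors.New("unimplemented")
}

func (noopRepo) LoadScopedStats(*Job, []string, []MetricScope, context.Context) (ScopedJobStats, error) {
	return nil, errors.New("unimplemented")
}

func (noopRepo) LoadNodeData(string, []string, []string, []MetricScope, time.Time, time.Time, context.Context) (map[string]map[string][]*JobMetric, error) {
	return nil, errors.New("unimplemented")
}

var _ MetricDataRepository = noopRepo{} // compile-time check

func main() { fmt.Println("noopRepo satisfies MetricDataRepository") }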
@@ -448,6 +448,51 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
 	return data, nil
 }

// Implemented by NHR@FAU; Used in Job-View StatsTable
func (pdb *PrometheusDataRepository) LoadScopedStats(
	job *schema.Job,
	metrics []string,
	scopes []schema.MetricScope,
	ctx context.Context) (schema.ScopedJobStats, error) {

	// Assumption: pdb.loadData() only returns series node-scope - use node scope for statsTable
	scopedJobStats := make(schema.ScopedJobStats)
	data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
	if err != nil {
		log.Warn("Error while loading job for scopedJobStats")
		return nil, err
	}

	for metric, metricData := range data {
		for _, scope := range scopes {
			if scope != schema.MetricScopeNode {
				logOnce.Do(func() {
					log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
				})
				continue
			}

			if _, ok := scopedJobStats[metric]; !ok {
				scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
			}

			if _, ok := scopedJobStats[metric][scope]; !ok {
				scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
			}

			for _, series := range metricData[scope].Series {
				scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
					Hostname: series.Hostname,
					Data:     &series.Statistics,
				})
			}
		}
	}

	return scopedJobStats, nil
}
// Implemented by NHR@FAU; Used in NodeList-View
func (pdb *PrometheusDataRepository) LoadNodeListData(
	cluster, subCluster, nodeFilter string,
	metrics []string,
@@ -458,10 +503,132 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
	ctx context.Context,
) (map[string]schema.JobData, int, bool, error) {

	// Assumption: pdb.loadData() only returns series node-scope - use node scope for NodeList

	// 0) Init additional vars
	var totalNodes int = 0
	var hasNextPage bool = false
-	// TODO : Implement to be used in NodeList-View
-	log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
-	return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository")

	// 1) Get list of all nodes
	var nodes []string
	if subCluster != "" {
		scNodes := archive.NodeLists[cluster][subCluster]
		nodes = scNodes.PrintList()
	} else {
		subClusterNodeLists := archive.NodeLists[cluster]
		for _, nodeList := range subClusterNodeLists {
			nodes = append(nodes, nodeList.PrintList()...)
		}
	}

	// 2) Filter nodes
	if nodeFilter != "" {
		filteredNodes := []string{}
		for _, node := range nodes {
			if strings.Contains(node, nodeFilter) {
				filteredNodes = append(filteredNodes, node)
			}
		}
		nodes = filteredNodes
	}

	// 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ...
	totalNodes = len(nodes)
	sort.Strings(nodes)

	// 3) Apply paging
	if len(nodes) > page.ItemsPerPage {
		start := (page.Page - 1) * page.ItemsPerPage
		end := start + page.ItemsPerPage
		if end > len(nodes) {
			end = len(nodes)
			hasNextPage = false
		} else {
			hasNextPage = true
		}
		nodes = nodes[start:end]
	}

	// 4) Fetch Data, based on pdb.LoadNodeData()

	t0 := time.Now()
	// Map of hosts of jobData
	data := make(map[string]schema.JobData)

	// query db for each metric
	// TODO: scopes seems to be always empty
	if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
		scopes = append(scopes, schema.MetricScopeNode)
	}

	for _, scope := range scopes {
		if scope != schema.MetricScopeNode {
			logOnce.Do(func() {
				log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
			})
			continue
		}

		for _, metric := range metrics {
			metricConfig := archive.GetMetricConfig(cluster, metric)
			if metricConfig == nil {
				log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
				return nil, totalNodes, hasNextPage, errors.New("Prometheus config error")
			}
			query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
			if err != nil {
				log.Warn("Error while formatting prometheus query")
				return nil, totalNodes, hasNextPage, err
			}

			// ranged query over all nodes
			r := promv1.Range{
				Start: from,
				End:   to,
				Step:  time.Duration(metricConfig.Timestep * 1e9),
			}
			result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
			if err != nil {
				log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
				return nil, totalNodes, hasNextPage, errors.New("Prometheus query error")
			}
			if len(warnings) > 0 {
				log.Warnf("Warnings: %v\n", warnings)
			}

			step := int64(metricConfig.Timestep)
			steps := int64(to.Sub(from).Seconds()) / step

			// iter rows of host, metric, values
			for _, row := range result.(promm.Matrix) {
				hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)

				hostdata, ok := data[hostname]
				if !ok {
					hostdata = make(schema.JobData)
					data[hostname] = hostdata
				}

				metricdata, ok := hostdata[metric]
				if !ok {
					metricdata = make(map[schema.MetricScope]*schema.JobMetric)
					data[hostname][metric] = metricdata
				}

				// output per host, metric and scope
				scopeData, ok := metricdata[scope]
				if !ok {
					scopeData = &schema.JobMetric{
						Unit:     metricConfig.Unit,
						Timestep: metricConfig.Timestep,
						Series:   []schema.Series{pdb.RowToSeries(from, step, steps, row)},
					}
					data[hostname][metric][scope] = scopeData
				}
			}
		}
	}
	t1 := time.Since(t0)
	log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
	return data, totalNodes, hasNextPage, nil
}
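
One detail worth noting in the range query above: Step: time.Duration(metricConfig.Timestep * 1e9) turns a timestep given in seconds into nanoseconds, the unit time.Duration counts in. A small sketch showing the equivalent, more self-documenting spelling:

package main

import (
	"fmt"
	"time"
)

func main() {
	timestep := 60 // seconds, as in metricConfig.Timestep

	byNanos := time.Duration(timestep * 1e9)           // pattern used in the hunk above
	bySeconds := time.Duration(timestep) * time.Second // equivalent and clearer

	fmt.Println(byNanos == bySeconds, bySeconds) // true 1m0s
}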
@@ -36,7 +36,17 @@ func (tmdr *TestMetricDataRepository) LoadData(

 func (tmdr *TestMetricDataRepository) LoadStats(
 	job *schema.Job,
-	metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
+	metrics []string,
+	ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {

 	panic("TODO")
 }

+func (tmdr *TestMetricDataRepository) LoadScopedStats(
+	job *schema.Job,
+	metrics []string,
+	scopes []schema.MetricScope,
+	ctx context.Context) (schema.ScopedJobStats, error) {
+
+	panic("TODO")
+}
@@ -59,17 +59,15 @@ func Connect(driver string, db string) {
 		} else {
 			dbHandle, err = sqlx.Open("sqlite3", opts.URL)
 		}
-		if err != nil {
-			log.Fatal(err)
-		}
 	case "mysql":
 		opts.URL += "?multiStatements=true"
 		dbHandle, err = sqlx.Open("mysql", opts.URL)
-		if err != nil {
-			log.Fatalf("sqlx.Open() error: %v", err)
-		}
 	default:
-		log.Fatalf("unsupported database driver: %s", driver)
+		log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
 	}

+	if err != nil {
+		log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
+	}

 	dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)

@@ -80,7 +78,7 @@ func Connect(driver string, db string) {
 	dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
 	err = checkDBVersion(driver, dbHandle.DB)
 	if err != nil {
-		log.Fatal(err)
+		log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
 	}
 	})
}
@@ -54,7 +54,7 @@ func checkDBVersion(backend string, db *sql.DB) error {
 			return err
 		}
 	default:
-		log.Fatalf("unsupported database backend: %s", backend)
+		log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
 	}

 	v, dirty, err := m.Version()

@@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err error) {
 			return m, err
 		}
 	default:
-		log.Fatalf("unsupported database backend: %s", backend)
+		log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
 	}

 	return m, nil
@@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo {

 	lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
 	if err != nil {
-		log.Fatalf("db.DB.Preparex() error: %v", err)
+		log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
 	}

 	userCfgRepoInstance = &UserCfgRepo{
@@ -40,7 +40,7 @@ func Start() {
 	jobRepo = repository.GetJobRepository()
 	s, err = gocron.NewScheduler()
 	if err != nil {
-		log.Fatalf("Error while creating gocron scheduler: %s", err.Error())
+		log.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
 	}

 	if config.Keys.StopJobsExceedingWalltime > 0 {
@@ -27,6 +27,8 @@ type ArchiveBackend interface {

 	LoadJobData(job *schema.Job) (schema.JobData, error)

+	LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error)
+
 	LoadClusterCfg(name string) (*schema.Cluster, error)

 	StoreJobMeta(jobMeta *schema.JobMeta) error

@@ -87,7 +89,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
 		var version uint64
 		version, err = ar.Init(rawConfig)
 		if err != nil {
-			log.Error("Error while initializing archiveBackend")
+			log.Errorf("Error while initializing archiveBackend: %s", err.Error())
 			return
 		}
 		log.Infof("Load archive version %d", version)

@@ -110,7 +112,7 @@ func LoadAveragesFromArchive(
 ) error {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}

@@ -125,7 +127,7 @@ func LoadAveragesFromArchive(
 	return nil
 }

-// Helper to metricdataloader.LoadStatData().
+// Helper to metricdataloader.LoadJobStats().
 func LoadStatsFromArchive(
 	job *schema.Job,
 	metrics []string,
@@ -133,7 +135,7 @@ func LoadStatsFromArchive(
 	data := make(map[string]schema.MetricStatistics, len(metrics))
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return data, err
 	}

@@ -154,10 +156,26 @@ func LoadStatsFromArchive(
 	return data, nil
 }

+// Helper to metricdataloader.LoadScopedJobStats().
+func LoadScopedStatsFromArchive(
+	job *schema.Job,
+	metrics []string,
+	scopes []schema.MetricScope,
+) (schema.ScopedJobStats, error) {
+
+	data, err := ar.LoadJobStats(job)
+	if err != nil {
+		log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
+		return nil, err
+	}
+
+	return data, nil
+}
+
 func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return nil, err
 	}

@@ -173,7 +191,7 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error {

 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}

@@ -193,7 +211,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error {

 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}
|
@ -115,6 +115,40 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
|
||||
f, err := os.Open(filename)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("fsBackend LoadJobStats()- %v", err)
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if isCompressed {
|
||||
r, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
log.Errorf(" %v", err)
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
if config.Keys.Validate {
|
||||
if err := schema.Validate(schema.Data, r); err != nil {
|
||||
return nil, fmt.Errorf("validate job data: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return DecodeJobStats(r, filename)
|
||||
} else {
|
||||
if config.Keys.Validate {
|
||||
if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil {
|
||||
return nil, fmt.Errorf("validate job data: %v", err)
|
||||
}
|
||||
}
|
||||
return DecodeJobStats(bufio.NewReader(f), filename)
|
||||
}
|
||||
}
|
||||
|
||||
func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
|
||||
|
||||
var config FsArchiveConfig
|
||||
@ -389,6 +423,18 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
|
||||
return loadJobData(filename, isCompressed)
|
||||
}
|
||||
|
||||
func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) {
|
||||
var isCompressed bool = true
|
||||
filename := getPath(job, fsa.path, "data.json.gz")
|
||||
|
||||
if !util.CheckFileExists(filename) {
|
||||
filename = getPath(job, fsa.path, "data.json")
|
||||
isCompressed = false
|
||||
}
|
||||
|
||||
return loadJobStats(filename, isCompressed)
|
||||
}
|
||||
|
||||
func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
|
||||
filename := getPath(job, fsa.path, "meta.json")
|
||||
return loadJobMeta(filename)
|
||||
|
@@ -32,6 +32,43 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	return data.(schema.JobData), nil
 }

+func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) {
+	jobData, err := DecodeJobData(r, k)
+	// Convert schema.JobData to schema.ScopedJobStats
+	if jobData != nil {
+		scopedJobStats := make(schema.ScopedJobStats)
+		for metric, metricData := range jobData {
+			if _, ok := scopedJobStats[metric]; !ok {
+				scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
+			}
+
+			for scope, jobMetric := range metricData {
+				if _, ok := scopedJobStats[metric][scope]; !ok {
+					scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
+				}
+
+				for _, series := range jobMetric.Series {
+					scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
+						Hostname: series.Hostname,
+						Id:       series.Id,
+						Data:     &series.Statistics,
+					})
+				}
+
+				// So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty
+				if len(scopedJobStats[metric][scope]) == 0 {
+					delete(scopedJobStats[metric], scope)
+					if len(scopedJobStats[metric]) == 0 {
+						delete(scopedJobStats, metric)
+					}
+				}
+			}
+		}
+		return scopedJobStats, nil
+	}
+	return nil, err
+}

 func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
 	var d schema.JobMeta
 	if err := json.NewDecoder(r).Decode(&d); err != nil {
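
A hedged sketch of how DecodeJobStats might be driven from a compressed archive file. It assumes placement in the same package as the decoder above (with "compress/gzip" and "os" imported), and the helper name and file path are illustrative only:

// readStatsFromArchive is a hypothetical caller of DecodeJobStats; it opens a
// gzip-compressed data file (e.g. ".../data.json.gz") and hands the stream to
// the decoder.
func readStatsFromArchive(filename string) (schema.ScopedJobStats, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	r, err := gzip.NewReader(f)
	if err != nil {
		return nil, err
	}
	defer r.Close()

	return DecodeJobStats(r, filename)
}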
pkg/log/log.go
@@ -46,12 +46,12 @@ var loglevel string = "info"

 /* CONFIG */

 func Init(lvl string, logdate bool) {

 	// Discard I/O for all writers below selected loglevel; <CRITICAL> is always written.
 	switch lvl {
 	case "crit":
 		ErrWriter = io.Discard
 		fallthrough
-	case "err", "fatal":
+	case "err":
 		WarnWriter = io.Discard
 		fallthrough
 	case "warn":

@@ -63,8 +63,7 @@ func Init(lvl string, logdate bool) {
 		// Nothing to do...
 		break
 	default:
-		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel 'debug'\n", lvl)
-		//SetLogLevel("debug")
+		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel '%s'\n", lvl, loglevel)
 	}

 	if !logdate {

@@ -84,109 +83,138 @@ func Init(lvl string, logdate bool) {
 	loglevel = lvl
 }

-/* PRINT */
-
-// Private helper
-func printStr(v ...interface{}) string {
-	return fmt.Sprint(v...)
-}
-
-// Uses Info() -> If errorpath required at some point:
-// Will need own writer with 'Output(2, out)' to correctly render path
-func Print(v ...interface{}) {
-	Info(v...)
-}
-
-func Debug(v ...interface{}) {
-	DebugLog.Output(2, printStr(v...))
-}
-
-func Info(v ...interface{}) {
-	InfoLog.Output(2, printStr(v...))
-}
-
-func Warn(v ...interface{}) {
-	WarnLog.Output(2, printStr(v...))
-}
-
-func Error(v ...interface{}) {
-	ErrLog.Output(2, printStr(v...))
-}
-
-// Writes panic stacktrace, but keeps application alive
-func Panic(v ...interface{}) {
-	ErrLog.Output(2, printStr(v...))
-	panic("Panic triggered ...")
-}
-
-func Crit(v ...interface{}) {
-	CritLog.Output(2, printStr(v...))
-}
-
-// Writes critical log, stops application
-func Fatal(v ...interface{}) {
-	CritLog.Output(2, printStr(v...))
-	os.Exit(1)
-}
-
-/* PRINT FORMAT*/
-
-// Private helper
-func printfStr(format string, v ...interface{}) string {
-	return fmt.Sprintf(format, v...)
-}
-
-// Uses Infof() -> If errorpath required at some point:
-// Will need own writer with 'Output(2, out)' to correctly render path
-func Printf(format string, v ...interface{}) {
-	Infof(format, v...)
-}
-
-func Debugf(format string, v ...interface{}) {
-	DebugLog.Output(2, printfStr(format, v...))
-}
-
-func Infof(format string, v ...interface{}) {
-	InfoLog.Output(2, printfStr(format, v...))
-}
-
-func Warnf(format string, v ...interface{}) {
-	WarnLog.Output(2, printfStr(format, v...))
-}
-
-func Errorf(format string, v ...interface{}) {
-	ErrLog.Output(2, printfStr(format, v...))
-}
-
-// Writes panic stacktrace, but keeps application alive
-func Panicf(format string, v ...interface{}) {
-	ErrLog.Output(2, printfStr(format, v...))
-	panic("Panic triggered ...")
-}
-
-func Critf(format string, v ...interface{}) {
-	CritLog.Output(2, printfStr(format, v...))
-}
-
-// Writes crit log, stops application
-func Fatalf(format string, v ...interface{}) {
-	CritLog.Output(2, printfStr(format, v...))
-	os.Exit(1)
-}

 /* HELPER */

 func Loglevel() string {
 	return loglevel
 }

-/* SPECIAL */
+/* PRIVATE HELPER */

-// func Finfof(w io.Writer, format string, v ...interface{}) {
-// 	if w != io.Discard {
-// 		if logDateTime {
-// 			currentTime := time.Now()
-// 			fmt.Fprintf(InfoWriter, currentTime.String()+InfoPrefix+format+"\n", v...)
-// 		} else {
-// 			fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
-// 		}
-// 	}
-// }

+// Return unformatted string
+func printStr(v ...interface{}) string {
+	return fmt.Sprint(v...)
+}
+
+// Return formatted string
+func printfStr(format string, v ...interface{}) string {
+	return fmt.Sprintf(format, v...)
+}
+
+/* PRINT */
+
+// Prints to STDOUT without string formatting; application continues.
+// Used for special cases not requiring log information like date or location.
+func Print(v ...interface{}) {
+	fmt.Fprintln(os.Stdout, v...)
+}
+
+// Prints to STDOUT without string formatting; application exits with error code 0.
+// Used for exiting successfully with message after expected outcome, e.g. successful single-call application runs.
+func Exit(v ...interface{}) {
+	fmt.Fprintln(os.Stdout, v...)
+	os.Exit(0)
+}
+
+// Prints to STDOUT without string formatting; application exits with error code 1.
+// Used for terminating with message after expected errors, e.g. wrong arguments or during init().
+func Abort(v ...interface{}) {
+	fmt.Fprintln(os.Stdout, v...)
+	os.Exit(1)
+}
+
+// Prints to DEBUG writer without string formatting; application continues.
+// Used for logging additional information, primarily for development.
+func Debug(v ...interface{}) {
+	DebugLog.Output(2, printStr(v...))
+}
+
+// Prints to INFO writer without string formatting; application continues.
+// Used for logging additional information, e.g. notable returns or common fail-cases.
+func Info(v ...interface{}) {
+	InfoLog.Output(2, printStr(v...))
+}
+
+// Prints to WARNING writer without string formatting; application continues.
+// Used for logging important information, e.g. uncommon edge-cases or administration related information.
+func Warn(v ...interface{}) {
+	WarnLog.Output(2, printStr(v...))
+}
+
+// Prints to ERROR writer without string formatting; application continues.
+// Used for logging errors, but code still can return default(s) or nil.
+func Error(v ...interface{}) {
+	ErrLog.Output(2, printStr(v...))
+}
+
+// Prints to CRITICAL writer without string formatting; application exits with error code 1.
+// Used for terminating on unexpected errors with date and code location.
+func Fatal(v ...interface{}) {
+	CritLog.Output(2, printStr(v...))
+	os.Exit(1)
+}
+
+// Prints to PANIC function without string formatting; application exits with panic.
+// Used for terminating on unexpected errors with stacktrace.
+func Panic(v ...interface{}) {
+	panic(printStr(v...))
+}
+
+/* PRINT FORMAT */
+
+// Prints to STDOUT with string formatting; application continues.
+// Used for special cases not requiring log information like date or location.
+func Printf(format string, v ...interface{}) {
+	fmt.Fprintf(os.Stdout, format, v...)
+}
+
+// Prints to STDOUT with string formatting; application exits with error code 0.
+// Used for exiting successfully with message after expected outcome, e.g. successful single-call application runs.
+func Exitf(format string, v ...interface{}) {
+	fmt.Fprintf(os.Stdout, format, v...)
+	os.Exit(0)
+}
+
+// Prints to STDOUT with string formatting; application exits with error code 1.
+// Used for terminating with message after expected errors, e.g. wrong arguments or during init().
+func Abortf(format string, v ...interface{}) {
+	fmt.Fprintf(os.Stdout, format, v...)
+	os.Exit(1)
+}
+
+// Prints to DEBUG writer with string formatting; application continues.
+// Used for logging additional information, primarily for development.
+func Debugf(format string, v ...interface{}) {
+	DebugLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to INFO writer with string formatting; application continues.
+// Used for logging additional information, e.g. notable returns or common fail-cases.
+func Infof(format string, v ...interface{}) {
+	InfoLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to WARNING writer with string formatting; application continues.
+// Used for logging important information, e.g. uncommon edge-cases or administration related information.
+func Warnf(format string, v ...interface{}) {
+	WarnLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to ERROR writer with string formatting; application continues.
+// Used for logging errors, but code still can return default(s) or nil.
+func Errorf(format string, v ...interface{}) {
+	ErrLog.Output(2, printfStr(format, v...))
+}
+
+// Prints to CRITICAL writer with string formatting; application exits with error code 1.
+// Used for terminating on unexpected errors with date and code location.
+func Fatalf(format string, v ...interface{}) {
+	CritLog.Output(2, printfStr(format, v...))
+	os.Exit(1)
+}
+
+// Prints to PANIC function with string formatting; application exits with panic.
+// Used for terminating on unexpected errors with stacktrace.
+func Panicf(format string, v ...interface{}) {
+	panic(printfStr(format, v...))
+}
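
Taken together, the reworked package now separates plain STDOUT output (Print/Exit/Abort and their -f variants) from leveled writers (Debug through Fatal/Panic). A short usage sketch, assuming the package is imported from this repository's pkg/log:

package main

import "github.com/ClusterCockpit/cc-backend/pkg/log"

func main() {
	// Select loglevel "warn" without date/time prefixes.
	log.Init("warn", false)

	log.Debugf("resolution %d", 60)        // discarded: below level "warn"
	log.Infof("loaded %d jobs", 3)         // discarded: below level "warn"
	log.Warn("suspicious but recoverable") // written to the WARNING writer
	log.Print("plain STDOUT line without log decoration")

	// Expected terminations (commented out so the sketch runs to completion):
	// log.Exitf("done after %d jobs\n", 42)   // exit code 0
	// log.Abortf("bad argument: '%s'\n", "x") // exit code 1
}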
@@ -15,6 +15,7 @@ import (
 )

 type JobData map[string]map[MetricScope]*JobMetric
+type ScopedJobStats map[string]map[MetricScope][]*ScopedStats

 type JobMetric struct {
 	StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"`

@@ -30,6 +31,12 @@ type Series struct {
 	Statistics MetricStatistics `json:"statistics"`
 }

+type ScopedStats struct {
+	Hostname string            `json:"hostname"`
+	Id       *string           `json:"id,omitempty"`
+	Data     *MetricStatistics `json:"data"`
+}
+
 type MetricStatistics struct {
 	Avg float64 `json:"avg"`
 	Min float64 `json:"min"`
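
The struct tags above fix the wire format of a single stats entry. A self-contained round-trip sketch with local copies of the two structs (shapes copied from the diff, not imported):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the types from the hunk above.
type MetricStatistics struct {
	Avg float64 `json:"avg"`
	Min float64 `json:"min"`
	Max float64 `json:"max"`
}

type ScopedStats struct {
	Hostname string            `json:"hostname"`
	Id       *string           `json:"id,omitempty"`
	Data     *MetricStatistics `json:"data"`
}

func main() {
	id := "0"
	s := ScopedStats{
		Hostname: "node001",
		Id:       &id, // omitted entirely when nil, e.g. for node scope
		Data:     &MetricStatistics{Avg: 1.2, Min: 0.3, Max: 2.1},
	}
	out, _ := json.Marshal(s)
	fmt.Println(string(out))
	// {"hostname":"node001","id":"0","data":{"avg":1.2,"min":0.3,"max":2.1}}
}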
@@ -22,8 +22,7 @@ func parseDate(in string) int64 {
 	if in != "" {
 		t, err := time.ParseInLocation(shortForm, in, loc)
 		if err != nil {
-			fmt.Printf("date parse error %v", err)
-			os.Exit(0)
+			log.Abortf("Archive Manager Main: Date parse failed with input: '%s'\nError: %s\n", in, err.Error())
 		}
 		return t.Unix()
 	}
web/frontend/package-lock.json (generated)
@@ -59,9 +59,9 @@
       }
     },
     "node_modules/@babel/runtime": {
-      "version": "7.26.0",
-      "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz",
-      "integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==",
+      "version": "7.27.0",
+      "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.0.tgz",
+      "integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==",
       "license": "MIT",
       "dependencies": {
         "regenerator-runtime": "^0.14.0"
@@ -40,7 +40,7 @@
   import JobRoofline from "./job/JobRoofline.svelte";
   import EnergySummary from "./job/EnergySummary.svelte";
   import PlotGrid from "./generic/PlotGrid.svelte";
-  import StatsTable from "./job/StatsTable.svelte";
+  import StatsTab from "./job/StatsTab.svelte";

   export let dbid;
   export let username;

@@ -53,10 +53,8 @@
   let isMetricsSelectionOpen = false,
     selectedMetrics = [],
-    selectedScopes = [];
-
-  let plots = {},
-    statsTable
+    selectedScopes = [],
+    plots = {};

   let availableMetrics = new Set(),
     missingMetrics = [],

@@ -127,28 +125,17 @@
     let job = $initq.data.job;
     if (!job) return;

-    const pendingMetrics = [
-      ...(
-        (
-          ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
-          ccconfig[`job_view_selectedMetrics:${job.cluster}`]
-        ) ||
-        $initq.data.globalMetrics
-          .reduce((names, gm) => {
-            if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) {
-              names.push(gm.name);
-            }
-            return names;
-          }, [])
-      ),
-      ...(
-        (
-          ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
-          ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`]
-        ) ||
-        ccconfig[`job_view_nodestats_selectedMetrics`]
-      ),
-    ];
+    const pendingMetrics = (
+      ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
+      ccconfig[`job_view_selectedMetrics:${job.cluster}`]
+    ) ||
+    $initq.data.globalMetrics
+      .reduce((names, gm) => {
+        if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) {
+          names.push(gm.name);
+        }
+        return names;
+      }, [])

     // Select default Scopes to load: Check before if any metric has accelerator scope by default
     const accScopeDefault = [...pendingMetrics].some(function (m) {

@@ -231,7 +218,7 @@
   <Col xs={12} md={6} xl={3} class="mb-3 mb-xxl-0">
     {#if $initq.error}
       <Card body color="danger">{$initq.error.message}</Card>
-    {:else if $initq.data}
+    {:else if $initq?.data}
       <Card class="overflow-auto" style="height: 400px;">
         <TabContent> <!-- on:tab={(e) => (status = e.detail)} -->
           {#if $initq.data?.job?.metaData?.message}

@@ -305,7 +292,7 @@
   <Card class="mb-3">
     <CardBody>
       <Row class="mb-2">
-        {#if $initq.data}
+        {#if $initq?.data}
          <Col xs="auto">
            <Button outline on:click={() => (isMetricsSelectionOpen = true)} color="primary">
              Select Metrics (Selected {selectedMetrics.length} of {availableMetrics.size} available)

@@ -318,7 +305,7 @@
   {#if $jobMetrics.error}
     <Row class="mt-2">
       <Col>
-        {#if $initq.data.job.monitoringStatus == 0 || $initq.data.job.monitoringStatus == 2}
+        {#if $initq?.data && ($initq.data.job?.monitoringStatus == 0 || $initq.data.job?.monitoringStatus == 2)}
          <Card body color="warning">Not monitored or archiving failed</Card>
          <br />
        {/if}

@@ -343,7 +330,6 @@
   {#if item.data}
     <Metric
       bind:this={plots[item.metric]}
-      on:more-loaded={({ detail }) => statsTable.moreLoaded(detail)}
      job={$initq.data.job}
      metricName={item.metric}
      metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit}

@@ -365,7 +351,7 @@
   <!-- Statistics Table -->
   <Row class="mb-3">
     <Col>
-      {#if $initq.data}
+      {#if $initq?.data}
        <Card>
          <TabContent>
            {#if somethingMissing}

@@ -398,22 +384,8 @@
       </div>
     </TabPane>
   {/if}
-  <TabPane
-    tabId="stats"
-    tab="Statistics Table"
-    class="overflow-x-auto"
-    active={!somethingMissing}
-  >
-    {#if $jobMetrics?.data?.jobMetrics}
-      {#key $jobMetrics.data.jobMetrics}
-        <StatsTable
-          bind:this={statsTable}
-          job={$initq.data.job}
-          jobMetrics={$jobMetrics.data.jobMetrics}
-        />
-      {/key}
-    {/if}
-  </TabPane>
+  <!-- Includes <TabPane> Statistics Table with Independent GQL Query -->
+  <StatsTab job={$initq.data.job} clusters={$initq.data.clusters} tabActive={!somethingMissing}/>
   <TabPane tabId="job-script" tab="Job Script">
     <div class="pre-wrapper">
       {#if $initq.data.job.metaData?.jobScript}

@@ -440,7 +412,7 @@
   </Col>
 </Row>

-{#if $initq.data}
+{#if $initq?.data}
   <MetricSelection
     cluster={$initq.data.job.cluster}
     subCluster={$initq.data.job.subCluster}
@@ -150,11 +150,6 @@

   // On additional scope request
   if (selectedScope == "load-all") {
-    // Push scope to statsTable (Needs to be in this case, else newly selected 'Metric.svelte' renders cause statsTable race condition)
-    const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node")
-    if (statsTableData.length > 0) {
-      dispatch("more-loaded", statsTableData);
-    }
     // Set selected scope to min of returned scopes
     selectedScope = minScope(scopes)
    nodeOnly = (selectedScope == "node") // "node" still only scope after load-all
web/frontend/src/job/StatsTab.svelte (new file, 145 lines)
@@ -0,0 +1,145 @@
<!--
  @component Job-View subcomponent; Wraps the statsTable in a TabPane and contains GQL query for scoped statsData

  Properties:
  - `job Object`: The job object
  - `clusters Object`: The clusters object
  - `tabActive bool`: Boolean if StatsTable tab is active on creation
-->

<script>
  import {
    queryStore,
    gql,
    getContextClient
  } from "@urql/svelte";
  import { getContext } from "svelte";
  import {
    Card,
    Button,
    Row,
    Col,
    TabPane,
    Spinner,
    Icon
  } from "@sveltestrap/sveltestrap";
  import MetricSelection from "../generic/select/MetricSelection.svelte";
  import StatsTable from "./statstab/StatsTable.svelte";

  export let job;
  export let clusters;
  export let tabActive;

  let loadScopes = false;
  let selectedScopes = [];
  let selectedMetrics = [];
  let availableMetrics = new Set(); // For Info Only, filled by MetricSelection Component
  let isMetricSelectionOpen = false;

  const client = getContextClient();
  const query = gql`
    query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!) {
      scopedJobStats(id: $dbid, metrics: $selectedMetrics, scopes: $selectedScopes) {
        name
        scope
        stats {
          hostname
          id
          data {
            min
            avg
            max
          }
        }
      }
    }
  `;

  $: scopedStats = queryStore({
    client: client,
    query: query,
    variables: { dbid: job.id, selectedMetrics, selectedScopes },
  });

  $: if (loadScopes) {
    selectedScopes = ["node", "socket", "core", "hwthread", "accelerator"];
  }

  // Handle Job Query on Init -> is not executed anymore
  getContext("on-init")(() => {
    if (!job) return;

    const pendingMetrics = (
      getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
      getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`]
    ) || getContext("cc-config")["job_view_nodestats_selectedMetrics"];

    // Select default Scopes to load: Check before if any metric has accelerator scope by default
    const accScopeDefault = [...pendingMetrics].some(function (m) {
      const cluster = clusters.find((c) => c.name == job.cluster);
      const subCluster = cluster.subClusters.find((sc) => sc.name == job.subCluster);
      return subCluster.metricConfig.find((smc) => smc.name == m)?.scope === "accelerator";
    });

    const pendingScopes = ["node"]
    if (job.numNodes === 1) {
      pendingScopes.push("socket")
      pendingScopes.push("core")
      pendingScopes.push("hwthread")
      if (accScopeDefault) { pendingScopes.push("accelerator") }
    }

    selectedMetrics = [...pendingMetrics];
    selectedScopes = [...pendingScopes];
  });

</script>

<TabPane tabId="stats" tab="Statistics Table" class="overflow-x-auto" active={tabActive}>
  <Row>
    <Col class="m-2">
      <Button outline on:click={() => (isMetricSelectionOpen = true)} class="px-2" color="primary" style="margin-right:0.5rem">
        Select Metrics (Selected {selectedMetrics.length} of {availableMetrics.size} available)
      </Button>
      {#if job.numNodes > 1}
        <Button class="px-2 ml-auto" color="success" outline on:click={() => (loadScopes = !loadScopes)} disabled={loadScopes}>
          {#if !loadScopes}
            <Icon name="plus-square-fill" style="margin-right:0.25rem"/> Add More Scopes
          {:else}
            <Icon name="check-square-fill" style="margin-right:0.25rem"/> OK: Scopes Added
          {/if}
        </Button>
      {/if}
    </Col>
  </Row>
  <hr class="mb-1 mt-1"/>
  <!-- ROW1: Status-->
  {#if $scopedStats.fetching}
    <Row>
      <Col class="m-3" style="text-align: center;">
        <Spinner secondary/>
      </Col>
    </Row>
  {:else if $scopedStats.error}
    <Row>
      <Col class="m-2">
        <Card body color="danger">{$scopedStats.error.message}</Card>
      </Col>
    </Row>
  {:else}
    <StatsTable
      hosts={job.resources.map((r) => r.hostname).sort()}
      data={$scopedStats?.data?.scopedJobStats}
      {selectedMetrics}
    />
  {/if}
</TabPane>

<MetricSelection
  cluster={job.cluster}
  subCluster={job.subCluster}
  configName="job_view_nodestats_selectedMetrics"
  bind:allMetrics={availableMetrics}
  bind:metrics={selectedMetrics}
  bind:isOpen={isMetricSelectionOpen}
/>
@@ -1,178 +0,0 @@ (file removed: web/frontend/src/job/StatsTable.svelte)
<!--
  @component Job-View subcomponent; display table of metric data statistics with selectable scopes

  Properties:
  - `job Object`: The job object
  - `jobMetrics [Object]`: The jobs metricdata

  Exported:
  - `moreLoaded`: Adds additional scopes requested from Metric.svelte in Job-View
-->

<script>
  import { getContext } from "svelte";
  import {
    Button,
    Table,
    Input,
    InputGroup,
    InputGroupText,
    Icon,
    Row,
    Col
  } from "@sveltestrap/sveltestrap";
  import { maxScope } from "../generic/utils.js";
  import StatsTableEntry from "./StatsTableEntry.svelte";
  import MetricSelection from "../generic/select/MetricSelection.svelte";

  export let job;
  export let jobMetrics;

  const sortedJobMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort()
  const scopesForMetric = (metric) =>
    jobMetrics.filter((jm) => jm.name == metric).map((jm) => jm.scope);

  let hosts = job.resources.map((r) => r.hostname).sort(),
    selectedScopes = {},
    sorting = {},
    isMetricSelectionOpen = false,
    availableMetrics = new Set(),
    selectedMetrics = (
      getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
      getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`]
    ) || getContext("cc-config")["job_view_nodestats_selectedMetrics"];

  for (let metric of sortedJobMetrics) {
    // Not Exclusive or Multi-Node: get maxScope directly (mostly: node)
    // -> Else: Load smallest available granularity as default as per availability
    const availableScopes = scopesForMetric(metric);
    if (job.exclusive != 1 || job.numNodes == 1) {
      if (availableScopes.includes("accelerator")) {
        selectedScopes[metric] = "accelerator";
      } else if (availableScopes.includes("core")) {
        selectedScopes[metric] = "core";
      } else if (availableScopes.includes("socket")) {
        selectedScopes[metric] = "socket";
      } else {
        selectedScopes[metric] = "node";
      }
    } else {
      selectedScopes[metric] = maxScope(availableScopes);
    }

    sorting[metric] = {
      min: { dir: "up", active: false },
      avg: { dir: "up", active: false },
      max: { dir: "up", active: false },
    };
  }

  function sortBy(metric, stat) {
    let s = sorting[metric][stat];
    if (s.active) {
      s.dir = s.dir == "up" ? "down" : "up";
    } else {
      for (let metric in sorting)
        for (let stat in sorting[metric]) sorting[metric][stat].active = false;
      s.active = true;
    }

    let series = jobMetrics.find(
      (jm) => jm.name == metric && jm.scope == "node",
    )?.metric.series;
    sorting = { ...sorting };
    hosts = hosts.sort((h1, h2) => {
      let s1 = series.find((s) => s.hostname == h1)?.statistics;
      let s2 = series.find((s) => s.hostname == h2)?.statistics;
      if (s1 == null || s2 == null) return -1;

      return s.dir != "up" ? s1[stat] - s2[stat] : s2[stat] - s1[stat];
    });
  }

  export function moreLoaded(moreJobMetrics) {
    moreJobMetrics.forEach(function (newMetric) {
      if (!jobMetrics.some((m) => m.scope == newMetric.scope)) {
        jobMetrics = [...jobMetrics, newMetric]
      }
    });
  };
</script>

<Row>
  <Col class="m-2">
    <Button outline on:click={() => (isMetricSelectionOpen = true)} class="w-auto px-2" color="primary">
      Select Metrics (Selected {selectedMetrics.length} of {availableMetrics.size} available)
    </Button>
  </Col>
</Row>
<hr class="mb-1 mt-1"/>
<Table class="mb-0">
  <thead>
    <!-- Header Row 1: Selectors -->
    <tr>
      <th/>
      {#each selectedMetrics as metric}
        <!-- To Match Row-2 Header Field Count-->
        <th colspan={selectedScopes[metric] == "node" ? 3 : 4}>
          <InputGroup>
            <InputGroupText>
              {metric}
            </InputGroupText>
            <Input type="select" bind:value={selectedScopes[metric]}>
              {#each scopesForMetric(metric, jobMetrics) as scope}
                <option value={scope}>{scope}</option>
              {/each}
            </Input>
          </InputGroup>
        </th>
      {/each}
    </tr>
    <!-- Header Row 2: Fields -->
    <tr>
      <th>Node</th>
      {#each selectedMetrics as metric}
        {#if selectedScopes[metric] != "node"}
          <th>Id</th>
        {/if}
        {#each ["min", "avg", "max"] as stat}
          <th on:click={() => sortBy(metric, stat)}>
            {stat}
            {#if selectedScopes[metric] == "node"}
              <Icon
                name="caret-{sorting[metric][stat].dir}{sorting[metric][stat]
                  .active
                  ? '-fill'
                  : ''}"
              />
            {/if}
          </th>
        {/each}
      {/each}
    </tr>
  </thead>
  <tbody>
    {#each hosts as host (host)}
      <tr>
        <th scope="col">{host}</th>
        {#each selectedMetrics as metric (metric)}
          <StatsTableEntry
            {host}
            {metric}
            scope={selectedScopes[metric]}
            {jobMetrics}
          />
        {/each}
      </tr>
    {/each}
  </tbody>
</Table>

<MetricSelection
  cluster={job.cluster}
  subCluster={job.subCluster}
  configName="job_view_nodestats_selectedMetrics"
  bind:allMetrics={availableMetrics}
  bind:metrics={selectedMetrics}
  bind:isOpen={isMetricSelectionOpen}
/>
@@ -40,14 +40,14 @@
   const client = getContextClient();
   const polarQuery = gql`
     query ($dbid: ID!, $selectedMetrics: [String!]!) {
-      jobMetricStats(id: $dbid, metrics: $selectedMetrics) {
+      jobStats(id: $dbid, metrics: $selectedMetrics) {
         name
-        stats {
-          min
-          avg
-          max
-        }
+        min
+        avg
+        max
       }
     }
   `;

@@ -66,7 +66,7 @@
   {:else}
     <Polar
       {polarMetrics}
-      polarData={$polarData.data.jobMetricStats}
+      polarData={$polarData.data.jobStats}
     />
   {/if}
 </CardBody>
web/frontend/src/job/statstab/StatsTable.svelte (new file, 139 lines)
@ -0,0 +1,139 @@
<!--
  @component Job-View subcomponent; display table of metric data statistics with selectable scopes

  Properties:
  - `data Object`: The data object
  - `selectedMetrics [String]`: The selected metrics
  - `hosts [String]`: The list of hostnames of this job
-->

<script>
  import {
    Table,
    Input,
    InputGroup,
    InputGroupText,
    Icon,
  } from "@sveltestrap/sveltestrap";
  import StatsTableEntry from "./StatsTableEntry.svelte";

  export let data = [];
  export let selectedMetrics = [];
  export let hosts = [];

  let sorting = {};
  let availableScopes = {};
  let selectedScopes = {};

  const scopesForMetric = (metric) =>
    data?.filter((jm) => jm.name == metric)?.map((jm) => jm.scope) || [];
  const setScopeForMetric = (metric, scope) =>
    selectedScopes[metric] = scope;

  $: if (data && selectedMetrics) {
    for (let metric of selectedMetrics) {
      availableScopes[metric] = scopesForMetric(metric);
      // Set Initial Selection, but do not use selectedScopes: Skips reactivity
      if (availableScopes[metric].includes("accelerator")) {
        setScopeForMetric(metric, "accelerator");
      } else if (availableScopes[metric].includes("core")) {
        setScopeForMetric(metric, "core");
      } else if (availableScopes[metric].includes("socket")) {
        setScopeForMetric(metric, "socket");
      } else {
        setScopeForMetric(metric, "node");
      }

      sorting[metric] = {
        min: { dir: "up", active: false },
        avg: { dir: "up", active: false },
        max: { dir: "up", active: false },
      };
    }
  }

  function sortBy(metric, stat) {
    let s = sorting[metric][stat];
    if (s.active) {
      s.dir = s.dir == "up" ? "down" : "up";
    } else {
      for (let metric in sorting)
        for (let stat in sorting[metric]) sorting[metric][stat].active = false;
      s.active = true;
    }

    let stats = data.find(
      (d) => d.name == metric && d.scope == "node",
    )?.stats || [];
    sorting = { ...sorting };
    hosts = hosts.sort((h1, h2) => {
      let s1 = stats.find((s) => s.hostname == h1)?.data;
      let s2 = stats.find((s) => s.hostname == h2)?.data;
      if (s1 == null || s2 == null) return -1;

      return s.dir != "up" ? s1[stat] - s2[stat] : s2[stat] - s1[stat];
    });
  }
</script>

<Table class="mb-0">
  <thead>
    <!-- Header Row 1: Selectors -->
    <tr>
      <th/>
      {#each selectedMetrics as metric}
        <!-- To Match Row-2 Header Field Count -->
        <th colspan={selectedScopes[metric] == "node" ? 3 : 4}>
          <InputGroup>
            <InputGroupText>
              {metric}
            </InputGroupText>
            <Input type="select" bind:value={selectedScopes[metric]} disabled={availableScopes[metric].length === 1}>
              {#each (availableScopes[metric] || []) as scope}
                <option value={scope}>{scope}</option>
              {/each}
            </Input>
          </InputGroup>
        </th>
      {/each}
    </tr>
    <!-- Header Row 2: Fields -->
    <tr>
      <th>Node</th>
      {#each selectedMetrics as metric}
        {#if selectedScopes[metric] != "node"}
          <th>Id</th>
        {/if}
        {#each ["min", "avg", "max"] as stat}
          <th on:click={() => sortBy(metric, stat)}>
            {stat}
            {#if selectedScopes[metric] == "node"}
              <Icon
                name="caret-{sorting[metric][stat].dir}{sorting[metric][stat]
                  .active
                  ? '-fill'
                  : ''}"
              />
            {/if}
          </th>
        {/each}
      {/each}
    </tr>
  </thead>
  <tbody>
    {#each hosts as host (host)}
      <tr>
        <th scope="col">{host}</th>
        {#each selectedMetrics as metric (metric)}
          <StatsTableEntry
            {data}
            {host}
            {metric}
            scope={selectedScopes[metric]}
          />
        {/each}
      </tr>
    {/each}
  </tbody>
</Table>
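The reactive block in this new file initializes each metric to the finest scope the data actually contains (accelerator over core over socket, falling back to node). A standalone sketch of that rule, with a hypothetical helper name and the data shape used in this file:

// Pick the finest available scope for a metric (sketch, not part of the diff).
const FINEST_FIRST = ["accelerator", "core", "socket"];

function initialScope(data, metric) {
  const available = data
    .filter((jm) => jm.name == metric)
    .map((jm) => jm.scope);
  return FINEST_FIRST.find((scope) => available.includes(scope)) ?? "node";
}

// Example: a metric reported at "core" and "node" granularity starts at "core".
initialScope(
  [{ name: "flops_any", scope: "core" }, { name: "flops_any", scope: "node" }],
  "flops_any",
); // -> "core"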
90 web/frontend/src/job/StatsTableEntry.svelte → web/frontend/src/job/statstab/StatsTableEntry.svelte
@@ -1,11 +1,11 @@
 <!--
-  @component Job-View subcomponent; Single Statistics entry component fpr statstable
+  @component Job-View subcomponent; Single Statistics entry component for statstable

   Properties:
   - `host String`: The hostname (== node)
   - `metric String`: The metric name
   - `scope String`: The selected scope
-  - `jobMetrics [Object]`: The jobs metricdata
+  - `data [Object]`: The jobs statsdata
 -->

 <script>
@@ -14,59 +14,59 @@
   export let host;
   export let metric;
   export let scope;
-  export let jobMetrics;
+  export let data;

-  function compareNumbers(a, b) {
-    return a.id - b.id;
-  }
-
-  function sortByField(field) {
-    let s = sorting[field];
-    if (s.active) {
-      s.dir = s.dir == "up" ? "down" : "up";
-    } else {
-      for (let field in sorting) sorting[field].active = false;
-      s.active = true;
-    }
-
-    sorting = { ...sorting };
-    series = series.sort((a, b) => {
-      if (a == null || b == null) return -1;
-
-      if (field === "id") {
-        return s.dir != "up" ? a[field] - b[field] : b[field] - a[field];
-      } else {
-        return s.dir != "up"
-          ? a.statistics[field] - b.statistics[field]
-          : b.statistics[field] - a.statistics[field];
-      }
-    });
-  }
-
-  let sorting = {
+  let entrySorting = {
     id: { dir: "down", active: true },
     min: { dir: "up", active: false },
     avg: { dir: "up", active: false },
     max: { dir: "up", active: false },
   };

-  $: series = jobMetrics
-    .find((jm) => jm.name == metric && jm.scope == scope)
-    ?.metric.series.filter((s) => s.hostname == host && s.statistics != null)
-    ?.sort(compareNumbers);
+  function compareNumbers(a, b) {
+    return a.id - b.id;
+  }
+
+  function sortByField(field) {
+    let s = entrySorting[field];
+    if (s.active) {
+      s.dir = s.dir == "up" ? "down" : "up";
+    } else {
+      for (let field in entrySorting) entrySorting[field].active = false;
+      s.active = true;
+    }
+
+    entrySorting = { ...entrySorting };
+    stats = stats.sort((a, b) => {
+      if (a == null || b == null) return -1;
+
+      if (field === "id") {
+        return s.dir != "up" ? a[field].localeCompare(b[field]) : b[field].localeCompare(a[field])
+      } else {
+        return s.dir != "up"
+          ? a.data[field] - b.data[field]
+          : b.data[field] - a.data[field];
+      }
+    });
+  }
+
+  $: stats = data
+    ?.find((d) => d.name == metric && d.scope == scope)
+    ?.stats.filter((s) => s.hostname == host && s.data != null)
+    ?.sort(compareNumbers) || [];
 </script>

-{#if series == null || series.length == 0}
+{#if stats == null || stats.length == 0}
   <td colspan={scope == "node" ? 3 : 4}><i>No data</i></td>
-{:else if series.length == 1 && scope == "node"}
+{:else if stats.length == 1 && scope == "node"}
   <td>
-    {series[0].statistics.min}
+    {stats[0].data.min}
   </td>
   <td>
-    {series[0].statistics.avg}
+    {stats[0].data.avg}
   </td>
   <td>
-    {series[0].statistics.max}
+    {stats[0].data.max}
   </td>
 {:else}
   <td colspan="4">
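The new `$: stats` chain above is deliberately defensive: every step is optional-chained and the result falls back to an empty array, so a missing metric/scope combination renders as "No data" instead of throwing. The same pattern in isolation, with invented sample data:

// Sketch of the lookup pattern from `$: stats` above (sample data is illustrative).
const data = [
  { name: "cpu_load", scope: "node",
    stats: [{ hostname: "n01", data: { min: 0.9, avg: 1.0, max: 1.2 } }] },
];

const statsFor = (metric, scope, host) =>
  data
    ?.find((d) => d.name == metric && d.scope == scope)
    ?.stats.filter((s) => s.hostname == host && s.data != null)
    ?.sort((a, b) => a.id - b.id) || [];

statsFor("cpu_load", "node", "n01"); // -> one entry
statsFor("mem_bw", "core", "n01");   // -> [] instead of a crash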
@@ -76,19 +76,19 @@
             <th on:click={() => sortByField(field)}>
               Sort
               <Icon
-                name="caret-{sorting[field].dir}{sorting[field].active
+                name="caret-{entrySorting[field].dir}{entrySorting[field].active
                   ? '-fill'
                   : ''}"
               />
             </th>
           {/each}
         </tr>
-        {#each series as s, i}
+        {#each stats as s, i}
           <tr>
             <th>{s.id ?? i}</th>
-            <td>{s.statistics.min}</td>
-            <td>{s.statistics.avg}</td>
-            <td>{s.statistics.max}</td>
+            <td>{s.data.min}</td>
+            <td>{s.data.avg}</td>
+            <td>{s.data.max}</td>
           </tr>
         {/each}
       </table>
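Both sort handlers in these two components (sortBy in StatsTable, sortByField here) share one toggle-and-reset pattern: clicking the active column flips its direction, clicking another column deactivates the rest. A compact sketch with hypothetical names:

// Toggle-and-reset sorting state; returns a fresh object so that a
// Svelte assignment (`sorting = toggleSort(...)`) retriggers reactivity.
function toggleSort(sorting, field) {
  const s = sorting[field];
  if (s.active) {
    s.dir = s.dir == "up" ? "down" : "up"; // second click flips direction
  } else {
    for (const f in sorting) sorting[f].active = false; // reset the others
    s.active = true;
  }
  return { ...sorting };
}

let sorting = { min: { dir: "up", active: false }, avg: { dir: "up", active: true } };
sorting = toggleSort(sorting, "min"); // "min" becomes the active column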
@@ -26,8 +26,7 @@ var frontendFiles embed.FS
 func ServeFiles() http.Handler {
 	publicFiles, err := fs.Sub(frontendFiles, "frontend/public")
 	if err != nil {
-		log.Fatalf("WEB/WEB > cannot find frontend public files")
-		panic(err)
+		log.Abortf("Serve Files: Could not find 'frontend/public' file directory.\nError: %s\n", err.Error())
 	}
 	return http.FileServer(http.FS(publicFiles))
 }
@@ -75,8 +74,7 @@ func init() {
 		templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFS(templateFiles, path))
 		return nil
 	}); err != nil {
-		log.Fatalf("WEB/WEB > cannot find frontend template files")
-		panic(err)
+		log.Abortf("Web init(): Could not find frontend template files.\nError: %s\n", err.Error())
 	}

 	_ = base