Merge latest state branch 'dev' into migrate_svelte5

Christoph Kluge 2025-05-13 18:25:54 +02:00
commit b3135c982f
92 changed files with 6082 additions and 3223 deletions

View File

@@ -1,331 +0,0 @@
# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions
# Workflow name
name: Release
# Run on tag push
on:
push:
tags:
- '**'
jobs:
#
# Build on AlmaLinux 8.5 using golang-1.18.2
#
AlmaLinux-RPM-build:
runs-on: ubuntu-latest
# See: https://hub.docker.com/_/almalinux
container: almalinux:8.5
# The job outputs link to the outputs of the 'rpmrename' step
# Only job outputs can be used in child jobs
outputs:
rpm : ${{steps.rpmrename.outputs.RPM}}
srpm : ${{steps.rpmrename.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: |
dnf --assumeyes group install "Development Tools" "RPM Development Tools"
dnf --assumeyes install wget openssl-devel diffutils delve which npm
dnf --assumeyes install 'dnf-command(builddep)'
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# Use dnf to install build dependencies
- name: Install build dependencies
run: |
wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
rpm -i go*.rpm
npm install --global yarn rollup svelte rollup-plugin-svelte
#dnf --assumeyes builddep build/package/cc-backend.spec
- name: RPM build ClusterCockpit
id: rpmbuild
run: make RPM
# AlmaLinux 8.5 is a derivative of RedHat Enterprise Linux 8 (UBI8),
# so the created RPM and SRPM both contain the substring 'el8' in their file names.
# This step replaces the substring 'el8' with 'alma85'. It uses the move operation
# because it is unclear whether the default AlmaLinux 8.5 container contains the
# 'rename' command. This way we also get the new names for output.
- name: Rename RPMs (s/el8/alma85/)
id: rpmrename
run: |
OLD_RPM="${{steps.rpmbuild.outputs.RPM}}"
OLD_SRPM="${{steps.rpmbuild.outputs.SRPM}}"
NEW_RPM="${OLD_RPM/el8/alma85}"
NEW_SRPM=${OLD_SRPM/el8/alma85}
mv "${OLD_RPM}" "${NEW_RPM}"
mv "${OLD_SRPM}" "${NEW_SRPM}"
echo "::set-output name=SRPM::${NEW_SRPM}"
echo "::set-output name=RPM::${NEW_RPM}"
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-backend RPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.RPM }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-backend SRPM for AlmaLinux 8.5
path: ${{ steps.rpmrename.outputs.SRPM }}
#
# Build on UBI 8 using golang-1.18.2
#
UBI-8-RPM-build:
runs-on: ubuntu-latest
# See: https://catalog.redhat.com/software/containers/ubi8/ubi/5c359854d70cc534b3a3784e?container-tabs=gti
container: registry.access.redhat.com/ubi8/ubi:8.5-226.1645809065
# The job outputs link to the outputs of the 'rpmbuild' step
outputs:
rpm : ${{steps.rpmbuild.outputs.RPM}}
srpm : ${{steps.rpmbuild.outputs.SRPM}}
steps:
# Use dnf to install development packages
- name: Install development packages
run: dnf --assumeyes --disableplugin=subscription-manager install rpm-build go-srpm-macros rpm-build-libs rpm-libs gcc make python38 git wget openssl-devel diffutils delve which
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# Use dnf to install build dependencies
- name: Install build dependencies
run: |
wget -q http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-bin-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/golang-src-1.18.2-1.module_el8.7.0+1173+5d37c0fd.noarch.rpm \
http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/go-toolset-1.18.2-1.module_el8.7.0+1173+5d37c0fd.x86_64.rpm
rpm -i go*.rpm
dnf --assumeyes --disableplugin=subscription-manager install npm
npm install --global yarn rollup svelte rollup-plugin-svelte
#dnf --assumeyes builddep build/package/cc-backend.spec
- name: RPM build ClusterCockpit
id: rpmbuild
run: make RPM
# See: https://github.com/actions/upload-artifact
- name: Save RPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-backend RPM for UBI 8
path: ${{ steps.rpmbuild.outputs.RPM }}
- name: Save SRPM as artifact
uses: actions/upload-artifact@v2
with:
name: cc-backend SRPM for UBI 8
path: ${{ steps.rpmbuild.outputs.SRPM }}
#
# Build on Ubuntu 20.04 using official go 1.19.1 package
#
Ubuntu-focal-build:
runs-on: ubuntu-latest
container: ubuntu:20.04
# The job outputs link to the outputs of the 'debrename' step
# Only job outputs can be used in child jobs
outputs:
deb : ${{steps.debrename.outputs.DEB}}
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash
apt --assume-yes install npm
npm install --global yarn rollup svelte rollup-plugin-svelte
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# Use official golang package
- name: Install Golang
run: |
wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
go version
- name: DEB build ClusterCockpit
id: dpkg-build
run: |
ls -la
pwd
env
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
git config --global --add safe.directory $(pwd)
make DEB
- name: Rename DEB (add '_ubuntu20.04')
id: debrename
run: |
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu20.04.deb"
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
echo "::set-output name=DEB::${NEW_DEB_FILE}"
# See: https://github.com/actions/upload-artifact
- name: Save DEB as artifact
uses: actions/upload-artifact@v2
with:
name: cc-backend DEB for Ubuntu 20.04
path: ${{ steps.debrename.outputs.DEB }}
#
# Build on Ubuntu 22.04 using official go 1.19.1 package
#
Ubuntu-jammy-build:
runs-on: ubuntu-latest
container: ubuntu:22.04
# The job outputs link to the outputs of the 'debrename' step
# Only job outputs can be used in child jobs
outputs:
deb : ${{steps.debrename.outputs.DEB}}
steps:
# Use apt to install development packages
- name: Install development packages
run: |
apt update && apt --assume-yes upgrade
apt --assume-yes install build-essential sed git wget bash npm
npm install --global yarn rollup svelte rollup-plugin-svelte
# Checkout git repository and submodules
# fetch-depth must be 0 to use git describe
# See: https://github.com/marketplace/actions/checkout
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
# Use official golang package
- name: Install Golang
run: |
wget -q https://go.dev/dl/go1.19.1.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.19.1.linux-amd64.tar.gz
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
go version
- name: DEB build ClusterCockpit
id: dpkg-build
run: |
ls -la
pwd
env
export PATH=/usr/local/go/bin:/usr/local/go/pkg/tool/linux_amd64:$PATH
git config --global --add safe.directory $(pwd)
make DEB
- name: Rename DEB (add '_ubuntu22.04')
id: debrename
run: |
OLD_DEB_NAME=$(echo "${{steps.dpkg-build.outputs.DEB}}" | rev | cut -d '.' -f 2- | rev)
NEW_DEB_FILE="${OLD_DEB_NAME}_ubuntu22.04.deb"
mv "${{steps.dpkg-build.outputs.DEB}}" "${NEW_DEB_FILE}"
echo "::set-output name=DEB::${NEW_DEB_FILE}"
# See: https://github.com/actions/upload-artifact
- name: Save DEB as artifact
uses: actions/upload-artifact@v2
with:
name: cc-backend DEB for Ubuntu 22.04
path: ${{ steps.debrename.outputs.DEB }}
#
# Create release with fresh RPMs
#
Release:
runs-on: ubuntu-latest
# We need the RPMs, so add dependency
needs: [AlmaLinux-RPM-build, UBI-8-RPM-build, Ubuntu-focal-build, Ubuntu-jammy-build]
steps:
# See: https://github.com/actions/download-artifact
- name: Download AlmaLinux 8.5 RPM
uses: actions/download-artifact@v2
with:
name: cc-backend RPM for AlmaLinux 8.5
- name: Download AlmaLinux 8.5 SRPM
uses: actions/download-artifact@v2
with:
name: cc-backend SRPM for AlmaLinux 8.5
- name: Download UBI 8 RPM
uses: actions/download-artifact@v2
with:
name: cc-backend RPM for UBI 8
- name: Download UBI 8 SRPM
uses: actions/download-artifact@v2
with:
name: cc-backend SRPM for UBI 8
- name: Download Ubuntu 20.04 DEB
uses: actions/download-artifact@v2
with:
name: cc-backend DEB for Ubuntu 20.04
- name: Download Ubuntu 22.04 DEB
uses: actions/download-artifact@v2
with:
name: cc-backend DEB for Ubuntu 22.04
# The download actions do not publish the name of the downloaded file,
# so we re-use the job outputs of the parent jobs. The files are all
# downloaded to the current folder.
# The gh-release action afterwards does not accept file lists but all
# files have to be listed at 'files'. The step creates one output per
# RPM package (2 per distro)
- name: Set RPM variables
id: files
run: |
ALMA_85_RPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.rpm}}")
ALMA_85_SRPM=$(basename "${{ needs.AlmaLinux-RPM-build.outputs.srpm}}")
UBI_8_RPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.rpm}}")
UBI_8_SRPM=$(basename "${{ needs.UBI-8-RPM-build.outputs.srpm}}")
U_2004_DEB=$(basename "${{ needs.Ubuntu-focal-build.outputs.deb}}")
U_2204_DEB=$(basename "${{ needs.Ubuntu-jammy-build.outputs.deb}}")
echo "ALMA_85_RPM::${ALMA_85_RPM}"
echo "ALMA_85_SRPM::${ALMA_85_SRPM}"
echo "UBI_8_RPM::${UBI_8_RPM}"
echo "UBI_8_SRPM::${UBI_8_SRPM}"
echo "U_2004_DEB::${U_2004_DEB}"
echo "U_2204_DEB::${U_2204_DEB}"
echo "::set-output name=ALMA_85_RPM::${ALMA_85_RPM}"
echo "::set-output name=ALMA_85_SRPM::${ALMA_85_SRPM}"
echo "::set-output name=UBI_8_RPM::${UBI_8_RPM}"
echo "::set-output name=UBI_8_SRPM::${UBI_8_SRPM}"
echo "::set-output name=U_2004_DEB::${U_2004_DEB}"
echo "::set-output name=U_2204_DEB::${U_2204_DEB}"
# See: https://github.com/softprops/action-gh-release
- name: Release
uses: softprops/action-gh-release@v1
if: startsWith(github.ref, 'refs/tags/')
with:
name: cc-backend-${{github.ref_name}}
files: |
${{ steps.files.outputs.ALMA_85_RPM }}
${{ steps.files.outputs.ALMA_85_SRPM }}
${{ steps.files.outputs.UBI_8_RPM }}
${{ steps.files.outputs.UBI_8_SRPM }}
${{ steps.files.outputs.U_2004_DEB }}
${{ steps.files.outputs.U_2204_DEB }}

View File

@@ -7,7 +7,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v4
        with:
-         go-version: 1.22.x
+         go-version: 1.24.x
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Build, Vet & Test

View File

@@ -2,7 +2,7 @@ TARGET = ./cc-backend
VAR = ./var
CFG = config.json .env
FRONTEND = ./web/frontend
-VERSION = 1.4.2
+VERSION = 1.4.4
GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S")
LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}'
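The `-X` flags in `LD_FLAGS` only take effect on plain string `var` declarations in package `main`. A minimal sketch of the receiving side (variable names match the `LD_FLAGS` line above; the default values are illustrative and apply to a plain `go build` without these flags):

```go
package main

import "fmt"

// Targets of the Makefile's -ldflags "-X main.version=..." etc.
var (
	version = "development" // overwritten by -X main.version=...
	commit  = "unknown"     // overwritten by -X main.commit=...
	date    = "unknown"     // overwritten by -X main.date=...
)

func main() {
	fmt.Printf("cc-backend %s (commit %s, built %s)\n", version, commit, date)
}
```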

View File

@@ -1,13 +1,27 @@
-# `cc-backend` version 1.4.2
+# `cc-backend` version 1.4.4
 Supports job archive version 2 and database version 8.
-This is a small bug fix release of `cc-backend`, the API backend and frontend
+This is a bug fix release of `cc-backend`, the API backend and frontend
 implementation of ClusterCockpit.
 For release specific notes visit the [ClusterCockpit Documentation](https://clustercockpit.org/docs/release/).
 ## Breaking changes
+The option `apiAllowedIPs` is now a required configuration attribute in
+`config.json`. This option restricts access to the admin API.
+To retain the previous behavior that the API is per default accessible from
+everywhere set:
+```json
+"apiAllowedIPs": [
+  "*"
+]
+```
+## Breaking changes for minor release 1.4.x
 - You need to perform a database migration. Depending on your database size the
   migration might require several hours!
 - You need to adapt the `cluster.json` configuration files in the job-archive,
@@ -22,20 +36,7 @@ For release specific notes visit the [ClusterCockpit Documentation](https://clus
 ## New features
-- Tags have a scope now. Tags created by a basic user are only visible by that
-  user. Tags created by an admin/support role can be configured to be visible by
-  all users (global scope) or only be admin/support role.
-- Re-sampling support for running (requires a recent `cc-metric-store`) and
-  archived jobs. This greatly speeds up loading of large or very long jobs. You
-  need to add the new configuration key `enable-resampling` to the `config.json`
-  file.
-- For finished jobs a total job energy is shown in the job view.
-- Continuous scrolling in job lists is default now.
-- All database queries (especially for sqlite) were optimized resulting in
-  dramatically faster load times.
-- A performance and energy footprint can be freely configured on a per
-  subcluster base. One can filter for footprint statistics for running and
-  finished jobs.
+- Enable to delete tags from the web interface
 ## Known issues
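Since `apiAllowedIPs` now gates the admin API, a hypothetical middleware sketch illustrates the semantics of the `"*"` wildcard shown above (cc-backend's actual implementation is not reproduced here; all names are illustrative):

```go
package main

import (
	"log"
	"net"
	"net/http"
	"slices"
)

// ipRestricted guards a handler with an apiAllowedIPs-style list;
// a "*" entry disables the restriction entirely.
func ipRestricted(allowed []string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if slices.Contains(allowed, "*") { // wildcard: allow everyone
			next.ServeHTTP(w, r)
			return
		}
		host, _, err := net.SplitHostPort(r.RemoteAddr)
		if err != nil || !slices.Contains(allowed, host) {
			http.Error(w, "Forbidden", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/api/users/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("admin API\n"))
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", ipRestricted([]string{"127.0.0.1"}, mux)))
}
```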

View File

@@ -137,11 +137,6 @@ type JobMetricWithName {
   metric: JobMetric!
 }
 
-type JobMetricStatWithName {
-  name: String!
-  stats: MetricStatistics!
-}
-
 type JobMetric {
   unit: Unit
   timestep: Int!
@@ -156,6 +151,43 @@ type Series {
   data: [NullableFloat!]!
 }
 
+type StatsSeries {
+  mean: [NullableFloat!]!
+  median: [NullableFloat!]!
+  min: [NullableFloat!]!
+  max: [NullableFloat!]!
+}
+
+type NamedStatsWithScope {
+  name: String!
+  scope: MetricScope!
+  stats: [ScopedStats!]!
+}
+
+type ScopedStats {
+  hostname: String!
+  id: String
+  data: MetricStatistics!
+}
+
+type JobStats {
+  id: Int!
+  jobId: String!
+  startTime: Int!
+  duration: Int!
+  cluster: String!
+  subCluster: String!
+  numNodes: Int!
+  numHWThreads: Int
+  numAccelerators: Int
+  stats: [NamedStats!]!
+}
+
+type NamedStats {
+  name: String!
+  data: MetricStatistics!
+}
+
 type Unit {
   base: String!
   prefix: String
@@ -167,13 +199,6 @@ type MetricStatistics {
   max: Float!
 }
 
-type StatsSeries {
-  mean: [NullableFloat!]!
-  median: [NullableFloat!]!
-  min: [NullableFloat!]!
-  max: [NullableFloat!]!
-}
-
 type MetricFootprints {
   metric: String!
   data: [NullableFloat!]!
@@ -247,11 +272,13 @@ type Query {
   job(id: ID!): Job
   jobMetrics(id: ID!, metrics: [String!], scopes: [MetricScope!], resolution: Int): [JobMetricWithName!]!
-  jobMetricStats(id: ID!, metrics: [String!]): [JobMetricStatWithName!]!
-  jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
+  jobStats(id: ID!, metrics: [String!]): [NamedStats!]!
+  scopedJobStats(id: ID!, metrics: [String!], scopes: [MetricScope!]): [NamedStatsWithScope!]!
   jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
   jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate, numDurationBins: String, numMetricBins: Int): [JobsStatistics!]!
+  jobsMetricStats(filter: [JobFilter!], metrics: [String!]): [JobStats!]!
+  jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
   rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
@@ -264,6 +291,7 @@ type Mutation {
   deleteTag(id: ID!): ID!
   addTagsToJob(job: ID!, tagIds: [ID!]!): [Tag!]!
   removeTagsFromJob(job: ID!, tagIds: [ID!]!): [Tag!]!
+  removeTagFromList(tagIds: [ID!]!): [Int!]!
   updateConfiguration(name: String!, value: String!): String
 }
@@ -273,6 +301,7 @@ type TimeRangeOutput { range: String, from: Time!, to: Time! }
 
 input JobFilter {
   tags: [ID!]
+  dbId: [ID!]
   jobId: StringInput
   arrayJobId: Int
   user: StringInput
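The renamed statistics queries are easiest to see in use. A hedged client sketch against the new `jobStats` query, assuming the `/query` endpoint from `cmd/cc-backend/server.go`, JWT authentication, and that `MetricStatistics` exposes `avg`, `min` and `max`:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

const query = `query($id: ID!, $metrics: [String!]) {
  jobStats(id: $id, metrics: $metrics) { name data { avg min max } }
}`

func main() {
	body, _ := json.Marshal(map[string]any{
		"query":     query,
		"variables": map[string]any{"id": "1", "metrics": []string{"flops_any", "mem_bw"}},
	})
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/query", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>") // e.g. from `cc-backend -jwt <username>`
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	var result map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		log.Fatal(err)
	}
	fmt.Println(result)
}
```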

View File

@@ -15,9 +15,8 @@
        "version": "1.0.0"
    },
    "host": "localhost:8080",
-    "basePath": "/api",
    "paths": {
-        "/clusters/": {
+        "/api/clusters/": {
            "get": {
                "security": [
                    {
@@ -74,7 +73,7 @@
                }
            }
        },
-        "/jobs/": {
+        "/api/jobs/": {
            "get": {
                "security": [
                    {
@@ -169,7 +168,7 @@
                }
            }
        },
-        "/jobs/delete_job/": {
+        "/api/jobs/delete_job/": {
            "delete": {
                "security": [
                    {
@@ -244,7 +243,7 @@
                }
            }
        },
-        "/jobs/delete_job/{id}": {
+        "/api/jobs/delete_job/{id}": {
            "delete": {
                "security": [
                    {
@@ -314,7 +313,7 @@
                }
            }
        },
-        "/jobs/delete_job_before/{ts}": {
+        "/api/jobs/delete_job_before/{ts}": {
            "delete": {
                "security": [
                    {
@@ -384,7 +383,7 @@
                }
            }
        },
-        "/jobs/edit_meta/{id}": {
+        "/api/jobs/edit_meta/{id}": {
            "post": {
                "security": [
                    {
@@ -454,7 +453,7 @@
                }
            }
        },
-        "/jobs/start_job/": {
+        "/api/jobs/start_job/": {
            "post": {
                "security": [
                    {
@@ -523,7 +522,7 @@
                }
            }
        },
-        "/jobs/stop_job/": {
+        "/api/jobs/stop_job/": {
            "post": {
                "security": [
                    {
@@ -595,7 +594,7 @@
                }
            }
        },
-        "/jobs/tag_job/{id}": {
+        "/api/jobs/tag_job/{id}": {
            "post": {
                "security": [
                    {
@@ -668,7 +667,7 @@
                }
            }
        },
-        "/jobs/{id}": {
+        "/api/jobs/{id}": {
            "get": {
                "security": [
                    {
@@ -827,185 +826,14 @@
                }
            }
        },
-        "/notice/": {
+        "/api/users/": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Modifies the content of notice.txt, shown as notice box on the homepage.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Updates or empties the notice box content",
"parameters": [
{
"type": "string",
"description": "Priority 1: New content to display",
"name": "new-content",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response Message",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: The user could not be updated",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
},
"/user/{id}": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Modifies user defined by username (id) in one of four possible ways.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Updates an existing user",
"parameters": [
{
"type": "string",
"description": "Database ID of User",
"name": "id",
"in": "path",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 1: Role to add",
"name": "add-role",
"in": "formData"
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 2: Role to remove",
"name": "remove-role",
"in": "formData"
},
{
"type": "string",
"description": "Priority 3: Project to add",
"name": "add-project",
"in": "formData"
},
{
"type": "string",
"description": "Priority 4: Project to remove",
"name": "remove-project",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response Message",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: The user could not be updated",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
},
"/users/": {
"get": { "get": {
"security": [ "security": [
{ {
"ApiKeyAuth": [] "ApiKeyAuth": []
} }
], ],
"description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", "description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.",
"produces": [ "produces": [
"application/json" "application/json"
], ],
@@ -1057,70 +885,111 @@
                    }
                }
            }
-            },
-            "post": {
+            }
+        },
+        "/jobs/tag_job/{id}": {
+            "delete": {
                "security": [
                    {
                        "ApiKeyAuth": []
                    }
                ],
-                "description": "User specified in form data will be saved to database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
+                "description": "Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". Private tags can not be deleted via API.\nIf tagged job is already finished: Tag will be removed from respective archive files.",
                "consumes": [
-                    "multipart/form-data"
+                    "application/json"
                ],
],
"produces": [
"application/json"
],
"tags": [
"Job add and modify"
],
"summary": "Removes one or more tags from a job",
"parameters": [
{
"type": "integer",
"description": "Job Database ID",
"name": "id",
"in": "path",
"required": true
},
{
"description": "Array of tag-objects to remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/api.ApiTag"
}
}
}
],
"responses": {
"200": {
"description": "Updated job resource",
"schema": {
"$ref": "#/definitions/schema.Job"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
},
"404": {
"description": "Job or tag does not exist",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
}
}
}
},
"/tags/": {
"delete": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Removes tags by type and name. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". Private tags can not be deleted via API.\nTag wills be removed from respective archive files.",
"consumes": [
"application/json"
                ],
                "produces": [
                    "text/plain"
                ],
                "tags": [
-                    "User"
+                    "Tag remove"
                ],
-                "summary": "Adds a new user",
+                "summary": "Removes all tags and job-relations for type:name tuple",
                "parameters": [
                    {
-                        "type": "string",
-                        "description": "Unique user ID",
-                        "name": "username",
-                        "in": "formData",
-                        "required": true
-                    },
-                    {
-                        "type": "string",
-                        "description": "User password",
-                        "name": "password",
+                        "description": "Array of tag-objects to remove",
+                        "name": "request",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/api.ApiTag"
+                            }
+                        }
"in": "formData",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "User role",
"name": "role",
"in": "formData",
"required": true
},
{
"type": "string",
"description": "Managed project, required for new manager role user",
"name": "project",
"in": "formData"
},
{
"type": "string",
"description": "Users name",
"name": "name",
"in": "formData"
},
{
"type": "string",
"description": "Users email",
"name": "email",
"in": "formData"
                    }
                ],
                "responses": {
@@ -1133,93 +1002,25 @@
                    "400": {
                        "description": "Bad Request",
                        "schema": {
-                            "type": "string"
+                            "$ref": "#/definitions/api.ErrorResponse"
                        }
                    },
                    "401": {
                        "description": "Unauthorized",
                        "schema": {
-                            "type": "string"
+                            "$ref": "#/definitions/api.ErrorResponse"
                        }
                    },
-                    "403": {
-                        "description": "Forbidden",
+                    "404": {
+                        "description": "Job or tag does not exist",
                        "schema": {
-                            "type": "string"
+                            "$ref": "#/definitions/api.ErrorResponse"
                        }
}
},
"422": {
"description": "Unprocessable Entity: creating user failed",
"schema": {
"type": "string"
} }
                    },
                    "500": {
                        "description": "Internal Server Error",
                        "schema": {
-                            "type": "string"
+                            "$ref": "#/definitions/api.ErrorResponse"
}
}
}
},
"delete": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "User defined by username in form data will be deleted from database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Deletes a user",
"parameters": [
{
"type": "string",
"description": "User ID to delete",
"name": "username",
"in": "formData",
"required": true
}
],
"responses": {
"200": {
"description": "User deleted successfully"
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: deleting user failed",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
} }
} }
}
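The new tag-removal endpoint takes an array of tag objects in the request body. A hedged Go client sketch (the `apiTag` field names mirror `api.ApiTag`; host, job ID, and JWT are placeholders):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// apiTag mirrors api.ApiTag: tag type, name and scope must all match.
type apiTag struct {
	Type  string `json:"type"`
	Name  string `json:"name"`
	Scope string `json:"scope"`
}

func main() {
	body, _ := json.Marshal([]apiTag{{Type: "testType", Name: "testName", Scope: "global"}})
	req, _ := http.NewRequest(http.MethodDelete,
		"http://localhost:8080/api/jobs/tag_job/123", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // 200 returns the updated job resource
}
```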

View File

@@ -1,4 +1,3 @@
-basePath: /api
 definitions:
   api.ApiReturnedUser:
     properties:
@@ -671,7 +670,7 @@ info:
   title: ClusterCockpit REST API
   version: 1.0.0
 paths:
-  /clusters/:
+  /api/clusters/:
    get:
      description: Get a list of all cluster configs. Specific cluster can be requested
        using query parameter.
@@ -708,7 +707,7 @@ paths:
      summary: Lists all cluster configs
      tags:
      - Cluster query
-  /jobs/:
+  /api/jobs/:
    get:
      description: |-
        Get a list of all jobs. Filters can be applied using query parameters.
@@ -773,7 +772,7 @@ paths:
      summary: Lists all jobs
      tags:
      - Job query
-  /jobs/{id}:
+  /api/jobs/{id}:
    get:
      description: |-
        Job to get is specified by database ID
@@ -882,7 +881,7 @@ paths:
      summary: Get job meta and configurable metric data
      tags:
      - Job query
-  /jobs/delete_job/:
+  /api/jobs/delete_job/:
    delete:
      consumes:
      - application/json
@@ -932,7 +931,7 @@ paths:
      summary: Remove a job from the sql database
      tags:
      - Job remove
-  /jobs/delete_job/{id}:
+  /api/jobs/delete_job/{id}:
    delete:
      description: Job to remove is specified by database ID. This will not remove
        the job from the job archive.
@@ -979,7 +978,7 @@ paths:
      summary: Remove a job from the sql database
      tags:
      - Job remove
-  /jobs/delete_job_before/{ts}:
+  /api/jobs/delete_job_before/{ts}:
    delete:
      description: Remove all jobs with start time before timestamp. The jobs will
        not be removed from the job archive.
@@ -1026,7 +1025,7 @@ paths:
      summary: Remove a job from the sql database
      tags:
      - Job remove
-  /jobs/edit_meta/{id}:
+  /api/jobs/edit_meta/{id}:
    post:
      consumes:
      - application/json
@@ -1073,7 +1072,7 @@ paths:
      summary: Edit meta-data json
      tags:
      - Job add and modify
-  /jobs/start_job/:
+  /api/jobs/start_job/:
    post:
      consumes:
      - application/json
@@ -1120,7 +1119,7 @@ paths:
      summary: Adds a new job as "running"
      tags:
      - Job add and modify
-  /jobs/stop_job/:
+  /api/jobs/stop_job/:
    post:
      description: |-
        Job to stop is specified by request body. All fields are required in this case.
@@ -1168,7 +1167,7 @@ paths:
      summary: Marks job as completed and triggers archiving
      tags:
      - Job add and modify
-  /jobs/tag_job/{id}:
+  /api/jobs/tag_job/{id}:
    post:
      consumes:
      - application/json
@@ -1218,173 +1217,11 @@ paths:
      summary: Adds one or more tags to a job
      tags:
      - Job add and modify
-  /notice/:
+  /api/users/:
post:
consumes:
- multipart/form-data
description: |-
Modifies the content of notice.txt, shown as notice box on the homepage.
If more than one formValue is set then only the highest priority field is used.
Only accessible from IPs registered with apiAllowedIPs configuration option.
parameters:
- description: 'Priority 1: New content to display'
in: formData
name: new-content
type: string
produces:
- text/plain
responses:
"200":
description: Success Response Message
schema:
type: string
"400":
description: Bad Request
schema:
type: string
"401":
description: Unauthorized
schema:
type: string
"403":
description: Forbidden
schema:
type: string
"422":
description: 'Unprocessable Entity: The user could not be updated'
schema:
type: string
"500":
description: Internal Server Error
schema:
type: string
security:
- ApiKeyAuth: []
summary: Updates or empties the notice box content
tags:
- User
/user/{id}:
post:
consumes:
- multipart/form-data
description: |-
Modifies user defined by username (id) in one of four possible ways.
If more than one formValue is set then only the highest priority field is used.
Only accessible from IPs registered with apiAllowedIPs configuration option.
parameters:
- description: Database ID of User
in: path
name: id
required: true
type: string
- description: 'Priority 1: Role to add'
enum:
- admin
- support
- manager
- user
- api
in: formData
name: add-role
type: string
- description: 'Priority 2: Role to remove'
enum:
- admin
- support
- manager
- user
- api
in: formData
name: remove-role
type: string
- description: 'Priority 3: Project to add'
in: formData
name: add-project
type: string
- description: 'Priority 4: Project to remove'
in: formData
name: remove-project
type: string
produces:
- text/plain
responses:
"200":
description: Success Response Message
schema:
type: string
"400":
description: Bad Request
schema:
type: string
"401":
description: Unauthorized
schema:
type: string
"403":
description: Forbidden
schema:
type: string
"422":
description: 'Unprocessable Entity: The user could not be updated'
schema:
type: string
"500":
description: Internal Server Error
schema:
type: string
security:
- ApiKeyAuth: []
summary: Updates an existing user
tags:
- User
/users/:
delete:
consumes:
- multipart/form-data
description: |-
User defined by username in form data will be deleted from database.
Only accessible from IPs registered with apiAllowedIPs configuration option.
parameters:
- description: User ID to delete
in: formData
name: username
required: true
type: string
produces:
- text/plain
responses:
"200":
description: User deleted successfully
"400":
description: Bad Request
schema:
type: string
"401":
description: Unauthorized
schema:
type: string
"403":
description: Forbidden
schema:
type: string
"422":
description: 'Unprocessable Entity: deleting user failed'
schema:
type: string
"500":
description: Internal Server Error
schema:
type: string
security:
- ApiKeyAuth: []
summary: Deletes a user
tags:
- User
    get:
      description: |-
        Returns a JSON-encoded list of users.
        Required query-parameter defines if all users or only users with additional special roles are returned.
-        Only accessible from IPs registered with apiAllowedIPs configuration option.
      parameters:
      - description: If returned list should contain all users or only users with
          additional special roles
@@ -1422,46 +1259,73 @@ paths:
-    post:
+  /jobs/tag_job/{id}:
+    delete:
       consumes:
-      - multipart/form-data
+      - application/json
       description: |-
-        User specified in form data will be saved to database.
-        Only accessible from IPs registered with apiAllowedIPs configuration option.
+        Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.
+        Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
+        If tagged job is already finished: Tag will be removed from respective archive files.
       parameters:
-      - description: Unique user ID
-        in: formData
-        name: username
+      - description: Job Database ID
+        in: path
+        name: id
         required: true
-        type: string
-      - description: User password
-        in: formData
-        name: password
+        type: integer
+      - description: Array of tag-objects to remove
+        in: body
+        name: request
         required: true
-        type: string
-      - description: User role
-        enum:
-        - admin
-        - support
-        - manager
-        - user
-        - api
-        in: formData
-        name: role
-        required: true
-        type: string
-      - description: Managed project, required for new manager role user
-        in: formData
-        name: project
-        type: string
-      - description: Users name
-        in: formData
-        name: name
-        type: string
-      - description: Users email
-        in: formData
-        name: email
-        type: string
+        schema:
+          items:
+            $ref: '#/definitions/api.ApiTag'
+          type: array
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: Updated job resource
+          schema:
+            $ref: '#/definitions/schema.Job'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "401":
+          description: Unauthorized
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "404":
+          description: Job or tag does not exist
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "500":
+          description: Internal Server Error
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+      security:
+      - ApiKeyAuth: []
+      summary: Removes one or more tags from a job
+      tags:
+      - Job add and modify
+  /tags/:
+    delete:
+      consumes:
+      - application/json
+      description: |-
+        Removes tags by type and name. Name and Type of Tag(s) must match.
+        Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
+        Tags will be removed from respective archive files.
+      parameters:
+      - description: Array of tag-objects to remove
+        in: body
+        name: request
+        required: true
+        schema:
+          items:
+            $ref: '#/definitions/api.ApiTag'
+          type: array
       produces:
       - text/plain
       responses:
@@ -1472,28 +1336,24 @@ paths:
        "400":
          description: Bad Request
          schema:
-            type: string
+            $ref: '#/definitions/api.ErrorResponse'
        "401":
          description: Unauthorized
          schema:
-            type: string
+            $ref: '#/definitions/api.ErrorResponse'
-        "403":
-          description: Forbidden
+        "404":
+          description: Job or tag does not exist
          schema:
-            type: string
-        "422":
-          description: 'Unprocessable Entity: creating user failed'
-          schema:
-            type: string
+            $ref: '#/definitions/api.ErrorResponse'
        "500":
          description: Internal Server Error
          schema:
-            type: string
+            $ref: '#/definitions/api.ErrorResponse'
      security:
      - ApiKeyAuth: []
-      summary: Adds a new user
+      summary: Removes all tags and job-relations for type:name tuple
      tags:
-      - User
+      - Tag remove
securityDefinitions:
  ApiKeyAuth:
    in: header

View File

@@ -12,7 +12,7 @@ var (
 )
 
 func cliInit() {
-	flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize swlite database file, config.json and .env")
+	flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize sqlite database file, config.json and .env")
 	flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
 	flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'hpc_user' table with ldap")
 	flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling")
@@ -24,10 +24,10 @@ func cliInit() {
 	flag.BoolVar(&flagForceDB, "force-db", false, "Force database version, clear dirty flag and exit")
 	flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
 	flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
-	flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `<username>:[admin,support,manager,api,user]:<password>`")
-	flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
+	flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: <username>:[admin,support,manager,api,user]:<password>")
+	flag.StringVar(&flagDelUser, "del-user", "", "Remove an existing user. Argument format: <username>")
 	flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
 	flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `<path-to-meta.json>:<path-to-data.json>,...`")
-	flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
+	flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug, info (default), warn, err, crit]`")
 	flag.Parse()
 }
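The removed backticks are not cosmetic: Go's flag package treats a backquoted word in a usage string as the flag's value placeholder in `-help` output (see `flag.UnquoteUsage`). A minimal demonstration:

```go
package main

import "flag"

var configFile string

func main() {
	// The backquoted word names the value placeholder in -help output:
	//   -config config.json
	//         Specify alternative path to config.json
	flag.StringVar(&configFile, "config", "./config.json",
		"Specify alternative path to `config.json`")
	flag.Parse()
}
```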

View File

@@ -5,7 +5,6 @@
 package main
 
 import (
-	"fmt"
 	"os"
 
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
@@ -33,6 +32,18 @@ const configString = `
 	"jwts": {
 		"max-age": "2000h"
 	},
+	"apiAllowedIPs": [
+		"*"
+	],
+	"enable-resampling": {
+		"trigger": 30,
+		"resolutions": [
+			600,
+			300,
+			120,
+			60
+		]
+	},
 	"clusters": [
 		{
 			"name": "name",
@@ -62,24 +73,23 @@ const configString = `
 func initEnv() {
 	if util.CheckFileExists("var") {
-		fmt.Print("Directory ./var already exists. Exiting!\n")
-		os.Exit(0)
+		log.Exit("Directory ./var already exists. Cautiously exiting application initialization.")
 	}
 
 	if err := os.WriteFile("config.json", []byte(configString), 0o666); err != nil {
-		log.Fatalf("Writing config.json failed: %s", err.Error())
+		log.Abortf("Could not write default ./config.json with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 
 	if err := os.WriteFile(".env", []byte(envString), 0o666); err != nil {
-		log.Fatalf("Writing .env failed: %s", err.Error())
+		log.Abortf("Could not write default ./.env file with permissions '0o666'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 
 	if err := os.Mkdir("var", 0o777); err != nil {
-		log.Fatalf("Mkdir var failed: %s", err.Error())
+		log.Abortf("Could not create default ./var folder with permissions '0o777'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 
 	err := repository.MigrateDB("sqlite3", "./var/job.db")
 	if err != nil {
-		log.Fatalf("Initialize job.db failed: %s", err.Error())
+		log.Abortf("Could not initialize default sqlite3 database as './var/job.db'. Application initialization failed, exited.\nError: %s\n", err.Error())
 	}
 }
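`log.Exit` and `log.Abortf` come from cc-backend's `pkg/log`; their bodies are not part of this diff. A plausible minimal sketch, assuming `Exit` terminates with status 0 (planned shutdown) and `Abortf` with status 1:

```go
package log

import (
	"fmt"
	"os"
)

// Exit logs a final message and terminates with status 0 (planned shutdown).
func Exit(msg ...any) {
	fmt.Fprintln(os.Stderr, msg...)
	os.Exit(0)
}

// Abortf logs a formatted fatal message and terminates with status 1.
func Abortf(format string, args ...any) {
	fmt.Fprintf(os.Stderr, format, args...)
	os.Exit(1)
}
```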

View File

@@ -25,6 +25,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/pkg/runtimeEnv"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	"github.com/google/gops/agent"
+	"github.com/joho/godotenv"
 
 	_ "github.com/go-sql-driver/mysql"
 	_ "github.com/mattn/go-sqlite3"
@@ -61,15 +62,24 @@ func main() {
 	// Apply config flags for pkg/log
 	log.Init(flagLogLevel, flagLogDateTime)
 
+	// If init flag set, run tasks here before any file dependencies cause errors
+	if flagInit {
+		initEnv()
+		log.Exit("Successfully setup environment!\n" +
+			"Please review config.json and .env and adjust it to your needs.\n" +
+			"Add your job-archive at ./var/job-archive.")
+	}
+
 	// See https://github.com/google/gops (Runtime overhead is almost zero)
 	if flagGops {
 		if err := agent.Listen(agent.Options{}); err != nil {
-			log.Fatalf("gops/agent.Listen failed: %s", err.Error())
+			log.Abortf("Could not start gops agent with 'gops/agent.Listen(agent.Options{})'. Application startup failed, exited.\nError: %s\n", err.Error())
 		}
 	}
 
-	if err := runtimeEnv.LoadEnv("./.env"); err != nil && !os.IsNotExist(err) {
-		log.Fatalf("parsing './.env' file failed: %s", err.Error())
+	err := godotenv.Load()
+	if err != nil {
+		log.Abortf("Could not parse existing .env file at location './.env'. Application startup failed, exited.\nError: %s\n", err.Error())
 	}
 
 	// Initialize sub-modules and handle command line flags.
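`godotenv.Load()` without arguments reads `./.env` from the working directory, matching the path the removed `runtimeEnv.LoadEnv` call used. A standalone usage sketch (the `SESSION_KEY` key name is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/joho/godotenv"
)

func main() {
	// Load() without arguments reads ./.env and populates the process environment.
	if err := godotenv.Load(); err != nil {
		log.Fatalf("could not load .env: %s", err.Error())
	}
	fmt.Println("SESSION_KEY set:", os.Getenv("SESSION_KEY") != "")
}
```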
@@ -87,37 +97,29 @@ func main() {
 	if flagMigrateDB {
 		err := repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
-			log.Fatal(err)
+			log.Abortf("MigrateDB Failed: Could not migrate '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
 		}
-		os.Exit(0)
+		log.Exitf("MigrateDB Success: Migrated '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
 	}
 
 	if flagRevertDB {
 		err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
-			log.Fatal(err)
+			log.Abortf("RevertDB Failed: Could not revert '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1), err.Error())
 		}
-		os.Exit(0)
+		log.Exitf("RevertDB Success: Reverted '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, (repository.Version - 1))
 	}
 
 	if flagForceDB {
 		err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB)
 		if err != nil {
-			log.Fatal(err)
+			log.Abortf("ForceDB Failed: Could not force '%s' database at location '%s' to version %d.\nError: %s\n", config.Keys.DBDriver, config.Keys.DB, repository.Version, err.Error())
 		}
-		os.Exit(0)
+		log.Exitf("ForceDB Success: Forced '%s' database at location '%s' to version %d.\n", config.Keys.DBDriver, config.Keys.DB, repository.Version)
 	}
 
 	repository.Connect(config.Keys.DBDriver, config.Keys.DB)
 
-	if flagInit {
-		initEnv()
-		fmt.Print("Successfully setup environment!\n")
-		fmt.Print("Please review config.json and .env and adjust it to your needs.\n")
-		fmt.Print("Add your job-archive at ./var/job-archive.\n")
-		os.Exit(0)
-	}
-
 	if !config.Keys.DisableAuthentication {
 		auth.Init()
@@ -125,20 +127,27 @@ func main() {
 		if flagNewUser != "" {
 			parts := strings.SplitN(flagNewUser, ":", 3)
 			if len(parts) != 3 || len(parts[0]) == 0 {
-				log.Fatal("invalid argument format for user creation")
+				log.Abortf("Add User: Could not parse supplied argument format: No changes.\n"+
+					"Want: <username>:[admin,support,manager,api,user]:<password>\n"+
+					"Have: %s\n", flagNewUser)
 			}
 
 			ur := repository.GetUserRepository()
 			if err := ur.AddUser(&schema.User{
 				Username: parts[0], Projects: make([]string, 0), Password: parts[2], Roles: strings.Split(parts[1], ","),
 			}); err != nil {
-				log.Fatalf("adding '%s' user authentication failed: %v", parts[0], err)
+				log.Abortf("Add User: Could not add new user authentication for '%s' and roles '%s'.\nError: %s\n", parts[0], parts[1], err.Error())
+			} else {
+				log.Printf("Add User: Added new user '%s' with roles '%s'.\n", parts[0], parts[1])
 			}
 		}
 
 		if flagDelUser != "" {
 			ur := repository.GetUserRepository()
 			if err := ur.DelUser(flagDelUser); err != nil {
-				log.Fatalf("deleting user failed: %v", err)
+				log.Abortf("Delete User: Could not delete user '%s' from DB.\nError: %s\n", flagDelUser, err.Error())
+			} else {
+				log.Printf("Delete User: Deleted user '%s' from DB.\n", flagDelUser)
 			}
 		}
@@ -146,60 +155,64 @@ func main() {
 		if flagSyncLDAP {
 			if authHandle.LdapAuth == nil {
-				log.Fatal("cannot sync: LDAP authentication is not configured")
+				log.Abort("Sync LDAP: LDAP authentication is not configured, could not synchronize. No changes, exited.")
 			}
 
 			if err := authHandle.LdapAuth.Sync(); err != nil {
-				log.Fatalf("LDAP sync failed: %v", err)
+				log.Abortf("Sync LDAP: Could not synchronize, failed with error.\nError: %s\n", err.Error())
 			}
-			log.Info("LDAP sync successful")
+			log.Print("Sync LDAP: LDAP synchronization successful.")
 		}
 
 		if flagGenJWT != "" {
 			ur := repository.GetUserRepository()
 			user, err := ur.GetUser(flagGenJWT)
 			if err != nil {
-				log.Fatalf("could not get user from JWT: %v", err)
+				log.Abortf("JWT: Could not get supplied user '%s' from DB. No changes, exited.\nError: %s\n", flagGenJWT, err.Error())
 			}
 
 			if !user.HasRole(schema.RoleApi) {
-				log.Warnf("user '%s' does not have the API role", user.Username)
+				log.Warnf("JWT: User '%s' does not have the role 'api'. REST API endpoints will return error!\n", user.Username)
 			}
 
 			jwt, err := authHandle.JwtAuth.ProvideJWT(user)
 			if err != nil {
-				log.Fatalf("failed to provide JWT to user '%s': %v", user.Username, err)
+				log.Abortf("JWT: User '%s' found in DB, but failed to provide JWT.\nError: %s\n", user.Username, err.Error())
 			}
 
-			fmt.Printf("MAIN > JWT for '%s': %s\n", user.Username, jwt)
+			log.Printf("JWT: Successfully generated JWT for user '%s': %s\n", user.Username, jwt)
 		}
 
 	} else if flagNewUser != "" || flagDelUser != "" {
-		log.Fatal("arguments --add-user and --del-user can only be used if authentication is enabled")
+		log.Abort("Error: Arguments '--add-user' and '--del-user' can only be used if authentication is enabled. No changes, exited.")
 	}
 
 	if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil {
-		log.Fatalf("failed to initialize archive: %s", err.Error())
+		log.Abortf("Init: Failed to initialize archive.\nError: %s\n", err.Error())
 	}
 
 	if err := metricdata.Init(); err != nil {
-		log.Fatalf("failed to initialize metricdata repository: %s", err.Error())
+		log.Abortf("Init: Failed to initialize metricdata repository.\nError: %s\n", err.Error())
 	}
 
 	if flagReinitDB {
 		if err := importer.InitDB(); err != nil {
-			log.Fatalf("failed to re-initialize repository DB: %s", err.Error())
+			log.Abortf("Init DB: Failed to re-initialize repository DB.\nError: %s\n", err.Error())
+		} else {
+			log.Print("Init DB: Successfully re-initialized repository DB.")
 		}
 	}
 
 	if flagImportJob != "" {
 		if err := importer.HandleImportFlag(flagImportJob); err != nil {
-			log.Fatalf("job import failed: %s", err.Error())
+			log.Abortf("Import Job: Job import failed.\nError: %s\n", err.Error())
+		} else {
+			log.Printf("Import Job: Imported Job '%s' into DB.\n", flagImportJob)
 		}
 	}
 
 	if !flagServer {
-		return
+		log.Exit("No errors, server flag not set. Exiting cc-backend.")
 	}
 
 	archiver.Start(repository.GetJobRepository())

View File

@@ -18,6 +18,7 @@ import (
 	"time"
 
 	"github.com/99designs/gqlgen/graphql/handler"
+	"github.com/99designs/gqlgen/graphql/handler/transport"
 	"github.com/99designs/gqlgen/graphql/playground"
 	"github.com/ClusterCockpit/cc-backend/internal/api"
 	"github.com/ClusterCockpit/cc-backend/internal/archiver"
@@ -53,18 +54,29 @@ func serverInit() {
 	// Setup the http.Handler/Router used by the server
 	graph.Init()
 	resolver := graph.GetResolverInstance()
-	graphQLEndpoint := handler.NewDefaultServer(
+	graphQLServer := handler.New(
 		generated.NewExecutableSchema(generated.Config{Resolvers: resolver}))
+
+	// graphQLServer.AddTransport(transport.SSE{})
+	graphQLServer.AddTransport(transport.POST{})
+	// graphQLServer.AddTransport(transport.Websocket{
+	// 	KeepAlivePingInterval: 10 * time.Second,
+	// 	Upgrader: websocket.Upgrader{
+	// 		CheckOrigin: func(r *http.Request) bool {
+	// 			return true
+	// 		},
+	// 	},
+	// })
 
 	if os.Getenv("DEBUG") != "1" {
 		// Having this handler means that an error message is returned via GraphQL instead of the connection simply being closed.
 		// The problem with this is that then, no more stacktrace is printed to stderr.
-		graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error {
+		graphQLServer.SetRecoverFunc(func(ctx context.Context, err any) error {
 			switch e := err.(type) {
 			case string:
 				return fmt.Errorf("MAIN > Panic: %s", e)
 			case error:
-				return fmt.Errorf("MAIN > Panic caused by: %w", e)
+				return fmt.Errorf("MAIN > Panic caused by: %s", e.Error())
 			}
 
 			return errors.New("MAIN > Internal server error (panic)")
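Unlike `handler.NewDefaultServer`, `handler.New` registers no transports, so only explicitly added ones are served; the diff enables POST only. A hedged sketch of the same pattern (the GET transport is an illustrative extra, not part of this commit):

```go
package server

import (
	"github.com/99designs/gqlgen/graphql"
	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/handler/transport"
)

func newGraphQLServer(es graphql.ExecutableSchema) *handler.Server {
	srv := handler.New(es)             // starts with no transports at all
	srv.AddTransport(transport.POST{}) // JSON POST bodies, as enabled above
	srv.AddTransport(transport.GET{})  // illustrative extra: queries via URL params
	return srv
}
```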
@ -78,7 +90,7 @@ func serverInit() {
router = mux.NewRouter() router = mux.NewRouter()
buildInfo := web.Build{Version: version, Hash: commit, Buildtime: date} buildInfo := web.Build{Version: version, Hash: commit, Buildtime: date}
info := map[string]interface{}{} info := map[string]any{}
info["hasOpenIDConnect"] = false info["hasOpenIDConnect"] = false
if config.Keys.OpenIDConfig != nil { if config.Keys.OpenIDConfig != nil {
@ -208,7 +220,7 @@ func serverInit() {
router.PathPrefix("/swagger/").Handler(httpSwagger.Handler( router.PathPrefix("/swagger/").Handler(httpSwagger.Handler(
httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet) httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet)
} }
secured.Handle("/query", graphQLEndpoint) secured.Handle("/query", graphQLServer)
// Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project. // Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project.
secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) { secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) {
@ -268,7 +280,7 @@ func serverStart() {
// Start http or https server // Start http or https server
listener, err := net.Listen("tcp", config.Keys.Addr) listener, err := net.Listen("tcp", config.Keys.Addr)
if err != nil { if err != nil {
log.Fatalf("starting http listener failed: %v", err) log.Abortf("Server Start: Starting http listener on '%s' failed.\nError: %s\n", config.Keys.Addr, err.Error())
} }
if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" { if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
@ -281,7 +293,7 @@ func serverStart() {
cert, err := tls.LoadX509KeyPair( cert, err := tls.LoadX509KeyPair(
config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile) config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
if err != nil { if err != nil {
log.Fatalf("loading X509 keypair failed: %v", err) log.Abortf("Server Start: Loading X509 keypair failed. Check options 'https-cert-file' and 'https-key-file' in 'config.json'.\nError: %s\n", err.Error())
} }
listener = tls.NewListener(listener, &tls.Config{ listener = tls.NewListener(listener, &tls.Config{
Certificates: []tls.Certificate{cert}, Certificates: []tls.Certificate{cert},
@ -292,20 +304,20 @@ func serverStart() {
MinVersion: tls.VersionTLS12, MinVersion: tls.VersionTLS12,
PreferServerCipherSuites: true, PreferServerCipherSuites: true,
}) })
fmt.Printf("HTTPS server listening at %s...", config.Keys.Addr) log.Printf("HTTPS server listening at %s...\n", config.Keys.Addr)
} else { } else {
fmt.Printf("HTTP server listening at %s...", config.Keys.Addr) log.Printf("HTTP server listening at %s...\n", config.Keys.Addr)
} }
// //
// Because this program will want to bind to a privileged port (like 80), the listener must // Because this program will want to bind to a privileged port (like 80), the listener must
// be established first, then the user can be changed, and after that, // be established first, then the user can be changed, and after that,
// the actual http server can be started. // the actual http server can be started.
if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil { if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
log.Fatalf("error while preparing server start: %s", err.Error()) log.Abortf("Server Start: Error while preparing server start.\nError: %s\n", err.Error())
} }
if err = server.Serve(listener); err != nil && err != http.ErrServerClosed { if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
log.Fatalf("starting server failed: %v", err) log.Abortf("Server Start: Starting server failed.\nError: %s\n", err.Error())
} }
} }
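The bind-first/drop-later ordering described in the comment above, reduced to a self-contained sketch (placeholder handler and port; the real privilege drop is runtimeEnv.DropPrivileges with the configured user and group):

package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	// 1. Bind while still privileged: ports below 1024 require root or CAP_NET_BIND_SERVICE.
	listener, err := net.Listen("tcp", ":80")
	if err != nil {
		log.Fatal(err)
	}
	// 2. Drop privileges here (setgid/setuid to an unprivileged account),
	//    which is what runtimeEnv.DropPrivileges does in cc-backend.
	// 3. Only then hand the already-bound socket to the HTTP server.
	log.Fatal(http.Serve(listener, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.Write([]byte("ok\n"))
	})))
}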
View File
@ -17,6 +17,9 @@
60 60
] ]
}, },
"apiAllowedIPs": [
"*"
],
"emission-constant": 317, "emission-constant": 317,
"clusters": [ "clusters": [
{ {
View File
@ -1,50 +1,62 @@
{ {
"addr": "0.0.0.0:443", "addr": "0.0.0.0:443",
"ldap": { "ldap": {
"url": "ldaps://test", "url": "ldaps://test",
"user_base": "ou=people,ou=hpc,dc=test,dc=de", "user_base": "ou=people,ou=hpc,dc=test,dc=de",
"search_dn": "cn=hpcmonitoring,ou=roadm,ou=profile,ou=hpc,dc=test,dc=de", "search_dn": "cn=hpcmonitoring,ou=roadm,ou=profile,ou=hpc,dc=test,dc=de",
"user_bind": "uid={username},ou=people,ou=hpc,dc=test,dc=de", "user_bind": "uid={username},ou=people,ou=hpc,dc=test,dc=de",
"user_filter": "(&(objectclass=posixAccount))" "user_filter": "(&(objectclass=posixAccount))"
}, },
"https-cert-file": "/etc/letsencrypt/live/url/fullchain.pem", "https-cert-file": "/etc/letsencrypt/live/url/fullchain.pem",
"https-key-file": "/etc/letsencrypt/live/url/privkey.pem", "https-key-file": "/etc/letsencrypt/live/url/privkey.pem",
"user": "clustercockpit", "user": "clustercockpit",
"group": "clustercockpit", "group": "clustercockpit",
"archive": { "archive": {
"kind": "file", "kind": "file",
"path": "./var/job-archive" "path": "./var/job-archive"
}, },
"validate": true, "validate": false,
"clusters": [ "apiAllowedIPs": [
{ "*"
"name": "test", ],
"metricDataRepository": { "clusters": [
"kind": "cc-metric-store", {
"url": "http://localhost:8082", "name": "test",
"token": "eyJhbGciOiJF-E-pQBQ" "metricDataRepository": {
}, "kind": "cc-metric-store",
"filterRanges": { "url": "http://localhost:8082",
"numNodes": { "token": "eyJhbGciOiJF-E-pQBQ"
"from": 1, },
"to": 64 "filterRanges": {
}, "numNodes": {
"duration": { "from": 1,
"from": 0, "to": 64
"to": 86400 },
}, "duration": {
"startTime": { "from": 0,
"from": "2022-01-01T00:00:00Z", "to": 86400
"to": null },
} "startTime": {
} "from": "2022-01-01T00:00:00Z",
"to": null
} }
], }
"jwts": { }
"cookieName": "", ],
"validateUser": false, "jwts": {
"max-age": "2000h", "cookieName": "",
"trustedIssuer": "" "validateUser": false,
}, "max-age": "2000h",
"short-running-jobs-duration": 300 "trustedIssuer": ""
},
"enable-resampling": {
"trigger": 30,
"resolutions": [
600,
300,
120,
60
]
},
"short-running-jobs-duration": 300
} }
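Both sample configs use the "*" wildcard, which disables the source-IP check for token-authenticated REST requests entirely. A sketch of a stricter setting (hypothetical addresses; entries are compared as literal strings by the securedCheck helper further below, so CIDR ranges are not supported):

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/internal/util"
)

func main() {
	// Equivalent to "apiAllowedIPs": ["127.0.0.1", "192.168.0.10"] in config.json.
	config.Keys.ApiAllowedIPs = []string{"127.0.0.1", "192.168.0.10"}
	fmt.Println(util.Contains(config.Keys.ApiAllowedIPs, "127.0.0.1")) // true: allowed
	fmt.Println(util.Contains(config.Keys.ApiAllowedIPs, "10.0.0.99")) // false: rejected
}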
go.mod
View File
@ -2,6 +2,8 @@ module github.com/ClusterCockpit/cc-backend
go 1.23.5 go 1.23.5
toolchain go1.24.1
require ( require (
github.com/99designs/gqlgen v0.17.66 github.com/99designs/gqlgen v0.17.66
github.com/ClusterCockpit/cc-units v0.4.0 github.com/ClusterCockpit/cc-units v0.4.0
@ -10,7 +12,7 @@ require (
github.com/go-co-op/gocron/v2 v2.16.0 github.com/go-co-op/gocron/v2 v2.16.0
github.com/go-ldap/ldap/v3 v3.4.10 github.com/go-ldap/ldap/v3 v3.4.10
github.com/go-sql-driver/mysql v1.9.0 github.com/go-sql-driver/mysql v1.9.0
github.com/golang-jwt/jwt/v5 v5.2.1 github.com/golang-jwt/jwt/v5 v5.2.2
github.com/golang-migrate/migrate/v4 v4.18.2 github.com/golang-migrate/migrate/v4 v4.18.2
github.com/google/gops v0.3.28 github.com/google/gops v0.3.28
github.com/gorilla/handlers v1.5.2 github.com/gorilla/handlers v1.5.2
@ -56,6 +58,7 @@ require (
github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
github.com/joho/godotenv v1.5.1 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect
@ -78,7 +81,7 @@ require (
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
go.uber.org/atomic v1.11.0 // indirect go.uber.org/atomic v1.11.0 // indirect
golang.org/x/mod v0.23.0 // indirect golang.org/x/mod v0.23.0 // indirect
golang.org/x/net v0.35.0 // indirect golang.org/x/net v0.36.0 // indirect
golang.org/x/sync v0.11.0 // indirect golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect golang.org/x/text v0.22.0 // indirect
go.sum
View File
@ -83,8 +83,8 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8= github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8=
github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -137,6 +137,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@ -279,8 +281,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
View File
@ -45,6 +45,9 @@ func setup(t *testing.T) *api.RestApi {
"jwts": { "jwts": {
"max-age": "2m" "max-age": "2m"
}, },
"apiAllowedIPs": [
"*"
],
"clusters": [ "clusters": [
{ {
"name": "testcluster", "name": "testcluster",
View File
@ -23,7 +23,7 @@ const docTemplate = `{
"host": "{{.Host}}", "host": "{{.Host}}",
"basePath": "{{.BasePath}}", "basePath": "{{.BasePath}}",
"paths": { "paths": {
"/clusters/": { "/api/clusters/": {
"get": { "get": {
"security": [ "security": [
{ {
@ -80,7 +80,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/": { "/api/jobs/": {
"get": { "get": {
"security": [ "security": [
{ {
@ -175,7 +175,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/delete_job/": { "/api/jobs/delete_job/": {
"delete": { "delete": {
"security": [ "security": [
{ {
@ -250,7 +250,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/delete_job/{id}": { "/api/jobs/delete_job/{id}": {
"delete": { "delete": {
"security": [ "security": [
{ {
@ -320,7 +320,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/delete_job_before/{ts}": { "/api/jobs/delete_job_before/{ts}": {
"delete": { "delete": {
"security": [ "security": [
{ {
@ -390,7 +390,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/edit_meta/{id}": { "/api/jobs/edit_meta/{id}": {
"post": { "post": {
"security": [ "security": [
{ {
@ -460,7 +460,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/start_job/": { "/api/jobs/start_job/": {
"post": { "post": {
"security": [ "security": [
{ {
@ -529,7 +529,7 @@ const docTemplate = `{
} }
} }
View File
"security": [ "security": [
{ {
@ -601,7 +601,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/tag_job/{id}": { "/api/jobs/tag_job/{id}": {
"post": { "post": {
"security": [ "security": [
{ {
@ -674,7 +674,7 @@ const docTemplate = `{
} }
} }
}, },
"/jobs/{id}": { "/api/jobs/{id}": {
"get": { "get": {
"security": [ "security": [
{ {
@ -833,185 +833,14 @@ const docTemplate = `{
} }
} }
}, },
"/notice/": { "/api/users/": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Modifies the content of notice.txt, shown as notice box on the homepage.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Updates or empties the notice box content",
"parameters": [
{
"type": "string",
"description": "Priority 1: New content to display",
"name": "new-content",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response Message",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: The user could not be updated",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
},
"/user/{id}": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Modifies user defined by username (id) in one of four possible ways.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Updates an existing user",
"parameters": [
{
"type": "string",
"description": "Database ID of User",
"name": "id",
"in": "path",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 1: Role to add",
"name": "add-role",
"in": "formData"
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 2: Role to remove",
"name": "remove-role",
"in": "formData"
},
{
"type": "string",
"description": "Priority 3: Project to add",
"name": "add-project",
"in": "formData"
},
{
"type": "string",
"description": "Priority 4: Project to remove",
"name": "remove-project",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response Message",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: The user could not be updated",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
},
"/users/": {
"get": { "get": {
"security": [ "security": [
{ {
"ApiKeyAuth": [] "ApiKeyAuth": []
} }
], ],
"description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", "description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.",
"produces": [ "produces": [
"application/json" "application/json"
], ],
@ -1063,70 +892,111 @@ const docTemplate = `{
} }
} }
} }
}, }
"post": { },
"/jobs/tag_job/{id}": {
"delete": {
"security": [ "security": [
{ {
"ApiKeyAuth": [] "ApiKeyAuth": []
} }
], ],
"description": "User specified in form data will be saved to database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.", "description": "Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". Private tags can not be deleted via API.\nIf tagged job is already finished: Tag will be removed from respective archive files.",
"consumes": [ "consumes": [
"multipart/form-data" "application/json"
],
"produces": [
"application/json"
],
"tags": [
"Job add and modify"
],
"summary": "Removes one or more tags from a job",
"parameters": [
{
"type": "integer",
"description": "Job Database ID",
"name": "id",
"in": "path",
"required": true
},
{
"description": "Array of tag-objects to remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/api.ApiTag"
}
}
}
],
"responses": {
"200": {
"description": "Updated job resource",
"schema": {
"$ref": "#/definitions/schema.Job"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
},
"404": {
"description": "Job or tag does not exist",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"$ref": "#/definitions/api.ErrorResponse"
}
}
}
}
},
"/tags/": {
"delete": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Removes tags by type and name. Name and Type of Tag(s) must match.\nTag Scope is required for matching, options: \"global\", \"admin\". Private tags can not be deleted via API.\nTag wills be removed from respective archive files.",
"consumes": [
"application/json"
], ],
"produces": [ "produces": [
"text/plain" "text/plain"
], ],
"tags": [ "tags": [
"User" "Tag remove"
], ],
"summary": "Adds a new user", "summary": "Removes all tags and job-relations for type:name tuple",
"parameters": [ "parameters": [
{ {
"type": "string", "description": "Array of tag-objects to remove",
"description": "Unique user ID", "name": "request",
"name": "username", "in": "body",
"in": "formData", "required": true,
"required": true "schema": {
}, "type": "array",
{ "items": {
"type": "string", "$ref": "#/definitions/api.ApiTag"
"description": "User password", }
"name": "password", }
"in": "formData",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "User role",
"name": "role",
"in": "formData",
"required": true
},
{
"type": "string",
"description": "Managed project, required for new manager role user",
"name": "project",
"in": "formData"
},
{
"type": "string",
"description": "Users name",
"name": "name",
"in": "formData"
},
{
"type": "string",
"description": "Users email",
"name": "email",
"in": "formData"
} }
], ],
"responses": { "responses": {
@ -1139,93 +1009,25 @@ const docTemplate = `{
"400": { "400": {
"description": "Bad Request", "description": "Bad Request",
"schema": { "schema": {
"type": "string" "$ref": "#/definitions/api.ErrorResponse"
} }
}, },
"401": { "401": {
"description": "Unauthorized", "description": "Unauthorized",
"schema": { "schema": {
"type": "string" "$ref": "#/definitions/api.ErrorResponse"
} }
}, },
"403": { "404": {
"description": "Forbidden", "description": "Job or tag does not exist",
"schema": { "schema": {
"type": "string" "$ref": "#/definitions/api.ErrorResponse"
}
},
"422": {
"description": "Unprocessable Entity: creating user failed",
"schema": {
"type": "string"
} }
}, },
"500": { "500": {
"description": "Internal Server Error", "description": "Internal Server Error",
"schema": { "schema": {
"type": "string" "$ref": "#/definitions/api.ErrorResponse"
}
}
}
},
"delete": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "User defined by username in form data will be deleted from database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Deletes a user",
"parameters": [
{
"type": "string",
"description": "User ID to delete",
"name": "username",
"in": "formData",
"required": true
}
],
"responses": {
"200": {
"description": "User deleted successfully"
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: deleting user failed",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
} }
} }
} }
@ -2191,7 +1993,7 @@ const docTemplate = `{
var SwaggerInfo = &swag.Spec{ var SwaggerInfo = &swag.Spec{
Version: "1.0.0", Version: "1.0.0",
Host: "localhost:8080", Host: "localhost:8080",
BasePath: "/api", BasePath: "",
Schemes: []string{}, Schemes: []string{},
Title: "ClusterCockpit REST API", Title: "ClusterCockpit REST API",
Description: "API for batch job control.", Description: "API for batch job control.",
View File
@ -46,7 +46,6 @@ import (
// @license.url https://opensource.org/licenses/MIT // @license.url https://opensource.org/licenses/MIT
// @host localhost:8080 // @host localhost:8080
// @basePath /api
// @securityDefinitions.apikey ApiKeyAuth // @securityDefinitions.apikey ApiKeyAuth
// @in header // @in header
@ -69,22 +68,27 @@ func New() *RestApi {
func (api *RestApi) MountApiRoutes(r *mux.Router) { func (api *RestApi) MountApiRoutes(r *mux.Router) {
r.StrictSlash(true) r.StrictSlash(true)
// REST API Uses TokenAuth
// User List
r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet)
// Cluster List
r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet)
// Job Handler
r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/jobs/start_job/", api.startJob).Methods(http.MethodPost, http.MethodPut)
r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/jobs/stop_job/", api.stopJobByRequest).Methods(http.MethodPost, http.MethodPut)
// r.HandleFunc("/jobs/import/", api.importJob).Methods(http.MethodPost, http.MethodPut) // r.HandleFunc("/jobs/import/", api.importJob).Methods(http.MethodPost, http.MethodPut)
r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet) r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet)
r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost) r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost)
r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet) r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet)
r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch)
r.HandleFunc("/jobs/tag_job/{id}", api.removeTagJob).Methods(http.MethodDelete)
r.HandleFunc("/jobs/edit_meta/{id}", api.editMeta).Methods(http.MethodPost, http.MethodPatch) r.HandleFunc("/jobs/edit_meta/{id}", api.editMeta).Methods(http.MethodPost, http.MethodPatch)
r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet) r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet)
r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete)
r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete)
r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete)
r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet) r.HandleFunc("/tags/", api.removeTags).Methods(http.MethodDelete)
if api.MachineStateDir != "" { if api.MachineStateDir != "" {
r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet) r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet)
@ -94,7 +98,7 @@ func (api *RestApi) MountApiRoutes(r *mux.Router) {
func (api *RestApi) MountUserApiRoutes(r *mux.Router) { func (api *RestApi) MountUserApiRoutes(r *mux.Router) {
r.StrictSlash(true) r.StrictSlash(true)
// REST API Uses TokenAuth
r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet) r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet)
r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost) r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost)
r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet) r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet)
@ -103,7 +107,7 @@ func (api *RestApi) MountUserApiRoutes(r *mux.Router) {
func (api *RestApi) MountConfigApiRoutes(r *mux.Router) { func (api *RestApi) MountConfigApiRoutes(r *mux.Router) {
r.StrictSlash(true) r.StrictSlash(true)
// Settings Frontend Uses SessionAuth
if api.Authentication != nil { if api.Authentication != nil {
r.HandleFunc("/roles/", api.getRoles).Methods(http.MethodGet) r.HandleFunc("/roles/", api.getRoles).Methods(http.MethodGet)
r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut) r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut)
@ -116,7 +120,7 @@ func (api *RestApi) MountConfigApiRoutes(r *mux.Router) {
func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) { func (api *RestApi) MountFrontendApiRoutes(r *mux.Router) {
r.StrictSlash(true) r.StrictSlash(true)
// Settings Frontend Uses SessionAuth
if api.Authentication != nil { if api.Authentication != nil {
r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet) r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet)
r.HandleFunc("/configuration/", api.updateConfiguration).Methods(http.MethodPost) r.HandleFunc("/configuration/", api.updateConfiguration).Methods(http.MethodPost)
@ -215,50 +219,12 @@ func handleError(err error, statusCode int, rw http.ResponseWriter) {
}) })
} }
func decode(r io.Reader, val interface{}) error { func decode(r io.Reader, val any) error {
dec := json.NewDecoder(r) dec := json.NewDecoder(r)
dec.DisallowUnknownFields() dec.DisallowUnknownFields()
return dec.Decode(val) return dec.Decode(val)
} }
func securedCheck(r *http.Request) error {
user := repository.GetUserFromContext(r.Context())
if user == nil {
return fmt.Errorf("no user in context")
}
if user.AuthType == schema.AuthToken {
// If nothing declared in config: deny all request to this endpoint
if config.Keys.ApiAllowedIPs == nil || len(config.Keys.ApiAllowedIPs) == 0 {
return fmt.Errorf("missing configuration key ApiAllowedIPs")
}
if config.Keys.ApiAllowedIPs[0] == "*" {
return nil
}
// extract IP address
IPAddress := r.Header.Get("X-Real-Ip")
if IPAddress == "" {
IPAddress = r.Header.Get("X-Forwarded-For")
}
if IPAddress == "" {
IPAddress = r.RemoteAddr
}
if strings.Contains(IPAddress, ":") {
IPAddress = strings.Split(IPAddress, ":")[0]
}
// check if IP is allowed
if !util.Contains(config.Keys.ApiAllowedIPs, IPAddress) {
return fmt.Errorf("unknown ip: %v", IPAddress)
}
}
return nil
}
// getClusters godoc // getClusters godoc
// @summary Lists all cluster configs // @summary Lists all cluster configs
// @tags Cluster query // @tags Cluster query
@ -271,7 +237,7 @@ func securedCheck(r *http.Request) error {
// @failure 403 {object} api.ErrorResponse "Forbidden" // @failure 403 {object} api.ErrorResponse "Forbidden"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /clusters/ [get] // @router /api/clusters/ [get]
func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) {
if user := repository.GetUserFromContext(r.Context()); user != nil && if user := repository.GetUserFromContext(r.Context()); user != nil &&
!user.HasRole(schema.RoleApi) { !user.HasRole(schema.RoleApi) {
@ -326,7 +292,7 @@ func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) {
// @failure 403 {object} api.ErrorResponse "Forbidden" // @failure 403 {object} api.ErrorResponse "Forbidden"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/ [get] // @router /api/jobs/ [get]
func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
withMetadata := false withMetadata := false
filter := &model.JobFilter{} filter := &model.JobFilter{}
@ -460,7 +426,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/{id} [get] // @router /api/jobs/{id} [get]
func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) {
// Fetch job from db // Fetch job from db
id, ok := mux.Vars(r)["id"] id, ok := mux.Vars(r)["id"]
@ -553,7 +519,7 @@ func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request)
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/{id} [post] // @router /api/jobs/{id} [post]
func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
// Fetch job from db // Fetch job from db
id, ok := mux.Vars(r)["id"] id, ok := mux.Vars(r)["id"]
@ -657,7 +623,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
// @failure 404 {object} api.ErrorResponse "Job does not exist" // @failure 404 {object} api.ErrorResponse "Job does not exist"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/edit_meta/{id} [post] // @router /api/jobs/edit_meta/{id} [post]
func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64) id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
if err != nil { if err != nil {
@ -703,7 +669,7 @@ func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) {
// @failure 404 {object} api.ErrorResponse "Job or tag does not exist" // @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/tag_job/{id} [post] // @router /api/jobs/tag_job/{id} [post]
func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64) id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
if err != nil { if err != nil {
@ -749,6 +715,114 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
json.NewEncoder(rw).Encode(job) json.NewEncoder(rw).Encode(job)
} }
// removeTagJob godoc
// @summary Removes one or more tags from a job
// @tags Job add and modify
// @description Removes tag(s) from a job specified by DB ID. Name and Type of Tag(s) must match.
// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
// @description If tagged job is already finished: Tag will be removed from respective archive files.
// @accept json
// @produce json
// @param id path int true "Job Database ID"
// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
// @success 200 {object} schema.Job "Updated job resource"
// @failure 400 {object} api.ErrorResponse "Bad Request"
// @failure 401 {object} api.ErrorResponse "Unauthorized"
// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth
// @router /jobs/tag_job/{id} [delete]
func (api *RestApi) removeTagJob(rw http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
if err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
job, err := api.JobRepository.FindById(r.Context(), id)
if err != nil {
http.Error(rw, err.Error(), http.StatusNotFound)
return
}
job.Tags, err = api.JobRepository.GetTags(repository.GetUserFromContext(r.Context()), &job.ID)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
var req TagJobApiRequest
if err := decode(r.Body, &req); err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
for _, rtag := range req {
// Only Global and Admin Tags
if rtag.Scope != "global" && rtag.Scope != "admin" {
log.Warnf("Cannot delete private tag for job %d: Skip", job.JobID)
continue
}
remainingTags, err := api.JobRepository.RemoveJobTagByRequest(repository.GetUserFromContext(r.Context()), job.ID, rtag.Type, rtag.Name, rtag.Scope)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
job.Tags = remainingTags
}
rw.Header().Add("Content-Type", "application/json")
rw.WriteHeader(http.StatusOK)
json.NewEncoder(rw).Encode(job)
}
// removeTags godoc
// @summary Removes all tags and job-relations for type:name tuple
// @tags Tag remove
// @description Removes tags by type and name. Name and Type of Tag(s) must match.
// @description Tag Scope is required for matching, options: "global", "admin". Private tags can not be deleted via API.
// @description Tags will be removed from respective archive files.
// @accept json
// @produce plain
// @param request body api.TagJobApiRequest true "Array of tag-objects to remove"
// @success 200 {string} string "Success Response"
// @failure 400 {object} api.ErrorResponse "Bad Request"
// @failure 401 {object} api.ErrorResponse "Unauthorized"
// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth
// @router /tags/ [delete]
func (api *RestApi) removeTags(rw http.ResponseWriter, r *http.Request) {
var req TagJobApiRequest
if err := decode(r.Body, &req); err != nil {
http.Error(rw, err.Error(), http.StatusBadRequest)
return
}
targetCount := len(req)
currentCount := 0
for _, rtag := range req {
// Only Global and Admin Tags
if rtag.Scope != "global" && rtag.Scope != "admin" {
log.Warn("Cannot delete private tag: Skip")
continue
}
err := api.JobRepository.RemoveTagByRequest(rtag.Type, rtag.Name, rtag.Scope)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
currentCount++
}
rw.WriteHeader(http.StatusOK)
rw.Write([]byte(fmt.Sprintf("Deleted Tags from DB: %d successfull of %d requested\n", currentCount, targetCount)))
}
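A client-side sketch of the two new DELETE endpoints (hypothetical host, job DB ID, token, and tag values; the JSON field names are assumed from the rtag.Type/Name/Scope accessors above, and the /api prefix assumes the mounting sketched earlier):

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func deleteTags(url string, body []byte) {
	req, err := http.NewRequest(http.MethodDelete, url, bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>") // placeholder token
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(url, "->", resp.Status)
}

func main() {
	// One element per tag to remove; only scope "global" or "admin" is accepted.
	tags := []byte(`[{"type": "testType", "name": "testName", "scope": "global"}]`)
	deleteTags("http://localhost:8080/api/jobs/tag_job/5", tags) // detach from one job
	deleteTags("http://localhost:8080/api/tags/", tags)          // drop the tag everywhere
}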
// startJob godoc // startJob godoc
// @summary Adds a new job as "running" // @summary Adds a new job as "running"
// @tags Job add and modify // @tags Job add and modify
@ -764,7 +838,7 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: The combination of jobId, clusterId and startTime does already exist" // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: The combination of jobId, clusterId and startTime does already exist"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/start_job/ [post] // @router /api/jobs/start_job/ [post]
func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
req := schema.JobMeta{BaseJob: schema.JobDefaults} req := schema.JobMeta{BaseJob: schema.JobDefaults}
if err := decode(r.Body, &req); err != nil { if err := decode(r.Body, &req); err != nil {
@ -837,7 +911,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: job has already been stopped" // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: job has already been stopped"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/stop_job/ [post] // @router /api/jobs/stop_job/ [post]
func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
// Parse request body // Parse request body
req := StopJobApiRequest{} req := StopJobApiRequest{}
@ -878,7 +952,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/delete_job/{id} [delete] // @router /api/jobs/delete_job/{id} [delete]
func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
// Fetch job (that will be stopped) from db // Fetch job (that will be stopped) from db
id, ok := mux.Vars(r)["id"] id, ok := mux.Vars(r)["id"]
@ -921,7 +995,7 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/delete_job/ [delete] // @router /api/jobs/delete_job/ [delete]
func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) {
// Parse request body // Parse request body
req := DeleteJobApiRequest{} req := DeleteJobApiRequest{}
@ -971,7 +1045,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" // @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
// @failure 500 {object} api.ErrorResponse "Internal Server Error" // @failure 500 {object} api.ErrorResponse "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /jobs/delete_job_before/{ts} [delete] // @router /api/jobs/delete_job_before/{ts} [delete]
func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
var cnt int var cnt int
// Fetch job (that will be stopped) from db // Fetch job (that will be stopped) from db
@ -1008,8 +1082,8 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
return return
} }
if job == nil || job.StartTime.Unix() >= req.StopTime { if job == nil || job.StartTime.Unix() > req.StopTime {
handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger than startTime %d", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix()), http.StatusBadRequest, rw) handleError(fmt.Errorf("jobId %d (id %d) on %s : stopTime %d must be larger/equal than startTime %d", job.JobID, job.ID, job.Cluster, req.StopTime, job.StartTime.Unix()), http.StatusBadRequest, rw)
return return
} }
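The relaxed comparison (> instead of >=) means a stop request whose stopTime equals the job's startTime is now accepted, so zero-duration jobs no longer fail; a minimal illustration:

package main

import "fmt"

func main() {
	start, stop := int64(1700000000), int64(1700000000) // equal timestamps
	fmt.Println(start >= stop) // true:  old check, request rejected
	fmt.Println(start > stop)  // false: new check, request accepted
}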
@ -1089,33 +1163,8 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
}) })
} }
// createUser godoc
// @summary Adds a new user
// @tags User
// @description User specified in form data will be saved to database.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @accept mpfd
// @produce plain
// @param username formData string true "Unique user ID"
// @param password formData string true "User password"
// @param role formData string true "User role" Enums(admin, support, manager, user, api)
// @param project formData string false "Managed project, required for new manager role user"
// @param name formData string false "Users name"
// @param email formData string false "Users email"
// @success 200 {string} string "Success Response"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 422 {string} string "Unprocessable Entity: creating user failed"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /users/ [post]
func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r) // SecuredCheck() only worked with TokenAuth: Removed
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
rw.Header().Set("Content-Type", "text/plain") rw.Header().Set("Content-Type", "text/plain")
me := repository.GetUserFromContext(r.Context()) me := repository.GetUserFromContext(r.Context())
@ -1158,28 +1207,8 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
fmt.Fprintf(rw, "User %v successfully created!\n", username) fmt.Fprintf(rw, "User %v successfully created!\n", username)
} }
// deleteUser godoc
// @summary Deletes a user
// @tags User
// @description User defined by username in form data will be deleted from database.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @accept mpfd
// @produce plain
// @param username formData string true "User ID to delete"
// @success 200 "User deleted successfully"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 422 {string} string "Unprocessable Entity: deleting user failed"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /users/ [delete]
func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r) // SecuredCheck() only worked with TokenAuth: Removed
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
http.Error(rw, "Only admins are allowed to delete a user", http.StatusForbidden) http.Error(rw, "Only admins are allowed to delete a user", http.StatusForbidden)
@ -1200,7 +1229,6 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
// @tags User // @tags User
// @description Returns a JSON-encoded list of users. // @description Returns a JSON-encoded list of users.
// @description Required query-parameter defines if all users or only users with additional special roles are returned. // @description Required query-parameter defines if all users or only users with additional special roles are returned.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @produce json // @produce json
// @param not-just-user query bool true "If returned list should contain all users or only users with additional special roles" // @param not-just-user query bool true "If returned list should contain all users or only users with additional special roles"
// @success 200 {array} api.ApiReturnedUser "List of users returned successfully" // @success 200 {array} api.ApiReturnedUser "List of users returned successfully"
@ -1209,13 +1237,9 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
// @failure 403 {string} string "Forbidden" // @failure 403 {string} string "Forbidden"
// @failure 500 {string} string "Internal Server Error" // @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth // @security ApiKeyAuth
// @router /users/ [get] // @router /api/users/ [get]
func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r) // SecuredCheck() only worked with TokenAuth: Removed
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
http.Error(rw, "Only admins are allowed to fetch a list of users", http.StatusForbidden) http.Error(rw, "Only admins are allowed to fetch a list of users", http.StatusForbidden)
@ -1231,33 +1255,8 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
json.NewEncoder(rw).Encode(users) json.NewEncoder(rw).Encode(users)
} }
// updateUser godoc
// @summary Updates an existing user
// @tags User
// @description Modifies user defined by username (id) in one of four possible ways.
// @description If more than one formValue is set then only the highest priority field is used.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @accept mpfd
// @produce plain
// @param id path string true "Database ID of User"
// @param add-role formData string false "Priority 1: Role to add" Enums(admin, support, manager, user, api)
// @param remove-role formData string false "Priority 2: Role to remove" Enums(admin, support, manager, user, api)
// @param add-project formData string false "Priority 3: Project to add"
// @param remove-project formData string false "Priority 4: Project to remove"
// @success 200 {string} string "Success Response Message"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 422 {string} string "Unprocessable Entity: The user could not be updated"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /user/{id} [post]
func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r) // SecuredCheck() only worked with TokenAuth: Removed
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
http.Error(rw, "Only admins are allowed to update a user", http.StatusForbidden) http.Error(rw, "Only admins are allowed to update a user", http.StatusForbidden)
@ -1300,29 +1299,8 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
} }
} }
// editNotice godoc
// @summary Updates or empties the notice box content
// @tags User
// @description Modifies the content of notice.txt, shown as notice box on the homepage.
// @description If more than one formValue is set then only the highest priority field is used.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @accept mpfd
// @produce plain
// @param new-content formData string false "Priority 1: New content to display"
// @success 200 {string} string "Success Response Message"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 422 {string} string "Unprocessable Entity: The user could not be updated"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /notice/ [post]
func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r) // SecuredCheck() only worked with TokenAuth: Removed
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) { if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
http.Error(rw, "Only admins are allowed to update the notice.txt file", http.StatusForbidden) http.Error(rw, "Only admins are allowed to update the notice.txt file", http.StatusForbidden)
@ -1364,12 +1342,6 @@ func (api *RestApi) editNotice(rw http.ResponseWriter, r *http.Request) {
} }
func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
rw.Header().Set("Content-Type", "text/plain") rw.Header().Set("Content-Type", "text/plain")
username := r.FormValue("username") username := r.FormValue("username")
me := repository.GetUserFromContext(r.Context()) me := repository.GetUserFromContext(r.Context())
@ -1398,11 +1370,7 @@ func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
} }
func (api *RestApi) getRoles(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getRoles(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r) // SecuredCheck() only worked with TokenAuth: Removed
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
user := repository.GetUserFromContext(r.Context()) user := repository.GetUserFromContext(r.Context())
if !user.HasRole(schema.RoleAdmin) { if !user.HasRole(schema.RoleAdmin) {
@ -1423,8 +1391,6 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
rw.Header().Set("Content-Type", "text/plain") rw.Header().Set("Content-Type", "text/plain")
key, value := r.FormValue("key"), r.FormValue("value") key, value := r.FormValue("key"), r.FormValue("value")
// fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value)
if err := repository.GetUserCfgRepo().UpdateConfig(key, value, repository.GetUserFromContext(r.Context())); err != nil { if err := repository.GetUserCfgRepo().UpdateConfig(key, value, repository.GetUserFromContext(r.Context())); err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity) http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return return
View File
@ -10,9 +10,11 @@ import (
"database/sql" "database/sql"
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt"
"net" "net"
"net/http" "net/http"
"os" "os"
"strings"
"sync" "sync"
"time" "time"
@ -20,6 +22,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/config"
"github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/internal/repository"
"github.com/ClusterCockpit/cc-backend/internal/util"
"github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/pkg/schema"
"github.com/gorilla/sessions" "github.com/gorilla/sessions"
@ -233,9 +236,9 @@ func (auth *Authentication) Login(
limiter := getIPUserLimiter(ip, username) limiter := getIPUserLimiter(ip, username)
if !limiter.Allow() { if !limiter.Allow() {
log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username) log.Warnf("AUTH/RATE > Too many login attempts for combination IP: %s, Username: %s", ip, username)
onfailure(rw, r, errors.New("Too many login attempts, try again in a few minutes.")) onfailure(rw, r, errors.New("Too many login attempts, try again in a few minutes."))
return return
} }
var dbUser *schema.User var dbUser *schema.User
@ -325,6 +328,14 @@ func (auth *Authentication) AuthApi(
onfailure(rw, r, err) onfailure(rw, r, err)
return return
} }
ipErr := securedCheck(user, r)
if ipErr != nil {
log.Infof("auth api -> secured check failed: %s", ipErr.Error())
onfailure(rw, r, ipErr)
return
}
if user != nil { if user != nil {
switch { switch {
case len(user.Roles) == 1: case len(user.Roles) == 1:
@ -360,6 +371,7 @@ func (auth *Authentication) AuthUserApi(
onfailure(rw, r, err) onfailure(rw, r, err)
return return
} }
if user != nil { if user != nil {
switch { switch {
case len(user.Roles) == 1: case len(user.Roles) == 1:
@ -445,3 +457,38 @@ func (auth *Authentication) Logout(onsuccess http.Handler) http.Handler {
onsuccess.ServeHTTP(rw, r) onsuccess.ServeHTTP(rw, r)
}) })
} }
// Helper moved to the middleware auth handlers
func securedCheck(user *schema.User, r *http.Request) error {
if user == nil {
return fmt.Errorf("no user for secured check")
}
// extract IP address for checking
IPAddress := r.Header.Get("X-Real-Ip")
if IPAddress == "" {
IPAddress = r.Header.Get("X-Forwarded-For")
}
if IPAddress == "" {
IPAddress = r.RemoteAddr
}
if strings.Contains(IPAddress, ":") {
IPAddress = strings.Split(IPAddress, ":")[0]
}
// If nothing is declared in the config: deny all requests to this API endpoint
if len(config.Keys.ApiAllowedIPs) == 0 {
return fmt.Errorf("missing configuration key ApiAllowedIPs")
}
// If wildcard declared in config: Continue
if config.Keys.ApiAllowedIPs[0] == "*" {
return nil
}
// check if IP is allowed
if !util.Contains(config.Keys.ApiAllowedIPs, IPAddress) {
return fmt.Errorf("unknown ip: %v", IPAddress)
}
return nil
}
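A same-package test sketch of the relocated helper (hypothetical addresses; note that the colon handling strips a ":port" suffix but would equally truncate a bare IPv6 address):

package auth

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/ClusterCockpit/cc-backend/internal/config"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func TestSecuredCheckAllowsListedIP(t *testing.T) {
	config.Keys.ApiAllowedIPs = []string{"192.168.0.10"} // hypothetical allow list
	r := httptest.NewRequest(http.MethodGet, "/api/jobs/", nil)
	r.Header.Set("X-Real-Ip", "192.168.0.10:54321") // ":port" suffix is stripped

	if err := securedCheck(&schema.User{Username: "demo"}, r); err != nil {
		t.Fatalf("expected listed IP to pass, got: %v", err)
	}
	if err := securedCheck(nil, r); err == nil {
		t.Fatal("expected an error for a nil user")
	}
}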

View File

@ -7,9 +7,9 @@ package config
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"log"
"os" "os"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/pkg/schema"
) )
@ -53,20 +53,20 @@ func Init(flagConfigFile string) {
raw, err := os.ReadFile(flagConfigFile) raw, err := os.ReadFile(flagConfigFile)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
log.Fatalf("CONFIG ERROR: %v", err) log.Abortf("Config Init: Could not read config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
} }
} else { } else {
if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil { if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
log.Fatalf("Validate config: %v\n", err) log.Abortf("Config Init: Could not validate config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
} }
dec := json.NewDecoder(bytes.NewReader(raw)) dec := json.NewDecoder(bytes.NewReader(raw))
dec.DisallowUnknownFields() dec.DisallowUnknownFields()
if err := dec.Decode(&Keys); err != nil { if err := dec.Decode(&Keys); err != nil {
log.Fatalf("could not decode: %v", err) log.Abortf("Config Init: Could not decode config file '%s'.\nError: %s\n", flagConfigFile, err.Error())
} }
if Keys.Clusters == nil || len(Keys.Clusters) < 1 { if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
log.Fatal("At least one cluster required in config!") log.Abort("Config Init: At least one cluster required in config. Exited with error.")
} }
} }
} }

File diff suppressed because it is too large

View File

@ -50,6 +50,7 @@ type IntRangeOutput struct {
type JobFilter struct { type JobFilter struct {
Tags []string `json:"tags,omitempty"` Tags []string `json:"tags,omitempty"`
DbID []string `json:"dbId,omitempty"`
JobID *StringInput `json:"jobId,omitempty"` JobID *StringInput `json:"jobId,omitempty"`
ArrayJobID *int `json:"arrayJobId,omitempty"` ArrayJobID *int `json:"arrayJobId,omitempty"`
User *StringInput `json:"user,omitempty"` User *StringInput `json:"user,omitempty"`
@ -81,11 +82,6 @@ type JobLinkResultList struct {
Count *int `json:"count,omitempty"` Count *int `json:"count,omitempty"`
} }
type JobMetricStatWithName struct {
Name string `json:"name"`
Stats *schema.MetricStatistics `json:"stats"`
}
type JobMetricWithName struct { type JobMetricWithName struct {
Name string `json:"name"` Name string `json:"name"`
Scope schema.MetricScope `json:"scope"` Scope schema.MetricScope `json:"scope"`
@ -100,6 +96,19 @@ type JobResultList struct {
HasNextPage *bool `json:"hasNextPage,omitempty"` HasNextPage *bool `json:"hasNextPage,omitempty"`
} }
type JobStats struct {
ID int `json:"id"`
JobID string `json:"jobId"`
StartTime int `json:"startTime"`
Duration int `json:"duration"`
Cluster string `json:"cluster"`
SubCluster string `json:"subCluster"`
NumNodes int `json:"numNodes"`
NumHWThreads *int `json:"numHWThreads,omitempty"`
NumAccelerators *int `json:"numAccelerators,omitempty"`
Stats []*NamedStats `json:"stats"`
}
type JobsStatistics struct { type JobsStatistics struct {
ID string `json:"id"` ID string `json:"id"`
Name string `json:"name"` Name string `json:"name"`
@ -147,6 +156,17 @@ type MetricStatItem struct {
type Mutation struct { type Mutation struct {
} }
type NamedStats struct {
Name string `json:"name"`
Data *schema.MetricStatistics `json:"data"`
}
type NamedStatsWithScope struct {
Name string `json:"name"`
Scope schema.MetricScope `json:"scope"`
Stats []*ScopedStats `json:"stats"`
}
type NodeMetrics struct { type NodeMetrics struct {
Host string `json:"host"` Host string `json:"host"`
SubCluster string `json:"subCluster"` SubCluster string `json:"subCluster"`
@ -173,6 +193,12 @@ type PageRequest struct {
Page int `json:"page"` Page int `json:"page"`
} }
type ScopedStats struct {
Hostname string `json:"hostname"`
ID *string `json:"id,omitempty"`
Data *schema.MetricStatistics `json:"data"`
}
type StringInput struct { type StringInput struct {
Eq *string `json:"eq,omitempty"` Eq *string `json:"eq,omitempty"`
Neq *string `json:"neq,omitempty"` Neq *string `json:"neq,omitempty"`

View File

@ -125,23 +125,41 @@ func (r *metricValueResolver) Name(ctx context.Context, obj *schema.MetricValue)
// CreateTag is the resolver for the createTag field. // CreateTag is the resolver for the createTag field.
func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string, scope string) (*schema.Tag, error) { func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string, scope string) (*schema.Tag, error) {
id, err := r.Repo.CreateTag(typeArg, name, scope) user := repository.GetUserFromContext(ctx)
if err != nil { if user == nil {
log.Warn("Error while creating tag") return nil, fmt.Errorf("no user in context")
return nil, err
} }
return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil // Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
if user.HasRole(schema.RoleAdmin) && scope == "admin" ||
user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && scope == "global" ||
user.Username == scope {
// Create in DB
id, err := r.Repo.CreateTag(typeArg, name, scope)
if err != nil {
log.Warn("Error while creating tag")
return nil, err
}
return &schema.Tag{ID: id, Type: typeArg, Name: name, Scope: scope}, nil
} else {
log.Warnf("Not authorized to create tag with scope: %s", scope)
return nil, fmt.Errorf("Not authorized to create tag with scope: %s", scope)
}
} }
// DeleteTag is the resolver for the deleteTag field. // DeleteTag is the resolver for the deleteTag field.
func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) { func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) {
// This uses ID string <-> ID string; removeTagFromList uses []string <-> []int
panic(fmt.Errorf("not implemented: DeleteTag - deleteTag")) panic(fmt.Errorf("not implemented: DeleteTag - deleteTag"))
} }
// AddTagsToJob is the resolver for the addTagsToJob field. // AddTagsToJob is the resolver for the addTagsToJob field.
func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) { func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
// Selectable Tags Pre-Filtered by Scope in Frontend: No backend check required user := repository.GetUserFromContext(ctx)
if user == nil {
return nil, fmt.Errorf("no user in context")
}
jid, err := strconv.ParseInt(job, 10, 64) jid, err := strconv.ParseInt(job, 10, 64)
if err != nil { if err != nil {
log.Warn("Error while adding tag to job") log.Warn("Error while adding tag to job")
@ -150,15 +168,32 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
tags := []*schema.Tag{} tags := []*schema.Tag{}
for _, tagId := range tagIds { for _, tagId := range tagIds {
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64) tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil { if err != nil {
log.Warn("Error while parsing tag id") log.Warn("Error while parsing tag id")
return nil, err return nil, err
} }
if tags, err = r.Repo.AddTag(repository.GetUserFromContext(ctx), jid, tid); err != nil { // Test Exists
log.Warn("Error while adding tag") _, _, tscope, exists := r.Repo.TagInfo(tid)
return nil, err if !exists {
log.Warnf("Tag does not exist (ID): %d", tid)
return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
}
// Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
if user.HasRole(schema.RoleAdmin) && tscope == "admin" ||
user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && tscope == "global" ||
user.Username == tscope {
// Add to Job
if tags, err = r.Repo.AddTag(user, jid, tid); err != nil {
log.Warn("Error while adding tag")
return nil, err
}
} else {
log.Warnf("Not authorized to add tag: %d", tid)
return nil, fmt.Errorf("Not authorized to add tag: %d", tid)
} }
} }
@ -167,7 +202,11 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
// RemoveTagsFromJob is the resolver for the removeTagsFromJob field. // RemoveTagsFromJob is the resolver for the removeTagsFromJob field.
func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) { func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
// Removable Tags Pre-Filtered by Scope in Frontend: No backend check required user := repository.GetUserFromContext(ctx)
if user == nil {
return nil, fmt.Errorf("no user in context")
}
jid, err := strconv.ParseInt(job, 10, 64) jid, err := strconv.ParseInt(job, 10, 64)
if err != nil { if err != nil {
log.Warn("Error while parsing job id") log.Warn("Error while parsing job id")
@ -176,21 +215,80 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
tags := []*schema.Tag{} tags := []*schema.Tag{}
for _, tagId := range tagIds { for _, tagId := range tagIds {
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64) tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil { if err != nil {
log.Warn("Error while parsing tag id") log.Warn("Error while parsing tag id")
return nil, err return nil, err
} }
if tags, err = r.Repo.RemoveTag(repository.GetUserFromContext(ctx), jid, tid); err != nil { // Test Exists
log.Warn("Error while removing tag") _, _, tscope, exists := r.Repo.TagInfo(tid)
return nil, err if !exists {
log.Warnf("Tag does not exist (ID): %d", tid)
return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
} }
// Test Access: Admins && Admin Tag OR Support/Admin and Global Tag OR Everyone && Private Tag
if user.HasRole(schema.RoleAdmin) && tscope == "admin" ||
user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && tscope == "global" ||
user.Username == tscope {
// Remove from Job
if tags, err = r.Repo.RemoveTag(user, jid, tid); err != nil {
log.Warn("Error while removing tag")
return nil, err
}
} else {
log.Warnf("Not authorized to remove tag: %d", tid)
return nil, fmt.Errorf("Not authorized to remove tag: %d", tid)
}
} }
return tags, nil return tags, nil
} }
// RemoveTagFromList is the resolver for the removeTagFromList field.
func (r *mutationResolver) RemoveTagFromList(ctx context.Context, tagIds []string) ([]int, error) {
// Needs the context user
user := repository.GetUserFromContext(ctx)
if user == nil {
return nil, fmt.Errorf("no user in context")
}
tags := []int{}
for _, tagId := range tagIds {
// Get ID
tid, err := strconv.ParseInt(tagId, 10, 64)
if err != nil {
log.Warn("Error while parsing tag id for removal")
return nil, err
}
// Test Exists
_, _, tscope, exists := r.Repo.TagInfo(tid)
if !exists {
log.Warnf("Tag does not exist (ID): %d", tid)
return nil, fmt.Errorf("Tag does not exist (ID): %d", tid)
}
// Test Access: Admins && Admin/Global Tag OR Everyone && Private Tag
if user.HasRole(schema.RoleAdmin) && (tscope == "global" || tscope == "admin") || user.Username == tscope {
// Remove from DB
if err = r.Repo.RemoveTagById(tid); err != nil {
log.Warn("Error while removing tag")
return nil, err
} else {
tags = append(tags, int(tid))
}
} else {
log.Warnf("Not authorized to remove tag: %d", tid)
return nil, fmt.Errorf("Not authorized to remove tag: %d", tid)
}
}
return tags, nil
}
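The scope rule above is now spelled out three times across CreateTag, AddTagsToJob, and RemoveTagsFromJob; a hypothetical shared helper, sketched below, would keep those call sites in sync. RemoveTagFromList applies a stricter variant (no support role), so it would need its own predicate.

// Hypothetical helper capturing the repeated rule: admins manage "admin" tags,
// admins and support manage "global" tags, everyone manages their private tags.
func mayManageTag(user *schema.User, tscope string) bool {
	return (user.HasRole(schema.RoleAdmin) && tscope == "admin") ||
		(user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport}) && tscope == "global") ||
		user.Username == tscope
}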
// UpdateConfiguration is the resolver for the updateConfiguration field. // UpdateConfiguration is the resolver for the updateConfiguration field.
func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) { func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil { if err := repository.GetUserCfgRepo().UpdateConfig(name, value, repository.GetUserFromContext(ctx)); err != nil {
@ -301,36 +399,67 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str
return res, err return res, err
} }
// JobMetricStats is the resolver for the jobMetricStats field. // JobStats is the resolver for the jobStats field.
func (r *queryResolver) JobMetricStats(ctx context.Context, id string, metrics []string) ([]*model.JobMetricStatWithName, error) { func (r *queryResolver) JobStats(ctx context.Context, id string, metrics []string) ([]*model.NamedStats, error) {
job, err := r.Query().Job(ctx, id) job, err := r.Query().Job(ctx, id)
if err != nil { if err != nil {
log.Warn("Error while querying job for metrics") log.Warnf("Error while querying job %s for metadata", id)
return nil, err return nil, err
} }
data, err := metricDataDispatcher.LoadStatData(job, metrics, ctx) data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil { if err != nil {
log.Warn("Error while loading job stat data") log.Warnf("Error while loading jobStats data for job id %s", id)
return nil, err return nil, err
} }
res := []*model.JobMetricStatWithName{} res := []*model.NamedStats{}
for name, md := range data { for name, md := range data {
res = append(res, &model.JobMetricStatWithName{ res = append(res, &model.NamedStats{
Name: name, Name: name,
Stats: &md, Data: &md,
}) })
} }
return res, err return res, err
} }
// JobsFootprints is the resolver for the jobsFootprints field. // ScopedJobStats is the resolver for the scopedJobStats field.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) { func (r *queryResolver) ScopedJobStats(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.NamedStatsWithScope, error) {
// NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column! job, err := r.Query().Job(ctx, id)
return r.jobsFootprints(ctx, filter, metrics) if err != nil {
log.Warnf("Error while querying job %s for metadata", id)
return nil, err
}
data, err := metricDataDispatcher.LoadScopedJobStats(job, metrics, scopes, ctx)
if err != nil {
log.Warnf("Error while loading scopedJobStats data for job id %s", id)
return nil, err
}
res := make([]*model.NamedStatsWithScope, 0)
for name, scoped := range data {
for scope, stats := range scoped {
mdlStats := make([]*model.ScopedStats, 0)
for _, stat := range stats {
mdlStats = append(mdlStats, &model.ScopedStats{
Hostname: stat.Hostname,
ID: stat.Id,
Data: stat.Data,
})
}
res = append(res, &model.NamedStatsWithScope{
Name: name,
Scope: scope,
Stats: mdlStats,
})
}
}
return res, nil
} }
// Jobs is the resolver for the jobs field. // Jobs is the resolver for the jobs field.
@ -354,30 +483,28 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
return nil, err return nil, err
} }
if !config.Keys.UiDefaults["job_list_usePaging"].(bool) { // Note: Even if App-Default 'config.Keys.UiDefaults["job_list_usePaging"]' is set, always return hasNextPage boolean.
hasNextPage := false // Users can decide in frontend to use continuous scroll, even if app-default is paging!
// page.Page += 1 : Simple, but expensive /*
// Example Page 4 @ 10 IpP : Does item 41 exist? Example Page 4 @ 10 IpP : Does item 41 exist?
// Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists. Minimal Page 41 @ 1 IpP : If len(result) is 1, Page 5 @ 10 IpP exists.
nextPage := &model.PageRequest{ */
ItemsPerPage: 1, nextPage := &model.PageRequest{
Page: ((page.Page * page.ItemsPerPage) + 1), ItemsPerPage: 1,
} Page: ((page.Page * page.ItemsPerPage) + 1),
nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
if err != nil {
log.Warn("Error while querying next jobs")
return nil, err
}
if len(nextJobs) == 1 {
hasNextPage = true
}
return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
} else {
return &model.JobResultList{Items: jobs, Count: &count}, nil
} }
nextJobs, err := r.Repo.QueryJobs(ctx, filter, nextPage, order)
if err != nil {
log.Warn("Error while querying next jobs")
return nil, err
}
hasNextPage := false
if len(nextJobs) == 1 {
hasNextPage = true
}
return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil
} }
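The probe pattern in isolation, as a sketch with a hypothetical name: rather than re-running the full query for the next page, request exactly one item at the offset where page N+1 would begin; that page exists iff the item does.

func probeHasNextPage(ctx context.Context, r *queryResolver, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (bool, error) {
	probe := &model.PageRequest{
		ItemsPerPage: 1,
		Page:         (page.Page * page.ItemsPerPage) + 1,
	}
	next, err := r.Repo.QueryJobs(ctx, filter, probe, order)
	if err != nil {
		return false, err
	}
	return len(next) == 1, nil
}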
// JobsStatistics is the resolver for the jobsStatistics field. // JobsStatistics is the resolver for the jobsStatistics field.
@ -456,6 +583,62 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
return stats, nil return stats, nil
} }
// JobsMetricStats is the resolver for the jobsMetricStats field.
func (r *queryResolver) JobsMetricStats(ctx context.Context, filter []*model.JobFilter, metrics []string) ([]*model.JobStats, error) {
// No Paging, Fixed Order by StartTime ASC
order := &model.OrderByInput{
Field: "startTime",
Type: "col",
Order: "ASC",
}
jobs, err := r.Repo.QueryJobs(ctx, filter, nil, order)
if err != nil {
log.Warn("Error while querying jobs for comparison")
return nil, err
}
res := []*model.JobStats{}
for _, job := range jobs {
data, err := metricDataDispatcher.LoadJobStats(job, metrics, ctx)
if err != nil {
log.Warnf("Error while loading comparison jobStats data for job id %d", job.JobID)
continue
// return nil, err
}
sres := []*model.NamedStats{}
for name, md := range data {
sres = append(sres, &model.NamedStats{
Name: name,
Data: &md,
})
}
numThreadsInt := int(job.NumHWThreads)
numAccsInt := int(job.NumAcc)
res = append(res, &model.JobStats{
ID: int(job.ID),
JobID: strconv.Itoa(int(job.JobID)),
StartTime: int(job.StartTime.Unix()),
Duration: int(job.Duration),
Cluster: job.Cluster,
SubCluster: job.SubCluster,
NumNodes: int(job.NumNodes),
NumHWThreads: &numThreadsInt,
NumAccelerators: &numAccsInt,
Stats: sres,
})
}
return res, err
}
// JobsFootprints is the resolver for the jobsFootprints field.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
// NOTE: Legacy Naming! This resolver is for normalized histograms in analysis view only - *Not* related to DB "footprint" column!
return r.jobsFootprints(ctx, filter, metrics)
}
// RooflineHeatmap is the resolver for the rooflineHeatmap field. // RooflineHeatmap is the resolver for the rooflineHeatmap field.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) { func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY) return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)

View File

@ -96,27 +96,35 @@ func HandleImportFlag(flag string) error {
} }
job.EnergyFootprint = make(map[string]float64) job.EnergyFootprint = make(map[string]float64)
var totalEnergy float64
var energy float64
// Total Job Energy Outside Loop
totalEnergy := 0.0
for _, fp := range sc.EnergyFootprint { for _, fp := range sc.EnergyFootprint {
// Always Init Metric Energy Inside Loop
metricEnergy := 0.0
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh // Note: For DB data, calculate and save as kWh
// Energy: Power (in Watts) * Time (in Seconds)
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", job.JobID, job.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits // Energy: Power (in Watts) * Time (in Seconds)
energy = math.Round(((repository.LoadJobStat(&job, fp, "avg")*float64(job.Duration))/3600/1000)*100) / 100 // Unit: (W * (s / 3600)) / 1000 = kWh
// Round 2 Digits: round(Energy * 100) / 100
// Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000
// Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1
rawEnergy := ((repository.LoadJobStat(&job, fp, "avg") * float64(job.NumNodes)) * (float64(job.Duration) / 3600.0)) / 1000.0
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
} }
} else { } else {
log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID) log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, job.ID)
} }
job.EnergyFootprint[fp] = energy job.EnergyFootprint[fp] = metricEnergy
totalEnergy += energy totalEnergy += metricEnergy
} }
job.Energy = (math.Round(totalEnergy*100) / 100) job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil { if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID) log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", job.ID)
return err return err
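A worked instance of the formula above (hypothetical numbers) confirms the unit chain W -> Wh -> kWh:

func exampleEnergyKWh() float64 {
	avgPowerPerNode := 250.0  // W, all-node metric average
	numNodes := 4.0           // nodes of the job
	durationSeconds := 7200.0 // 2 hours
	// (250 W * 4 nodes) * (7200 s / 3600) / 1000 = 2.0 kWh
	rawEnergy := ((avgPowerPerNode * numNodes) * (durationSeconds / 3600.0)) / 1000.0
	return math.Round(rawEnergy*100.0) / 100.0
}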

View File

@ -45,6 +45,9 @@ func setup(t *testing.T) *repository.JobRepository {
"jwts": { "jwts": {
"max-age": "2m" "max-age": "2m"
}, },
"apiAllowedIPs": [
"*"
],
"clusters": [ "clusters": [
{ {
"name": "testcluster", "name": "testcluster",

View File

@ -93,27 +93,35 @@ func InitDB() error {
} }
job.EnergyFootprint = make(map[string]float64) job.EnergyFootprint = make(map[string]float64)
var totalEnergy float64
var energy float64
// Total Job Energy Outside Loop
totalEnergy := 0.0
for _, fp := range sc.EnergyFootprint { for _, fp := range sc.EnergyFootprint {
// Always Init Metric Energy Inside Loop
metricEnergy := 0.0
if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil { if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
// Note: For DB data, calculate and save as kWh // Note: For DB data, calculate and save as kWh
// Energy: Power (in Watts) * Time (in Seconds)
if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules) if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules)
log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, jobMeta.Cluster, fp)
// FIXME: Needs sum as stats type
} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt) } else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
// Unit: ( W * s ) / 3600 / 1000 = kWh ; Rounded to 2 nearest digits // Energy: Power (in Watts) * Time (in Seconds)
energy = math.Round(((repository.LoadJobStat(jobMeta, fp, "avg")*float64(jobMeta.Duration))/3600/1000)*100) / 100 // Unit: (W * (s / 3600)) / 1000 = kWh
// Round 2 Digits: round(Energy * 100) / 100
// Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000
// Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1
rawEnergy := ((repository.LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes)) * (float64(jobMeta.Duration) / 3600.0)) / 1000.0
metricEnergy = math.Round(rawEnergy*100.0) / 100.0
} }
} else { } else {
log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID) log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
} }
job.EnergyFootprint[fp] = energy job.EnergyFootprint[fp] = metricEnergy
totalEnergy += energy totalEnergy += metricEnergy
} }
job.Energy = (math.Round(totalEnergy*100) / 100) job.Energy = (math.Round(totalEnergy*100.0) / 100.0)
if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil { if job.RawEnergyFootprint, err = json.Marshal(job.EnergyFootprint); err != nil {
log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID) log.Warnf("Error while marshaling energy footprint for job INTO BYTES, DB ID '%v'", jobMeta.ID)
return err return err

View File

@ -224,8 +224,34 @@ func LoadAverages(
return nil return nil
} }
// Used for polar plots in frontend // Used for statsTable in frontend: Return scoped statistics by metric.
func LoadStatData( func LoadScopedJobStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
if job.State != schema.JobStateRunning && !config.Keys.DisableArchive {
return archive.LoadScopedStatsFromArchive(job, metrics, scopes)
}
repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil {
return nil, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
}
scopedStats, err := repo.LoadScopedStats(job, metrics, scopes, ctx)
if err != nil {
log.Errorf("error while loading scoped statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return nil, err
}
return scopedStats, nil
}
// Used for polar plots in frontend: Aggregates statistics for all nodes to single values for job per metric.
func LoadJobStats(
job *schema.Job, job *schema.Job,
metrics []string, metrics []string,
ctx context.Context, ctx context.Context,
@ -237,12 +263,12 @@ func LoadStatData(
data := make(map[string]schema.MetricStatistics, len(metrics)) data := make(map[string]schema.MetricStatistics, len(metrics))
repo, err := metricdata.GetMetricDataRepo(job.Cluster) repo, err := metricdata.GetMetricDataRepo(job.Cluster)
if err != nil { if err != nil {
return data, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster) return data, fmt.Errorf("job %d: no metric data repository configured for '%s'", job.JobID, job.Cluster)
} }
stats, err := repo.LoadStats(job, metrics, ctx) stats, err := repo.LoadStats(job, metrics, ctx)
if err != nil { if err != nil {
log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project) log.Errorf("error while loading statistics for job %d (User %s, Project %s)", job.JobID, job.User, job.Project)
return data, err return data, err
} }

View File

@ -129,13 +129,13 @@ func (ccms *CCMetricStore) doRequest(
) (*ApiQueryResponse, error) { ) (*ApiQueryResponse, error) {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(body); err != nil { if err := json.NewEncoder(buf).Encode(body); err != nil {
log.Warn("Error while encoding request body") log.Errorf("Error while encoding request body: %s", err.Error())
return nil, err return nil, err
} }
req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf) req, err := http.NewRequestWithContext(ctx, http.MethodGet, ccms.queryEndpoint, buf)
if err != nil { if err != nil {
log.Warn("Error while building request body") log.Errorf("Error while building request body: %s", err.Error())
return nil, err return nil, err
} }
if ccms.jwt != "" { if ccms.jwt != "" {
@ -151,7 +151,7 @@ func (ccms *CCMetricStore) doRequest(
res, err := ccms.client.Do(req) res, err := ccms.client.Do(req)
if err != nil { if err != nil {
log.Error("Error while performing request") log.Errorf("Error while performing request: %s", err.Error())
return nil, err return nil, err
} }
@ -161,7 +161,7 @@ func (ccms *CCMetricStore) doRequest(
var resBody ApiQueryResponse var resBody ApiQueryResponse
if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil { if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
log.Warn("Error while decoding result body") log.Errorf("Error while decoding result body: %s", err.Error())
return nil, err return nil, err
} }
@ -177,7 +177,7 @@ func (ccms *CCMetricStore) LoadData(
) (schema.JobData, error) { ) (schema.JobData, error) {
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution) queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, resolution)
if err != nil { if err != nil {
log.Warn("Error while building queries") log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err return nil, err
} }
@ -192,7 +192,7 @@ func (ccms *CCMetricStore) LoadData(
resBody, err := ccms.doRequest(ctx, &req) resBody, err := ccms.doRequest(ctx, &req)
if err != nil { if err != nil {
log.Error("Error while performing request") log.Errorf("Error while performing request: %s", err.Error())
return nil, err return nil, err
} }
@ -302,6 +302,20 @@ func (ccms *CCMetricStore) buildQueries(
continue continue
} }
// Skip if metric is removed for subcluster
if len(mc.SubClusters) != 0 {
isRemoved := false
for _, scConfig := range mc.SubClusters {
if scConfig.Name == job.SubCluster && scConfig.Remove {
isRemoved = true
break
}
}
if isRemoved {
continue
}
}
// Avoid duplicates... // Avoid duplicates...
handledScopes := make([]schema.MetricScope, 0, 3) handledScopes := make([]schema.MetricScope, 0, 3)
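The removal test above reappears verbatim in buildNodeQueries further down; a hypothetical shared helper (assuming the metric config element type lives in schema) would keep both call sites identical:

func metricRemovedForSubCluster(mc *schema.MetricConfig, subCluster string) bool {
	for _, scConfig := range mc.SubClusters {
		if scConfig.Name == subCluster && scConfig.Remove {
			return true
		}
	}
	return false
}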
@ -557,16 +571,9 @@ func (ccms *CCMetricStore) LoadStats(
ctx context.Context, ctx context.Context,
) (map[string]map[string]schema.MetricStatistics, error) { ) (map[string]map[string]schema.MetricStatistics, error) {
// metricConfigs := archive.GetCluster(job.Cluster).MetricConfig
// resolution := 9000
// for _, mc := range metricConfigs {
// resolution = min(resolution, mc.Timestep)
// }
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization? queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, 0) // #166 Add scope here for analysis view accelerator normalization?
if err != nil { if err != nil {
log.Warn("Error while building query") log.Errorf("Error while building queries for jobId %d, Metrics %v: %s", job.JobID, metrics, err.Error())
return nil, err return nil, err
} }
@ -581,7 +588,7 @@ func (ccms *CCMetricStore) LoadStats(
resBody, err := ccms.doRequest(ctx, &req) resBody, err := ccms.doRequest(ctx, &req)
if err != nil { if err != nil {
log.Error("Error while performing request") log.Errorf("Error while performing request: %s", err.Error())
return nil, err return nil, err
} }
@ -591,9 +598,8 @@ func (ccms *CCMetricStore) LoadStats(
metric := ccms.toLocalName(query.Metric) metric := ccms.toLocalName(query.Metric)
data := res[0] data := res[0]
if data.Error != nil { if data.Error != nil {
log.Infof("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error) log.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
continue continue
// return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
} }
metricdata, ok := stats[metric] metricdata, ok := stats[metric]
@ -603,9 +609,8 @@ func (ccms *CCMetricStore) LoadStats(
} }
if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() { if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
log.Infof("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname) log.Warnf("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
continue continue
// return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
} }
metricdata[query.Hostname] = schema.MetricStatistics{ metricdata[query.Hostname] = schema.MetricStatistics{
@ -618,7 +623,98 @@ func (ccms *CCMetricStore) LoadStats(
return stats, nil return stats, nil
} }
// TODO: Support sub-node-scope metrics! For this, the partition of a node needs to be known! // Used for Job-View Statistics Table
func (ccms *CCMetricStore) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context,
) (schema.ScopedJobStats, error) {
queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes, 0)
if err != nil {
log.Errorf("Error while building queries for jobId %d, Metrics %v, Scopes %v: %s", job.JobID, metrics, scopes, err.Error())
return nil, err
}
req := ApiQueryRequest{
Cluster: job.Cluster,
From: job.StartTime.Unix(),
To: job.StartTime.Add(time.Duration(job.Duration) * time.Second).Unix(),
Queries: queries,
WithStats: true,
WithData: false,
}
resBody, err := ccms.doRequest(ctx, &req)
if err != nil {
log.Errorf("Error while performing request: %s", err.Error())
return nil, err
}
var errors []string
scopedJobStats := make(schema.ScopedJobStats)
for i, row := range resBody.Results {
query := req.Queries[i]
metric := ccms.toLocalName(query.Metric)
scope := assignedScope[i]
if _, ok := scopedJobStats[metric]; !ok {
scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
}
if _, ok := scopedJobStats[metric][scope]; !ok {
scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
}
for ndx, res := range row {
if res.Error != nil {
/* Build list for "partial errors", if any */
errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
continue
}
id := (*string)(nil)
if query.Type != nil {
id = new(string)
*id = query.TypeIds[ndx]
}
if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() {
// "schema.Float()" because regular float64 can not be JSONed when NaN.
res.Avg = schema.Float(0)
res.Min = schema.Float(0)
res.Max = schema.Float(0)
}
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: query.Hostname,
Id: id,
Data: &schema.MetricStatistics{
Avg: float64(res.Avg),
Min: float64(res.Min),
Max: float64(res.Max),
},
})
}
// So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty
if len(scopedJobStats[metric][scope]) == 0 {
delete(scopedJobStats[metric], scope)
if len(scopedJobStats[metric]) == 0 {
delete(scopedJobStats, metric)
}
}
}
if len(errors) != 0 {
/* Returns list for "partial errors" */
return scopedJobStats, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
}
return scopedJobStats, nil
}
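Caller-side sketch (assumed usage): LoadScopedStats may hand back data together with a non-nil "partial errors" error, so the result is worth inspecting even when err != nil:

func examplePartialStats(ccms *CCMetricStore, job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) {
	stats, err := ccms.LoadScopedStats(job, metrics, scopes, ctx)
	if err != nil && len(stats) > 0 {
		// Some hosts failed, but the remaining scoped stats are usable.
		log.Warnf("partial scoped stats: %s", err.Error())
	}
	_ = stats // map: metric -> scope -> []*schema.ScopedStats
}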
// Used for Systems-View Node-Overview
func (ccms *CCMetricStore) LoadNodeData( func (ccms *CCMetricStore) LoadNodeData(
cluster string, cluster string,
metrics, nodes []string, metrics, nodes []string,
@ -652,7 +748,7 @@ func (ccms *CCMetricStore) LoadNodeData(
resBody, err := ccms.doRequest(ctx, &req) resBody, err := ccms.doRequest(ctx, &req)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) log.Errorf("Error while performing request: %s", err.Error())
return nil, err return nil, err
} }
@ -710,6 +806,7 @@ func (ccms *CCMetricStore) LoadNodeData(
return data, nil return data, nil
} }
// Used for Systems-View Node-List
func (ccms *CCMetricStore) LoadNodeListData( func (ccms *CCMetricStore) LoadNodeListData(
cluster, subCluster, nodeFilter string, cluster, subCluster, nodeFilter string,
metrics []string, metrics []string,
@ -768,7 +865,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution) queries, assignedScope, err := ccms.buildNodeQueries(cluster, subCluster, nodes, metrics, scopes, resolution)
if err != nil { if err != nil {
log.Warn("Error while building queries") log.Errorf("Error while building node queries for Cluster %s, SubCLuster %s, Metrics %v, Scopes %v: %s", cluster, subCluster, metrics, scopes, err.Error())
return nil, totalNodes, hasNextPage, err return nil, totalNodes, hasNextPage, err
} }
@ -783,7 +880,7 @@ func (ccms *CCMetricStore) LoadNodeListData(
resBody, err := ccms.doRequest(ctx, &req) resBody, err := ccms.doRequest(ctx, &req)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("Error while performing request %#v\n", err)) log.Errorf("Error while performing request: %s", err.Error())
return nil, totalNodes, hasNextPage, err return nil, totalNodes, hasNextPage, err
} }
@ -888,7 +985,7 @@ func (ccms *CCMetricStore) buildNodeQueries(
if subCluster != "" { if subCluster != "" {
subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster) subClusterTopol, scterr = archive.GetSubCluster(cluster, subCluster)
if scterr != nil { if scterr != nil {
// TODO: Log log.Errorf("could not load cluster %s subCluster %s topology: %s", cluster, subCluster, scterr.Error())
return nil, nil, scterr return nil, nil, scterr
} }
} }
@ -898,10 +995,24 @@ func (ccms *CCMetricStore) buildNodeQueries(
mc := archive.GetMetricConfig(cluster, metric) mc := archive.GetMetricConfig(cluster, metric)
if mc == nil { if mc == nil {
// return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster) // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, cluster)
log.Infof("metric '%s' is not specified for cluster '%s'", metric, cluster) log.Warnf("metric '%s' is not specified for cluster '%s'", metric, cluster)
continue continue
} }
// Skip if metric is removed for subcluster
if mc.SubClusters != nil {
isRemoved := false
for _, scConfig := range mc.SubClusters {
if scConfig.Name == subCluster && scConfig.Remove {
isRemoved = true
break
}
}
if isRemoved {
continue
}
}
// Avoid duplicates... // Avoid duplicates...
handledScopes := make([]schema.MetricScope, 0, 3) handledScopes := make([]schema.MetricScope, 0, 3)

View File

@ -10,6 +10,8 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"math"
"sort"
"strings" "strings"
"time" "time"
@ -64,6 +66,8 @@ func (idb *InfluxDBv2DataRepository) LoadData(
ctx context.Context, ctx context.Context,
resolution int) (schema.JobData, error) { resolution int) (schema.JobData, error) {
log.Infof("InfluxDB 2 Backend: Resolution Scaling not Implemented, will return default timestep. Requested Resolution %d", resolution)
measurementsConds := make([]string, 0, len(metrics)) measurementsConds := make([]string, 0, len(metrics))
for _, m := range metrics { for _, m := range metrics {
measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m)) measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
@ -86,7 +90,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
query := "" query := ""
switch scope { switch scope {
case "node": case "node":
// Get Finest Granularity, Group By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows // Get Finest Granularity, Group By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows <-- Resolution could be added here?
// log.Info("Scope 'node' requested. ") // log.Info("Scope 'node' requested. ")
query = fmt.Sprintf(` query = fmt.Sprintf(`
from(bucket: "%s") from(bucket: "%s")
@ -116,6 +120,12 @@ func (idb *InfluxDBv2DataRepository) LoadData(
// idb.bucket, // idb.bucket,
// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )), // idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
// measurementsCond, hostsCond) // measurementsCond, hostsCond)
case "hwthread":
log.Info(" Scope 'hwthread' requested, but not yet supported: Will return 'node' scope only. ")
continue
case "accelerator":
log.Info(" Scope 'accelerator' requested, but not yet supported: Will return 'node' scope only. ")
continue
default: default:
log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope) log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
continue continue
@ -173,6 +183,11 @@ func (idb *InfluxDBv2DataRepository) LoadData(
} }
case "socket": case "socket":
continue continue
case "accelerator":
continue
case "hwthread":
// See below @ core
continue
case "core": case "core":
continue continue
// Include Series.Id in hostSeries // Include Series.Id in hostSeries
@ -301,6 +316,53 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
return stats, nil return stats, nil
} }
// Used in Job-View StatsTable
// UNTESTED
func (idb *InfluxDBv2DataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.ScopedJobStats, error) {
// Assumption: idb.loadData() only returns series node-scope - use node scope for statsTable
scopedJobStats := make(schema.ScopedJobStats)
data, err := idb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
if err != nil {
log.Warn("Error while loading job for scopedJobStats")
return nil, err
}
for metric, metricData := range data {
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
if _, ok := scopedJobStats[metric]; !ok {
scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
}
if _, ok := scopedJobStats[metric][scope]; !ok {
scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
}
for _, series := range metricData[scope].Series {
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: series.Hostname,
Data: &series.Statistics,
})
}
}
}
return scopedJobStats, nil
}
// Used in Systems-View @ Node-Overview
// UNTESTED
func (idb *InfluxDBv2DataRepository) LoadNodeData( func (idb *InfluxDBv2DataRepository) LoadNodeData(
cluster string, cluster string,
metrics, nodes []string, metrics, nodes []string,
@ -308,12 +370,123 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData(
from, to time.Time, from, to time.Time,
ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
// TODO : Implement to be used in Analysis- und System/Node-View // Note: scopes[] Array will be ignored, only return node scope
log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)
return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") // CONVERT ARGS TO INFLUX
measurementsConds := make([]string, 0)
for _, m := range metrics {
measurementsConds = append(measurementsConds, fmt.Sprintf(`r["_measurement"] == "%s"`, m))
}
measurementsCond := strings.Join(measurementsConds, " or ")
hostsConds := make([]string, 0)
if nodes == nil {
var allNodes []string
subClusterNodeLists := archive.NodeLists[cluster]
for _, nodeList := range subClusterNodeLists {
allNodes = append(allNodes, nodeList.PrintList()...) // append to allNodes, not the (nil) nodes slice
}
for _, node := range allNodes {
nodes = append(nodes, node)
hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node))
}
} else {
for _, node := range nodes {
hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, node))
}
}
hostsCond := strings.Join(hostsConds, " or ")
// BUILD AND PERFORM QUERY
query := fmt.Sprintf(`
from(bucket: "%s")
|> range(start: %s, stop: %s)
|> filter(fn: (r) => (%s) and (%s) )
|> drop(columns: ["_start", "_stop"])
|> group(columns: ["hostname", "_measurement"])
|> aggregateWindow(every: 60s, fn: mean)
|> drop(columns: ["_time"])`,
idb.bucket,
idb.formatTime(from), idb.formatTime(to),
measurementsCond, hostsCond)
rows, err := idb.queryClient.Query(ctx, query)
if err != nil {
log.Error("Error while performing query")
return nil, err
}
// HANDLE QUERY RETURN
// Collect Float Arrays for Node@Metric -> No Scope Handling!
influxData := make(map[string]map[string][]schema.Float)
for rows.Next() {
row := rows.Record()
host, field := row.ValueByKey("hostname").(string), row.Measurement()
influxHostData, ok := influxData[host]
if !ok {
influxHostData = make(map[string][]schema.Float)
influxData[host] = influxHostData
}
influxFieldData, ok := influxData[host][field]
if !ok {
influxFieldData = make([]schema.Float, 0)
influxData[host][field] = influxFieldData
}
val, ok := row.Value().(float64)
if ok {
influxData[host][field] = append(influxData[host][field], schema.Float(val))
} else {
influxData[host][field] = append(influxData[host][field], schema.Float(0))
}
}
// BUILD FUNCTION RETURN
data := make(map[string]map[string][]*schema.JobMetric)
for node, metricData := range influxData {
nodeData, ok := data[node]
if !ok {
nodeData = make(map[string][]*schema.JobMetric)
data[node] = nodeData
}
for metric, floatArray := range metricData {
// Start min at +Inf and max at -Inf so the first value always wins
// (initializing both to 0.0 would pin min to zero for all-positive data)
avg, min, max := 0.0, math.Inf(1), math.Inf(-1)
for _, val := range floatArray {
avg += float64(val)
min = math.Min(min, float64(val))
max = math.Max(max, float64(val))
}
stats := schema.MetricStatistics{
Avg: (math.Round((avg/float64(len(floatArray)))*100) / 100),
Min: (math.Round(min*100) / 100),
Max: (math.Round(max*100) / 100),
}
mc := archive.GetMetricConfig(cluster, metric)
nodeData[metric] = append(nodeData[metric], &schema.JobMetric{
Unit: mc.Unit,
Timestep: mc.Timestep,
Series: []schema.Series{
{
Hostname: node,
Statistics: stats,
Data: floatArray,
},
},
})
}
}
return data, nil
} }
// Used in Systems-View @ Node-List
// UNTESTED
func (idb *InfluxDBv2DataRepository) LoadNodeListData( func (idb *InfluxDBv2DataRepository) LoadNodeListData(
cluster, subCluster, nodeFilter string, cluster, subCluster, nodeFilter string,
metrics []string, metrics []string,
@ -324,10 +497,79 @@ func (idb *InfluxDBv2DataRepository) LoadNodeListData(
ctx context.Context, ctx context.Context,
) (map[string]schema.JobData, int, bool, error) { ) (map[string]schema.JobData, int, bool, error) {
// Assumption: idb.loadData() only returns series node-scope - use node scope for NodeList
// 0) Init additional vars
var totalNodes int = 0 var totalNodes int = 0
var hasNextPage bool = false var hasNextPage bool = false
// TODO : Implement to be used in NodeList-View
log.Infof("LoadNodeListData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") // 1) Get list of all nodes
var nodes []string
if subCluster != "" {
scNodes := archive.NodeLists[cluster][subCluster]
nodes = scNodes.PrintList()
} else {
subClusterNodeLists := archive.NodeLists[cluster]
for _, nodeList := range subClusterNodeLists {
nodes = append(nodes, nodeList.PrintList()...)
}
}
// 2) Filter nodes
if nodeFilter != "" {
filteredNodes := []string{}
for _, node := range nodes {
if strings.Contains(node, nodeFilter) {
filteredNodes = append(filteredNodes, node)
}
}
nodes = filteredNodes
}
// 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ...
totalNodes = len(nodes)
sort.Strings(nodes)
// 3) Apply paging
if len(nodes) > page.ItemsPerPage {
start := (page.Page - 1) * page.ItemsPerPage
end := start + page.ItemsPerPage
if end > len(nodes) {
end = len(nodes)
hasNextPage = false
} else {
hasNextPage = true
}
nodes = nodes[start:end]
}
// 4) Fetch And Convert Data, use idb.LoadNodeData() for query
rawNodeData, err := idb.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
if err != nil {
log.Error(fmt.Sprintf("Error while loading influx nodeData for nodeListData %#v\n", err))
return nil, totalNodes, hasNextPage, err
}
data := make(map[string]schema.JobData)
for node, nodeData := range rawNodeData {
// Init Nested Map Data Structures If Not Found
hostData, ok := data[node]
if !ok {
hostData = make(schema.JobData)
data[node] = hostData
}
for metric, nodeMetricData := range nodeData {
metricData, ok := hostData[metric]
if !ok {
metricData = make(map[schema.MetricScope]*schema.JobMetric)
data[node][metric] = metricData
}
data[node][metric][schema.MetricScopeNode] = nodeMetricData[0] // Only Node Scope Returned from loadNodeData
}
}
return data, totalNodes, hasNextPage, nil
} }

View File

@ -24,9 +24,12 @@ type MetricDataRepository interface {
// Return the JobData for the given job, only with the requested metrics. // Return the JobData for the given job, only with the requested metrics.
LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error) LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context, resolution int) (schema.JobData, error)
// Return a map of metrics to a map of nodes to the metric statistics of the job. node scope assumed for now. // Return a map of metrics to a map of nodes to the metric statistics of the job. node scope only.
LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) LoadStats(job *schema.Job, metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error)
// Return a map of metrics to a map of scopes to the scoped metric statistics of the job.
LoadScopedStats(job *schema.Job, metrics []string, scopes []schema.MetricScope, ctx context.Context) (schema.ScopedJobStats, error)
// Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node. // Return a map of hosts to a map of metrics at the requested scopes (currently only node) for that node.
LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) LoadNodeData(cluster string, metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, ctx context.Context) (map[string]map[string][]*schema.JobMetric, error)
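A minimal stub (hypothetical) of what the new interface method obliges every backend to provide; returning an empty but non-nil map is the simplest conforming behavior:

type noopRepository struct{}

func (n *noopRepository) LoadScopedStats(
	job *schema.Job,
	metrics []string,
	scopes []schema.MetricScope,
	ctx context.Context,
) (schema.ScopedJobStats, error) {
	// Empty but well-formed: callers can range over it without nil checks.
	return make(schema.ScopedJobStats), nil
}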

View File

@ -448,6 +448,51 @@ func (pdb *PrometheusDataRepository) LoadNodeData(
return data, nil return data, nil
} }
// Implemented by NHR@FAU; Used in Job-View StatsTable
func (pdb *PrometheusDataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.ScopedJobStats, error) {
// Assumption: pdb.loadData() only returns series node-scope - use node scope for statsTable
scopedJobStats := make(schema.ScopedJobStats)
data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx, 0 /*resolution here*/)
if err != nil {
log.Warn("Error while loading job for scopedJobStats")
return nil, err
}
for metric, metricData := range data {
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
if _, ok := scopedJobStats[metric]; !ok {
scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
}
if _, ok := scopedJobStats[metric][scope]; !ok {
scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
}
for _, series := range metricData[scope].Series {
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: series.Hostname,
Data: &series.Statistics,
})
}
}
}
return scopedJobStats, nil
}
// Implemented by NHR@FAU; Used in NodeList-View
func (pdb *PrometheusDataRepository) LoadNodeListData( func (pdb *PrometheusDataRepository) LoadNodeListData(
cluster, subCluster, nodeFilter string, cluster, subCluster, nodeFilter string,
metrics []string, metrics []string,
@ -458,10 +503,132 @@ func (pdb *PrometheusDataRepository) LoadNodeListData(
ctx context.Context, ctx context.Context,
) (map[string]schema.JobData, int, bool, error) { ) (map[string]schema.JobData, int, bool, error) {
// Assumption: pdb.loadData() only returns series node-scope - use node scope for NodeList
// 0) Init additional vars
var totalNodes int = 0 var totalNodes int = 0
var hasNextPage bool = false var hasNextPage bool = false
// TODO : Implement to be used in NodeList-View
log.Infof("LoadNodeListData unimplemented for PrometheusDataRepository, Args: cluster %s, metrics %v, nodeFilter %v, scopes %v", cluster, metrics, nodeFilter, scopes)
return nil, totalNodes, hasNextPage, errors.New("METRICDATA/INFLUXV2 > unimplemented for PrometheusDataRepository") // 1) Get list of all nodes
var nodes []string
if subCluster != "" {
scNodes := archive.NodeLists[cluster][subCluster]
nodes = scNodes.PrintList()
} else {
subClusterNodeLists := archive.NodeLists[cluster]
for _, nodeList := range subClusterNodeLists {
nodes = append(nodes, nodeList.PrintList()...)
}
}
// 2) Filter nodes
if nodeFilter != "" {
filteredNodes := []string{}
for _, node := range nodes {
if strings.Contains(node, nodeFilter) {
filteredNodes = append(filteredNodes, node)
}
}
nodes = filteredNodes
}
// 2.1) Count total nodes && Sort nodes -> Sorting invalidated after return ...
totalNodes = len(nodes)
sort.Strings(nodes)
// 3) Apply paging
if len(nodes) > page.ItemsPerPage {
start := (page.Page - 1) * page.ItemsPerPage
end := start + page.ItemsPerPage
if end > len(nodes) {
end = len(nodes)
hasNextPage = false
} else {
hasNextPage = true
}
nodes = nodes[start:end]
}
// 4) Fetch Data, based on pdb.LoadNodeData()
t0 := time.Now()
// Map of hosts of jobData
data := make(map[string]schema.JobData)
// query db for each metric
// TODO: scopes always seems to be empty
if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
scopes = append(scopes, schema.MetricScopeNode)
}
for _, scope := range scopes {
if scope != schema.MetricScopeNode {
logOnce.Do(func() {
log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
})
continue
}
for _, metric := range metrics {
metricConfig := archive.GetMetricConfig(cluster, metric)
if metricConfig == nil {
log.Warnf("Error in LoadNodeListData: Metric %s for cluster %s not configured", metric, cluster)
return nil, totalNodes, hasNextPage, errors.New("Prometheus config error")
}
query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
if err != nil {
log.Warn("Error while formatting prometheus query")
return nil, totalNodes, hasNextPage, err
}
// ranged query over all nodes
r := promv1.Range{
Start: from,
End: to,
Step: time.Duration(metricConfig.Timestep * 1e9),
}
result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
if err != nil {
log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
return nil, totalNodes, hasNextPage, errors.New("Prometheus query error")
}
if len(warnings) > 0 {
log.Warnf("Warnings: %v\n", warnings)
}
step := int64(metricConfig.Timestep)
steps := int64(to.Sub(from).Seconds()) / step
// iter rows of host, metric, values
for _, row := range result.(promm.Matrix) {
hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)
hostdata, ok := data[hostname]
if !ok {
hostdata = make(schema.JobData)
data[hostname] = hostdata
}
metricdata, ok := hostdata[metric]
if !ok {
metricdata = make(map[schema.MetricScope]*schema.JobMetric)
data[hostname][metric] = metricdata
}
// output per host, metric and scope
scopeData, ok := metricdata[scope]
if !ok {
scopeData = &schema.JobMetric{
Unit: metricConfig.Unit,
Timestep: metricConfig.Timestep,
Series: []schema.Series{pdb.RowToSeries(from, step, steps, row)},
}
data[hostname][metric][scope] = scopeData
}
}
}
}
t1 := time.Since(t0)
log.Debugf("LoadNodeListData of %v nodes took %s", len(data), t1)
return data, totalNodes, hasNextPage, nil
} }

View File

@ -36,7 +36,17 @@ func (tmdr *TestMetricDataRepository) LoadData(
func (tmdr *TestMetricDataRepository) LoadStats( func (tmdr *TestMetricDataRepository) LoadStats(
job *schema.Job, job *schema.Job,
metrics []string, ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { metrics []string,
ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
panic("TODO")
}
func (tmdr *TestMetricDataRepository) LoadScopedStats(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
ctx context.Context) (schema.ScopedJobStats, error) {
panic("TODO") panic("TODO")
} }

View File

@ -59,17 +59,15 @@ func Connect(driver string, db string) {
} else { } else {
dbHandle, err = sqlx.Open("sqlite3", opts.URL) dbHandle, err = sqlx.Open("sqlite3", opts.URL)
} }
if err != nil {
log.Fatal(err)
}
case "mysql": case "mysql":
opts.URL += "?multiStatements=true" opts.URL += "?multiStatements=true"
dbHandle, err = sqlx.Open("mysql", opts.URL) dbHandle, err = sqlx.Open("mysql", opts.URL)
if err != nil {
log.Fatalf("sqlx.Open() error: %v", err)
}
default: default:
log.Fatalf("unsupported database driver: %s", driver) log.Abortf("DB Connection: Unsupported database driver '%s'.\n", driver)
}
if err != nil {
log.Abortf("DB Connection: Could not connect to '%s' database with sqlx.Open().\nError: %s\n", driver, err.Error())
} }
dbHandle.SetMaxOpenConns(opts.MaxOpenConnections) dbHandle.SetMaxOpenConns(opts.MaxOpenConnections)
@ -80,7 +78,7 @@ func Connect(driver string, db string) {
dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver} dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
err = checkDBVersion(driver, dbHandle.DB) err = checkDBVersion(driver, dbHandle.DB)
if err != nil { if err != nil {
log.Fatal(err) log.Abortf("DB Connection: Failed DB version check.\nError: %s\n", err.Error())
} }
}) })
} }

View File

@ -590,28 +590,34 @@ func (r *JobRepository) UpdateEnergy(
 	return stmt, err
 }

 energyFootprint := make(map[string]float64)
-var totalEnergy float64
-var energy float64

+// Total Job Energy Outside Loop
+totalEnergy := 0.0
 for _, fp := range sc.EnergyFootprint {
+	// Always Init Metric Energy Inside Loop
+	metricEnergy := 0.0
 	if i, err := archive.MetricIndex(sc.MetricConfig, fp); err == nil {
 		// Note: For DB data, calculate and save as kWh
 		if sc.MetricConfig[i].Energy == "energy" { // this metric has energy as unit (Joules or Wh)
+			log.Warnf("Update EnergyFootprint for Job %d and Metric %s on cluster %s: Set to 'energy' in cluster.json: Not implemented, will return 0.0", jobMeta.JobID, fp, jobMeta.Cluster)
 			// FIXME: Needs sum as stats type
 		} else if sc.MetricConfig[i].Energy == "power" { // this metric has power as unit (Watt)
 			// Energy: Power (in Watts) * Time (in Seconds)
-			// Unit: (( W * s ) / 3600) / 1000 = kWh ; Rounded to 2 nearest digits: (Energy * 100) / 100
-			// Here: All-Node Metric Average * Number of Nodes * Job Runtime
+			// Unit: (W * (s / 3600)) / 1000 = kWh
+			// Round 2 Digits: round(Energy * 100) / 100
+			// Here: (All-Node Metric Average * Number of Nodes) * (Job Duration in Seconds / 3600) / 1000
 			// Note: Shared Jobs handled correctly since "Node Average" is based on partial resources, while "numNodes" factor is 1
-			metricNodeSum := LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes) * float64(jobMeta.Duration)
-			energy = math.Round(((metricNodeSum/3600)/1000)*100) / 100
+			rawEnergy := ((LoadJobStat(jobMeta, fp, "avg") * float64(jobMeta.NumNodes)) * (float64(jobMeta.Duration) / 3600.0)) / 1000.0
+			metricEnergy = math.Round(rawEnergy*100.0) / 100.0
 		}
 	} else {
 		log.Warnf("Error while collecting energy metric %s for job, DB ID '%v', return '0.0'", fp, jobMeta.ID)
 	}

-	energyFootprint[fp] = energy
-	totalEnergy += energy
+	energyFootprint[fp] = metricEnergy
+	totalEnergy += metricEnergy
+
+	// log.Infof("Metric %s Average %f -> %f kWh | Job %d Total -> %f kWh", fp, LoadJobStat(jobMeta, fp, "avg"), metricEnergy, jobMeta.JobID, totalEnergy)
 }

 var rawFootprint []byte

@ -620,7 +626,7 @@ func (r *JobRepository) UpdateEnergy(
 	return stmt, err
 }

-return stmt.Set("energy_footprint", string(rawFootprint)).Set("energy", (math.Round(totalEnergy*100) / 100)), nil
+return stmt.Set("energy_footprint", string(rawFootprint)).Set("energy", (math.Round(totalEnergy*100.0) / 100.0)), nil
 }

 func (r *JobRepository) UpdateFootprint(
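
The reworked kWh formula is easy to sanity-check by hand. A worked example with invented numbers (250 W all-node average power, 4 nodes, 2 h runtime):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	avgPowerW := 250.0    // stand-in for LoadJobStat(jobMeta, fp, "avg"): all-node average in W
	numNodes := 4.0       // stand-in for jobMeta.NumNodes
	durationSec := 7200.0 // stand-in for jobMeta.Duration: 2 hours

	// (W * nodes) * (s / 3600) / 1000 = kWh
	rawEnergy := (avgPowerW * numNodes) * (durationSec / 3600.0) / 1000.0
	kWh := math.Round(rawEnergy*100.0) / 100.0

	fmt.Println(kWh) // 2: 1000 W sustained for 2 h is 2000 Wh = 2 kWh
}
```

250 W × 4 nodes × 2 h = 2000 Wh = 2 kWh, which the rounding to two digits preserves.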

View File

@ -194,11 +194,13 @@ func (r *JobRepository) FindConcurrentJobs(
 queryRunning := query.Where("job.job_state = ?").Where("(job.start_time BETWEEN ? AND ? OR job.start_time < ?)",
 	"running", startTimeTail, stopTimeTail, startTime)
-queryRunning = queryRunning.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%"))
+// Get At Least One Exact Hostname Match from JSON Resources Array in Database
+queryRunning = queryRunning.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", hostname)

 query = query.Where("job.job_state != ?").Where("((job.start_time BETWEEN ? AND ?) OR (job.start_time + job.duration) BETWEEN ? AND ? OR (job.start_time < ?) AND (job.start_time + job.duration) > ?)",
 	"running", startTimeTail, stopTimeTail, startTimeFront, stopTimeTail, startTime, stopTime)
-query = query.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%"))
+// Get At Least One Exact Hostname Match from JSON Resources Array in Database
+query = query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, '$.hostname') = ?)", hostname)

 rows, err := query.RunWith(r.stmtCache).Query()
 if err != nil {
View File

@ -67,7 +67,8 @@ func (r *JobRepository) QueryJobs(
 rows, err := query.RunWith(r.stmtCache).Query()
 if err != nil {
-	log.Errorf("Error while running query: %v", err)
+	queryString, queryVars, _ := query.ToSql()
+	log.Errorf("Error while running query '%s' %v: %v", queryString, queryVars, err)
 	return nil, err
 }

@ -145,6 +146,13 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 	// This is an OR-Logic query: Returns all distinct jobs with at least one of the requested tags; TODO: AND-Logic query?
 	query = query.Join("jobtag ON jobtag.job_id = job.id").Where(sq.Eq{"jobtag.tag_id": filter.Tags}).Distinct()
 }

+if filter.DbID != nil {
+	dbIDs := make([]string, len(filter.DbID))
+	for i, val := range filter.DbID {
+		dbIDs[i] = val
+	}
+	query = query.Where(sq.Eq{"job.id": dbIDs})
+}
+
 if filter.JobID != nil {
 	query = buildStringCondition("job.job_id", filter.JobID, query)
 }

@ -197,7 +205,7 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 	query = buildIntCondition("job.num_hwthreads", filter.NumHWThreads, query)
 }
 if filter.Node != nil {
-	query = buildStringCondition("job.resources", filter.Node, query)
+	query = buildResourceJsonCondition("hostname", filter.Node, query)
 }
 if filter.Energy != nil {
 	query = buildFloatCondition("job.energy", filter.Energy, query)
@ -299,6 +307,28 @@ func buildMetaJsonCondition(jsonField string, cond *model.StringInput, query sq.
 	return query
 }
func buildResourceJsonCondition(jsonField string, cond *model.StringInput, query sq.SelectBuilder) sq.SelectBuilder {
// Verify and Search Only in Valid Jsons
query = query.Where("JSON_VALID(resources)")
// add "AND" Sql query Block for field match
if cond.Eq != nil {
return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") = ?)", *cond.Eq)
}
if cond.Neq != nil { // Currently Unused
return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") != ?)", *cond.Neq)
}
if cond.StartsWith != nil { // Currently Unused
return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\")) LIKE ?)", fmt.Sprint(*cond.StartsWith, "%"))
}
if cond.EndsWith != nil { // Currently Unused
return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") LIKE ?)", fmt.Sprint("%", *cond.EndsWith))
}
if cond.Contains != nil {
return query.Where("EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, \"$."+jsonField+"\") LIKE ?)", fmt.Sprint("%", *cond.Contains, "%"))
}
return query
}
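
For orientation, a rough sketch of the WHERE clause this builder emits for a `Contains` filter, reproduced with squirrel directly (the column value and filter contents are illustrative, not taken from this diff):

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	contains := "gpu" // stand-in for *cond.Contains

	q := sq.Select("job.id").From("job").
		Where("JSON_VALID(resources)").
		Where(`EXISTS (SELECT 1 FROM json_each(job.resources) WHERE json_extract(value, "$.hostname") LIKE ?)`,
			fmt.Sprint("%", contains, "%"))

	sqlStr, args, _ := q.ToSql()
	fmt.Println(sqlStr) // SELECT job.id FROM job WHERE JSON_VALID(resources) AND EXISTS (...)
	fmt.Println(args)   // [%gpu%]
}
```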
 var (
 	matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)")
 	matchAllCap   = regexp.MustCompile("([a-z0-9])([A-Z])")

View File

@ -54,7 +54,7 @@ func checkDBVersion(backend string, db *sql.DB) error {
 		return err
 	}
 default:
-	log.Fatalf("unsupported database backend: %s", backend)
+	log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
 }

 v, dirty, err := m.Version()

@ -102,7 +102,7 @@ func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err erro
 		return m, err
 	}
 default:
-	log.Fatalf("unsupported database backend: %s", backend)
+	log.Abortf("Migration: Unsupported database backend '%s'.\n", backend)
 }

 return m, nil

View File

@ -674,57 +674,32 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 	}
 }

-// log.Debugf("Metric %s, Peak %f, Unit %s, Aggregation %s", metric, peak, unit, aggreg)
-// Make bins, see https://jereze.com/code/sql-histogram/
+// log.Debugf("Metric %s, Peak %f, Unit %s", metric, peak, unit)
+// Make bins, see https://jereze.com/code/sql-histogram/ (Modified here)
 start := time.Now()

-jm := fmt.Sprintf(`json_extract(footprint, "$.%s")`, (metric + "_" + footprintStat))
-
-crossJoinQuery := sq.Select(
-	fmt.Sprintf(`max(%s) as max`, jm),
-	fmt.Sprintf(`min(%s) as min`, jm),
-).From("job").Where(
-	"JSON_VALID(footprint)",
-).Where(
-	fmt.Sprintf(`%s is not null`, jm),
-).Where(
-	fmt.Sprintf(`%s <= %f`, jm, peak),
-)
-
-crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery)
-
-if cjqerr != nil {
-	return nil, cjqerr
-}
-
-for _, f := range filters {
-	crossJoinQuery = BuildWhereClause(f, crossJoinQuery)
-}
-
-crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql()
-if sqlerr != nil {
-	return nil, sqlerr
-}
-
-binQuery := fmt.Sprintf(`CAST( (case when %s = value.max
-	then value.max*0.999999999 else %s end - value.min) / (value.max -
-	value.min) * %v as INTEGER )`, jm, jm, *bins)
+// Find Jobs' Value Bin Number: Divide Value by Peak, Multiply by RequestedBins, then CAST to INT: Gets Bin-Number of Job
+binQuery := fmt.Sprintf(`CAST(
+	((case when json_extract(footprint, "$.%s") = %f then %f*0.999999999 else json_extract(footprint, "$.%s") end) / %f)
+	* %v as INTEGER )`,
+	(metric + "_" + footprintStat), peak, peak, (metric + "_" + footprintStat), peak, *bins)

 mainQuery := sq.Select(
 	fmt.Sprintf(`%s + 1 as bin`, binQuery),
-	fmt.Sprintf(`count(%s) as count`, jm),
-	fmt.Sprintf(`CAST(((value.max / %d) * (%v )) as INTEGER ) as min`, *bins, binQuery),
-	fmt.Sprintf(`CAST(((value.max / %d) * (%v + 1 )) as INTEGER ) as max`, *bins, binQuery),
-).From("job").CrossJoin(
-	fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs...,
-).Where(fmt.Sprintf(`%s is not null and %s <= %f`, jm, jm, peak))
+	fmt.Sprintf(`count(*) as count`),
+	// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * %s as min`, peak, *bins, binQuery),
+	// For Debug: // fmt.Sprintf(`CAST((%f / %d) as INTEGER ) * (%s + 1) as max`, peak, *bins, binQuery),
+).From("job").Where(
+	"JSON_VALID(footprint)",
+).Where(fmt.Sprintf(`json_extract(footprint, "$.%s") is not null and json_extract(footprint, "$.%s") <= %f`, (metric + "_" + footprintStat), (metric + "_" + footprintStat), peak))

+// Only accessible Jobs...
 mainQuery, qerr := SecurityCheck(ctx, mainQuery)

 if qerr != nil {
 	return nil, qerr
 }

+// Filters...
 for _, f := range filters {
 	mainQuery = BuildWhereClause(f, mainQuery)
 }
@ -738,32 +713,34 @@ func (r *JobRepository) jobsMetricStatisticsHistogram(
 	return nil, err
 }

-// Setup Array
+// Setup Return Array With Bin-Numbers for Match and Min/Max based on Peak
 points := make([]*model.MetricHistoPoint, 0)
+binStep := int(peak) / *bins
 for i := 1; i <= *bins; i++ {
-	binMax := ((int(peak) / *bins) * i)
-	binMin := ((int(peak) / *bins) * (i - 1))
-	point := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
-	points = append(points, &point)
+	binMin := (binStep * (i - 1))
+	binMax := (binStep * i)
+	epoint := model.MetricHistoPoint{Bin: &i, Count: 0, Min: &binMin, Max: &binMax}
+	points = append(points, &epoint)
 }

-for rows.Next() {
-	point := model.MetricHistoPoint{}
-	if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil {
-		log.Warnf("Error while scanning rows for %s", jm)
-		return nil, err // Totally bricks cc-backend if returned and if all metrics requested?
+for rows.Next() { // Fill Count if Bin-No. Matches (Not every Bin exists in DB!)
+	rpoint := model.MetricHistoPoint{}
+	if err := rows.Scan(&rpoint.Bin, &rpoint.Count); err != nil { // Required for Debug: &rpoint.Min, &rpoint.Max
+		log.Warnf("Error while scanning rows for %s", metric)
+		return nil, err // FIXME: Totally bricks cc-backend if returned and if all metrics requested?
 	}

 	for _, e := range points {
-		if e.Bin != nil && point.Bin != nil {
-			if *e.Bin == *point.Bin {
-				e.Count = point.Count
-				if point.Min != nil {
-					e.Min = point.Min
-				}
-				if point.Max != nil {
-					e.Max = point.Max
-				}
+		if e.Bin != nil && rpoint.Bin != nil {
+			if *e.Bin == *rpoint.Bin {
+				e.Count = rpoint.Count
+				// Only Required For Debug: Check DB returned Min/Max against Backend Init above
+				// if rpoint.Min != nil {
+				// 	log.Warnf(">>>> Bin %d Min Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Min, *e.Min)
+				// }
+				// if rpoint.Max != nil {
+				// 	log.Warnf(">>>> Bin %d Max Set For %s to %d (Init'd with: %d)", *e.Bin, metric, *rpoint.Max, *e.Max)
+				// }
 				break
 			}
 		}
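
A quick numeric check of the single-pass binning: a job's footprint value is mapped to `CAST(value / peak * bins AS INTEGER) + 1`, and values exactly at peak are nudged down so they land in the last bin rather than in an overflowing bin `bins+1`. Sketch with invented peak and bin count:

```go
package main

import "fmt"

func main() {
	peak, bins := 100.0, 10

	binOf := func(v float64) int {
		if v == peak {
			v = peak * 0.999999999 // keep the maximum value inside the last bin
		}
		return int(v/peak*float64(bins)) + 1 // +1: bins are 1-indexed in the query
	}

	// binStep = int(peak) / bins = 10, so bin 5 covers [40, 50).
	fmt.Println(binOf(0.5), binOf(42.0), binOf(100.0)) // 1 5 10
}
```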

View File

@ -45,7 +45,7 @@ func (r *JobRepository) AddTag(user *schema.User, job int64, tag int64) ([]*sche
 	return tags, archive.UpdateTags(j, archiveTags)
 }

-// Removes a tag from a job
+// Removes a tag from a job by tag id
 func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.Tag, error) {
 	j, err := r.FindByIdWithUser(user, job)
 	if err != nil {

@ -76,6 +76,99 @@ func (r *JobRepository) RemoveTag(user *schema.User, job, tag int64) ([]*schema.
 	return tags, archive.UpdateTags(j, archiveTags)
 }
// Removes a tag from a job by tag info
func (r *JobRepository) RemoveJobTagByRequest(user *schema.User, job int64, tagType string, tagName string, tagScope string) ([]*schema.Tag, error) {
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
return nil, fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}
// Get Job
j, err := r.FindByIdWithUser(user, job)
if err != nil {
log.Warn("Error while finding job by id")
return nil, err
}
// Handle Delete
q := sq.Delete("jobtag").Where("jobtag.job_id = ?", job).Where("jobtag.tag_id = ?", tagID)
if _, err := q.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := q.ToSql()
log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return nil, err
}
tags, err := r.GetTags(user, &job)
if err != nil {
log.Warn("Error while getting tags for job")
return nil, err
}
archiveTags, err := r.getArchiveTags(&job)
if err != nil {
log.Warn("Error while getting tags for job")
return nil, err
}
return tags, archive.UpdateTags(j, archiveTags)
}
// Removes a tag from db by tag info
func (r *JobRepository) RemoveTagByRequest(tagType string, tagName string, tagScope string) error {
// Get Tag ID to delete
tagID, exists := r.TagId(tagType, tagName, tagScope)
if !exists {
log.Warnf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
return fmt.Errorf("Tag does not exist (name, type, scope): %s, %s, %s", tagName, tagType, tagScope)
}
// Handle Delete JobTagTable
qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID)
if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qJobTag.ToSql()
log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return err
}
// Handle Delete TagTable
qTag := sq.Delete("tag").Where("tag.id = ?", tagID)
if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qTag.ToSql()
log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
return err
}
return nil
}
// Removes a tag from db by tag id
func (r *JobRepository) RemoveTagById(tagID int64) error {
// Handle Delete JobTagTable
qJobTag := sq.Delete("jobtag").Where("jobtag.tag_id = ?", tagID)
if _, err := qJobTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qJobTag.ToSql()
log.Errorf("Error removing tag from table 'jobTag' with %s: %v", s, err)
return err
}
// Handle Delete TagTable
qTag := sq.Delete("tag").Where("tag.id = ?", tagID)
if _, err := qTag.RunWith(r.stmtCache).Exec(); err != nil {
s, _, _ := qTag.ToSql()
log.Errorf("Error removing tag from table 'tag' with %s: %v", s, err)
return err
}
return nil
}
 // CreateTag creates a new tag with the specified type and name and returns its database id.
 func (r *JobRepository) CreateTag(tagType string, tagName string, tagScope string) (tagId int64, err error) {
 	// Default to "Global" scope if none defined

@ -209,6 +302,16 @@ func (r *JobRepository) TagId(tagType string, tagName string, tagScope string) (
 	return
 }
// TagInfo returns the database infos of the tag with the specified id.
func (r *JobRepository) TagInfo(tagId int64) (tagType string, tagName string, tagScope string, exists bool) {
exists = true
if err := sq.Select("tag.tag_type", "tag.tag_name", "tag.tag_scope").From("tag").Where("tag.id = ?", tagId).
RunWith(r.stmtCache).QueryRow().Scan(&tagType, &tagName, &tagScope); err != nil {
exists = false
}
return
}
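
Taken together, the new helpers support both per-job and global removal. A hedged usage sketch; the import paths, function wrapper, and IDs below are assumptions for illustration, not part of this diff:

```go
package main

import (
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// removeExampleTags is a hypothetical caller of the new repository methods.
func removeExampleTags(r *repository.JobRepository, user *schema.User, jobID, tagID int64) error {
	// Unlink one tag from one job, addressed by (type, name, scope) instead of DB id.
	if _, err := r.RemoveJobTagByRequest(user, jobID, "testing", "short-run", "global"); err != nil {
		return err
	}

	// Inspect a tag before deleting it globally.
	if tagType, tagName, tagScope, ok := r.TagInfo(tagID); ok {
		log.Infof("deleting tag %s/%s (scope: %s)", tagType, tagName, tagScope)
	}

	// Global delete: first all jobtag links, then the tag row itself.
	return r.RemoveTagById(tagID)
}
```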
 // GetTags returns a list of all scoped tags if job is nil or of the tags that the job with that database ID has.
 func (r *JobRepository) GetTags(user *schema.User, job *int64) ([]*schema.Tag, error) {
 	q := sq.Select("id", "tag_type", "tag_name", "tag_scope").From("tag")

View File

@ -35,7 +35,7 @@ func GetUserCfgRepo() *UserCfgRepo {
 lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
 if err != nil {
-	log.Fatalf("db.DB.Preparex() error: %v", err)
+	log.Fatalf("User Config: Call 'db.DB.Preparex()' failed.\nError: %s\n", err.Error())
 }

 userCfgRepoInstance = &UserCfgRepo{
View File

@ -25,6 +25,9 @@ func setupUserTest(t *testing.T) *UserCfgRepo {
"jwts": { "jwts": {
"max-age": "2m" "max-age": "2m"
}, },
"apiAllowedIPs": [
"*"
],
"clusters": [ "clusters": [
{ {
"name": "testcluster", "name": "testcluster",

View File

@ -297,6 +297,9 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
 		}
 	}
 }

+if len(query["dbId"]) != 0 {
+	filterPresets["dbId"] = query["dbId"]
+}
+
 if query.Get("jobId") != "" {
 	if len(query["jobId"]) == 1 {
 		filterPresets["jobId"] = query.Get("jobId")

View File

@ -40,7 +40,7 @@ func Start() {
 jobRepo = repository.GetJobRepository()

 s, err = gocron.NewScheduler()
 if err != nil {
-	log.Fatalf("Error while creating gocron scheduler: %s", err.Error())
+	log.Abortf("Taskmanager Start: Could not create gocron scheduler.\nError: %s\n", err.Error())
 }

 if config.Keys.StopJobsExceedingWalltime > 0 {

View File

@ -27,6 +27,8 @@ type ArchiveBackend interface {
 	LoadJobData(job *schema.Job) (schema.JobData, error)

+	LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error)
+
 	LoadClusterCfg(name string) (*schema.Cluster, error)

 	StoreJobMeta(jobMeta *schema.JobMeta) error

@ -87,7 +89,7 @@ func Init(rawConfig json.RawMessage, disableArchive bool) error {
 	var version uint64
 	version, err = ar.Init(rawConfig)
 	if err != nil {
-		log.Error("Error while initializing archiveBackend")
+		log.Errorf("Error while initializing archiveBackend: %s", err.Error())
 		return
 	}
 	log.Infof("Load archive version %d", version)
@ -110,7 +112,7 @@ func LoadAveragesFromArchive(
 ) error {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}

@ -125,7 +127,7 @@ func LoadAveragesFromArchive(
 	return nil
 }

-// Helper to metricdataloader.LoadStatData().
+// Helper to metricdataloader.LoadJobStats().
 func LoadStatsFromArchive(
 	job *schema.Job,
 	metrics []string,

@ -133,7 +135,7 @@ func LoadStatsFromArchive(
 	data := make(map[string]schema.MetricStatistics, len(metrics))
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return data, err
 	}

@ -154,10 +156,26 @@ func LoadStatsFromArchive(
 	return data, nil
 }
// Helper to metricdataloader.LoadScopedJobStats().
func LoadScopedStatsFromArchive(
job *schema.Job,
metrics []string,
scopes []schema.MetricScope,
) (schema.ScopedJobStats, error) {
data, err := ar.LoadJobStats(job)
if err != nil {
log.Errorf("Error while loading job stats from archiveBackend: %s", err.Error())
return nil, err
}
return data, nil
}
 func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return nil, err
 	}

@ -173,7 +191,7 @@ func UpdateMetadata(job *schema.Job, metadata map[string]string) error {
 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}

@ -193,7 +211,7 @@ func UpdateTags(job *schema.Job, tags []*schema.Tag) error {
 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
-		log.Warn("Error while loading job metadata from archiveBackend")
+		log.Errorf("Error while loading job metadata from archiveBackend: %s", err.Error())
 		return err
 	}

View File

@ -68,8 +68,23 @@ func initClusterConfig() error {
 	}

 	for _, sc := range cluster.SubClusters {
-		newMetric := mc
-		newMetric.SubClusters = nil
+		newMetric := &schema.MetricConfig{
+			Unit:          mc.Unit,
+			Energy:        mc.Energy,
+			Name:          mc.Name,
+			Scope:         mc.Scope,
+			Aggregation:   mc.Aggregation,
+			Peak:          mc.Peak,
+			Caution:       mc.Caution,
+			Alert:         mc.Alert,
+			Timestep:      mc.Timestep,
+			Normal:        mc.Normal,
+			LowerIsBetter: mc.LowerIsBetter,
+		}
+
+		if mc.Footprint != "" {
+			newMetric.Footprint = mc.Footprint
+		}
+
 		if cfg, ok := scLookup[sc.Name]; ok {
 			if !cfg.Remove {
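
The field-by-field copy replaces `newMetric := mc` because copying a Go struct only copies slice headers, so a per-subcluster metric built by shallow copy would keep sharing backing storage with the cluster-wide config. A minimal illustration of that aliasing pitfall, detached from the real schema types:

```go
package main

import "fmt"

type metric struct {
	Name        string
	SubClusters []string
}

func main() {
	mc := metric{Name: "flops", SubClusters: []string{"a", "b"}}

	shallow := mc // copies the slice header, not the elements
	shallow.SubClusters[0] = "mutated"
	fmt.Println(mc.SubClusters[0]) // "mutated": the original changed too

	// Field-wise copy with the slice deliberately left nil avoids the sharing.
	explicit := metric{Name: mc.Name}
	fmt.Println(explicit.SubClusters == nil) // true
}
```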

View File

@ -115,6 +115,40 @@ func loadJobData(filename string, isCompressed bool) (schema.JobData, error) {
 	}
 }
func loadJobStats(filename string, isCompressed bool) (schema.ScopedJobStats, error) {
f, err := os.Open(filename)
if err != nil {
log.Errorf("fsBackend LoadJobStats()- %v", err)
return nil, err
}
defer f.Close()
if isCompressed {
r, err := gzip.NewReader(f)
if err != nil {
log.Errorf(" %v", err)
return nil, err
}
defer r.Close()
if config.Keys.Validate {
if err := schema.Validate(schema.Data, r); err != nil {
return nil, fmt.Errorf("validate job data: %v", err)
}
}
return DecodeJobStats(r, filename)
} else {
if config.Keys.Validate {
if err := schema.Validate(schema.Data, bufio.NewReader(f)); err != nil {
return nil, fmt.Errorf("validate job data: %v", err)
}
}
return DecodeJobStats(bufio.NewReader(f), filename)
}
}
 func (fsa *FsArchive) Init(rawConfig json.RawMessage) (uint64, error) {
 	var config FsArchiveConfig
@ -389,6 +423,18 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
return loadJobData(filename, isCompressed) return loadJobData(filename, isCompressed)
} }
func (fsa *FsArchive) LoadJobStats(job *schema.Job) (schema.ScopedJobStats, error) {
var isCompressed bool = true
filename := getPath(job, fsa.path, "data.json.gz")
if !util.CheckFileExists(filename) {
filename = getPath(job, fsa.path, "data.json")
isCompressed = false
}
return loadJobStats(filename, isCompressed)
}
 func (fsa *FsArchive) LoadJobMeta(job *schema.Job) (*schema.JobMeta, error) {
 	filename := getPath(job, fsa.path, "meta.json")
 	return loadJobMeta(filename)

View File

@ -32,6 +32,43 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	return data.(schema.JobData), nil
 }
func DecodeJobStats(r io.Reader, k string) (schema.ScopedJobStats, error) {
jobData, err := DecodeJobData(r, k)
// Convert schema.JobData to schema.ScopedJobStats
if jobData != nil {
scopedJobStats := make(schema.ScopedJobStats)
for metric, metricData := range jobData {
if _, ok := scopedJobStats[metric]; !ok {
scopedJobStats[metric] = make(map[schema.MetricScope][]*schema.ScopedStats)
}
for scope, jobMetric := range metricData {
if _, ok := scopedJobStats[metric][scope]; !ok {
scopedJobStats[metric][scope] = make([]*schema.ScopedStats, 0)
}
for _, series := range jobMetric.Series {
scopedJobStats[metric][scope] = append(scopedJobStats[metric][scope], &schema.ScopedStats{
Hostname: series.Hostname,
Id: series.Id,
Data: &series.Statistics,
})
}
// So that one can later check len(scopedJobStats[metric][scope]): Remove from map if empty
if len(scopedJobStats[metric][scope]) == 0 {
delete(scopedJobStats[metric], scope)
if len(scopedJobStats[metric]) == 0 {
delete(scopedJobStats, metric)
}
}
}
}
return scopedJobStats, nil
}
return nil, err
}
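
As a usage sketch of the decoder (a hypothetical helper placed next to `DecodeJobStats`; treating the second argument as a source name threaded through for error context is an assumption based on the surrounding code):

```go
// printJobStats decodes an archived data.json stream into its stats-only view
// and prints one line per (metric, scope, series): the raw Values are dropped,
// the precomputed per-series statistics are kept.
func printJobStats(r io.Reader) error {
	stats, err := DecodeJobStats(r, "data.json")
	if err != nil {
		return err
	}
	for metric, scopes := range stats {
		for scope, series := range scopes {
			for _, s := range series {
				fmt.Println(metric, scope, s.Hostname, s.Data.Avg)
			}
		}
	}
	return nil
}
```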
 func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
 	var d schema.JobMeta
 	if err := json.NewDecoder(r).Decode(&d); err != nil {

View File

@ -46,12 +46,12 @@ var loglevel string = "info"
/* CONFIG */ /* CONFIG */
 func Init(lvl string, logdate bool) {
+	// Discard I/O for all writers below selected loglevel; <CRITICAL> is always written.
 	switch lvl {
 	case "crit":
 		ErrWriter = io.Discard
 		fallthrough
-	case "err", "fatal":
+	case "err":
 		WarnWriter = io.Discard
 		fallthrough
 	case "warn":

@ -63,8 +63,7 @@ func Init(lvl string, logdate bool) {
 		// Nothing to do...
 		break
 	default:
-		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel 'debug'\n", lvl)
-		//SetLogLevel("debug")
+		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel '%s'\n", lvl, loglevel)
 	}

 	if !logdate {
@ -84,109 +83,138 @@ func Init(lvl string, logdate bool) {
 	loglevel = lvl
 }

-/* PRINT */
-
-// Private helper
-func printStr(v ...interface{}) string {
-	return fmt.Sprint(v...)
-}
-
-// Uses Info() -> If errorpath required at some point:
-// Will need own writer with 'Output(2, out)' to correctly render path
-func Print(v ...interface{}) {
-	Info(v...)
-}
-
-func Debug(v ...interface{}) {
-	DebugLog.Output(2, printStr(v...))
-}
-
-func Info(v ...interface{}) {
-	InfoLog.Output(2, printStr(v...))
-}
-
-func Warn(v ...interface{}) {
-	WarnLog.Output(2, printStr(v...))
-}
-
-func Error(v ...interface{}) {
-	ErrLog.Output(2, printStr(v...))
-}
-
-// Writes panic stacktrace, but keeps application alive
-func Panic(v ...interface{}) {
-	ErrLog.Output(2, printStr(v...))
-	panic("Panic triggered ...")
-}
-
-func Crit(v ...interface{}) {
-	CritLog.Output(2, printStr(v...))
-}
-
-// Writes critical log, stops application
-func Fatal(v ...interface{}) {
-	CritLog.Output(2, printStr(v...))
-	os.Exit(1)
-}
-
-/* PRINT FORMAT*/
-
-// Private helper
-func printfStr(format string, v ...interface{}) string {
-	return fmt.Sprintf(format, v...)
-}
-
-// Uses Infof() -> If errorpath required at some point:
-// Will need own writer with 'Output(2, out)' to correctly render path
-func Printf(format string, v ...interface{}) {
-	Infof(format, v...)
-}
-
-func Debugf(format string, v ...interface{}) {
-	DebugLog.Output(2, printfStr(format, v...))
-}
-
-func Infof(format string, v ...interface{}) {
-	InfoLog.Output(2, printfStr(format, v...))
-}
-
-func Warnf(format string, v ...interface{}) {
-	WarnLog.Output(2, printfStr(format, v...))
-}
-
-func Errorf(format string, v ...interface{}) {
-	ErrLog.Output(2, printfStr(format, v...))
-}
-
-// Writes panic stacktrace, but keeps application alive
-func Panicf(format string, v ...interface{}) {
-	ErrLog.Output(2, printfStr(format, v...))
-	panic("Panic triggered ...")
-}
-
-func Critf(format string, v ...interface{}) {
-	CritLog.Output(2, printfStr(format, v...))
-}
-
-// Writes crit log, stops application
-func Fatalf(format string, v ...interface{}) {
-	CritLog.Output(2, printfStr(format, v...))
-	os.Exit(1)
-}
+/* HELPER */

 func Loglevel() string {
 	return loglevel
 }

-/* SPECIAL */
-
-// func Finfof(w io.Writer, format string, v ...interface{}) {
-// 	if w != io.Discard {
-// 		if logDateTime {
-// 			currentTime := time.Now()
-// 			fmt.Fprintf(InfoWriter, currentTime.String()+InfoPrefix+format+"\n", v...)
-// 		} else {
-// 			fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
-// 		}
-// 	}
-// }
+/* PRIVATE HELPER */
+
+// Return unformatted string
+func printStr(v ...interface{}) string {
+	return fmt.Sprint(v...)
+}
+
+// Return formatted string
+func printfStr(format string, v ...interface{}) string {
+	return fmt.Sprintf(format, v...)
+}
/* PRINT */
// Prints to STDOUT without string formatting; application continues.
// Used for special cases not requiring log information like date or location.
func Print(v ...interface{}) {
fmt.Fprintln(os.Stdout, v...)
}
// Prints to STDOUT without string formatting; application exits with error code 0.
// Used for exiting succesfully with message after expected outcome, e.g. successful single-call application runs.
func Exit(v ...interface{}) {
fmt.Fprintln(os.Stdout, v...)
os.Exit(0)
}
// Prints to STDOUT without string formatting; application exits with error code 1.
// Used for terminating with message after to be expected errors, e.g. wrong arguments or during init().
func Abort(v ...interface{}) {
fmt.Fprintln(os.Stdout, v...)
os.Exit(1)
}
// Prints to DEBUG writer without string formatting; application continues.
// Used for logging additional information, primarily for development.
func Debug(v ...interface{}) {
DebugLog.Output(2, printStr(v...))
}
// Prints to INFO writer without string formatting; application continues.
// Used for logging additional information, e.g. notable returns or common fail-cases.
func Info(v ...interface{}) {
InfoLog.Output(2, printStr(v...))
}
// Prints to WARNING writer without string formatting; application continues.
// Used for logging important information, e.g. uncommon edge-cases or administration related information.
func Warn(v ...interface{}) {
WarnLog.Output(2, printStr(v...))
}
// Prints to ERROR writer without string formatting; application continues.
// Used for logging errors, but code still can return default(s) or nil.
func Error(v ...interface{}) {
ErrLog.Output(2, printStr(v...))
}
// Prints to CRITICAL writer without string formatting; application exits with error code 1.
// Used for terminating on unexpected errors with date and code location.
func Fatal(v ...interface{}) {
CritLog.Output(2, printStr(v...))
os.Exit(1)
}
// Prints to PANIC function without string formatting; application exits with panic.
// Used for terminating on unexpected errors with stacktrace.
func Panic(v ...interface{}) {
panic(printStr(v...))
}
/* PRINT FORMAT*/
// Prints to STDOUT with string formatting; application continues.
// Used for special cases not requiring log information like date or location.
func Printf(format string, v ...interface{}) {
fmt.Fprintf(os.Stdout, format, v...)
}
// Prints to STDOUT with string formatting; application exits with error code 0.
// Used for exiting succesfully with message after expected outcome, e.g. successful single-call application runs.
func Exitf(format string, v ...interface{}) {
fmt.Fprintf(os.Stdout, format, v...)
os.Exit(0)
}
// Prints to STDOUT with string formatting; application exits with error code 1.
// Used for terminating with message after to be expected errors, e.g. wrong arguments or during init().
func Abortf(format string, v ...interface{}) {
fmt.Fprintf(os.Stdout, format, v...)
os.Exit(1)
}
// Prints to DEBUG writer with string formatting; application continues.
// Used for logging additional information, primarily for development.
func Debugf(format string, v ...interface{}) {
DebugLog.Output(2, printfStr(format, v...))
}
// Prints to INFO writer with string formatting; application continues.
// Used for logging additional information, e.g. notable returns or common fail-cases.
func Infof(format string, v ...interface{}) {
InfoLog.Output(2, printfStr(format, v...))
}
// Prints to WARNING writer with string formatting; application continues.
// Used for logging important information, e.g. uncommon edge-cases or administration related information.
func Warnf(format string, v ...interface{}) {
WarnLog.Output(2, printfStr(format, v...))
}
// Prints to ERROR writer with string formatting; application continues.
// Used for logging errors, but code still can return default(s) or nil.
func Errorf(format string, v ...interface{}) {
ErrLog.Output(2, printfStr(format, v...))
}
// Prints to CRITICAL writer with string formatting; application exits with error code 1.
// Used for terminating on unexpected errors with date and code location.
func Fatalf(format string, v ...interface{}) {
CritLog.Output(2, printfStr(format, v...))
os.Exit(1)
}
// Prints to PANIC function with string formatting; application exits with panic.
// Used for terminating on unexpected errors with stacktrace.
func Panicf(format string, v ...interface{}) {
panic(printfStr(format, v...))
}
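
To make the resulting semantics concrete, a short usage sketch of the reworked API (writer names as above; the chosen level and messages are invented):

```go
package main

import "github.com/ClusterCockpit/cc-backend/pkg/log"

func main() {
	log.Init("warn", true) // discard DEBUG and INFO writers; WARN and above stay active

	log.Debug("not written: writer is io.Discard at this level")
	log.Infof("also not written: %s", "below selected loglevel")
	log.Warn("written, with date and code location prefix")

	// Plain message on STDOUT, then os.Exit(1): meant for expected failures such as bad arguments.
	log.Abortf("Example: exiting with message and error code 1.\n")
}
```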

View File

@ -5,85 +5,16 @@
 package runtimeEnv

 import (
-	"bufio"
-	"errors"
 	"fmt"
 	"os"
 	"os/exec"
 	"os/user"
 	"strconv"
-	"strings"
 	"syscall"

 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )
-// Very simple and limited .env file reader.
-// All variable definitions found are directly
-// added to the processes environment.
-func LoadEnv(file string) error {
-	f, err := os.Open(file)
-	if err != nil {
-		log.Error("Error while opening .env file")
-		return err
-	}
-
-	defer f.Close()
-	s := bufio.NewScanner(bufio.NewReader(f))
-	for s.Scan() {
-		line := s.Text()
-		if strings.HasPrefix(line, "#") || len(line) == 0 {
-			continue
-		}
-
-		if strings.Contains(line, "#") {
-			return errors.New("'#' are only supported at the start of a line")
-		}
-
-		line = strings.TrimPrefix(line, "export ")
-		parts := strings.SplitN(line, "=", 2)
-		if len(parts) != 2 {
-			return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
-		}
-
-		key := strings.TrimSpace(parts[0])
-		val := strings.TrimSpace(parts[1])
-		if strings.HasPrefix(val, "\"") {
-			if !strings.HasSuffix(val, "\"") {
-				return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
-			}
-
-			runes := []rune(val[1 : len(val)-1])
-			sb := strings.Builder{}
-			for i := 0; i < len(runes); i++ {
-				if runes[i] == '\\' {
-					i++
-					switch runes[i] {
-					case 'n':
-						sb.WriteRune('\n')
-					case 'r':
-						sb.WriteRune('\r')
-					case 't':
-						sb.WriteRune('\t')
-					case '"':
-						sb.WriteRune('"')
-					default:
-						return fmt.Errorf("RUNTIME/SETUP > unsupported escape sequence in quoted string: backslash %#v", runes[i])
-					}
-					continue
-				}
-				sb.WriteRune(runes[i])
-			}
-
-			val = sb.String()
-		}
-
-		os.Setenv(key, val)
-	}
-
-	return s.Err()
-}

 // Changes the processes user and group to that
 // specified in the config.json. The go runtime
 // takes care of all threads (and not only the calling one)

View File

@ -100,7 +100,7 @@ type ProgramConfig struct {
 	// Address where the http (or https) server will listen on (for example: 'localhost:80').
 	Addr string `json:"addr"`

-	// Addresses from which secured API endpoints can be reached
+	// Addresses from which secured admin API endpoints can be reached, can be wildcard "*"
 	ApiAllowedIPs []string `json:"apiAllowedIPs"`

 	// Drop root permissions once .env was read and the port was taken.

View File

@ -15,6 +15,7 @@ import (
 )

 type JobData map[string]map[MetricScope]*JobMetric
+type ScopedJobStats map[string]map[MetricScope][]*ScopedStats

 type JobMetric struct {
 	StatisticsSeries *StatsSeries `json:"statisticsSeries,omitempty"`

@ -30,6 +31,12 @@ type Series struct {
 	Statistics MetricStatistics `json:"statistics"`
 }

+type ScopedStats struct {
+	Hostname string            `json:"hostname"`
+	Id       *string           `json:"id,omitempty"`
+	Data     *MetricStatistics `json:"data"`
+}
+
 type MetricStatistics struct {
 	Avg float64 `json:"avg"`
 	Min float64 `json:"min"`
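
For the archive/wire shape these types serialize to, a small constructed example (types reduced to what the example needs, values invented):

```go
package main

import (
	"encoding/json"
	"os"
)

// Mirrors the schema types above, trimmed for this sketch.
type MetricStatistics struct {
	Avg float64 `json:"avg"`
	Min float64 `json:"min"`
	Max float64 `json:"max"`
}

type ScopedStats struct {
	Hostname string            `json:"hostname"`
	Id       *string           `json:"id,omitempty"`
	Data     *MetricStatistics `json:"data"`
}

func main() {
	core0 := "0"
	stats := map[string]map[string][]*ScopedStats{
		"flops_any": {
			"core": {
				{Hostname: "node1", Id: &core0, Data: &MetricStatistics{Avg: 42, Min: 12, Max: 97}},
			},
		},
	}
	json.NewEncoder(os.Stdout).Encode(stats)
	// {"flops_any":{"core":[{"hostname":"node1","id":"0","data":{"avg":42,"min":12,"max":97}}]}}
}
```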

View File

@ -25,10 +25,18 @@
 },
 "scope": {
 	"description": "Native measurement resolution",
-	"type": "string"
+	"type": "string",
+	"enum": [
+		"node",
+		"socket",
+		"memoryDomain",
+		"core",
+		"hwthread",
+		"accelerator"
+	]
 },
 "timestep": {
-	"description": "Frequency of timeseries points",
+	"description": "Frequency of timeseries points in seconds",
 	"type": "integer"
 },
 "aggregation": {

@ -108,15 +116,19 @@
 	"type": "boolean"
 },
 "peak": {
+	"description": "The maximum possible metric value",
 	"type": "number"
 },
 "normal": {
+	"description": "A common metric value level",
 	"type": "number"
 },
 "caution": {
+	"description": "Metric value requires attention",
 	"type": "number"
 },
 "alert": {
+	"description": "Metric value requiring immediate attention",
 	"type": "number"
 },
 "remove": {

View File

@ -492,6 +492,7 @@
 },
 "required": [
 	"jwts",
-	"clusters"
+	"clusters",
+	"apiAllowedIPs"
 ]
 }

View File

@ -17,6 +17,7 @@
"IPC", "IPC",
"Hz", "Hz",
"W", "W",
"J",
"°C", "°C",
"" ""
] ]

View File

@ -85,6 +85,7 @@ func IsValidRole(role string) bool {
 	return getRoleEnum(role) != RoleError
 }

+// Check if User has SPECIFIED role AND role is VALID
 func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) {
 	if IsValidRole(role) {
 		for _, r := range u.Roles {

@ -97,6 +98,7 @@ func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) {
 	return false, false
 }

+// Check if User has SPECIFIED role
 func (u *User) HasRole(role Role) bool {
 	for _, r := range u.Roles {
 		if r == GetRoleString(role) {

@ -106,7 +108,7 @@ func (u *User) HasRole(role Role) bool {
 	return false
 }

-// Role-Arrays are short: performance not impacted by nested loop
+// Check if User has ANY of the listed roles
 func (u *User) HasAnyRole(queryroles []Role) bool {
 	for _, ur := range u.Roles {
 		for _, qr := range queryroles {

@ -118,7 +120,7 @@ func (u *User) HasAnyRole(queryroles []Role) bool {
 	return false
 }

-// Role-Arrays are short: performance not impacted by nested loop
+// Check if User has ALL of the listed roles
 func (u *User) HasAllRoles(queryroles []Role) bool {
 	target := len(queryroles)
 	matches := 0

@ -138,7 +140,7 @@ func (u *User) HasAllRoles(queryroles []Role) bool {
 	}
 }

-// Role-Arrays are short: performance not impacted by nested loop
+// Check if User has NONE of the listed roles
 func (u *User) HasNotRoles(queryroles []Role) bool {
 	matches := 0
 	for _, ur := range u.Roles {

View File

@ -14,17 +14,20 @@ func TestValidateConfig(t *testing.T) {
"jwts": { "jwts": {
"max-age": "2m" "max-age": "2m"
}, },
"clusters": [ "apiAllowedIPs": [
{ "*"
"name": "testcluster", ],
"metricDataRepository": { "clusters": [
"kind": "cc-metric-store", {
"url": "localhost:8082"}, "name": "testcluster",
"filterRanges": { "metricDataRepository": {
"numNodes": { "from": 1, "to": 64 }, "kind": "cc-metric-store",
"duration": { "from": 0, "to": 86400 }, "url": "localhost:8082"},
"startTime": { "from": "2022-01-01T00:00:00Z", "to": null } "filterRanges": {
}}] "numNodes": { "from": 1, "to": 64 },
"duration": { "from": 0, "to": 86400 },
"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
}}]
}`) }`)
 if err := Validate(Config, bytes.NewReader(json)); err != nil {
@ -33,7 +36,6 @@ func TestValidateConfig(t *testing.T) {
} }
 func TestValidateJobMeta(t *testing.T) {
 }

 func TestValidateCluster(t *testing.T) {

View File

@ -22,8 +22,7 @@ func parseDate(in string) int64 {
if in != "" { if in != "" {
t, err := time.ParseInLocation(shortForm, in, loc) t, err := time.ParseInLocation(shortForm, in, loc)
if err != nil { if err != nil {
fmt.Printf("date parse error %v", err) log.Abortf("Archive Manager Main: Date parse failed with input: '%s'\nError: %s\n", in, err.Error())
os.Exit(0)
} }
return t.Unix() return t.Unix()
} }

View File

@ -31,9 +31,9 @@
} }
}, },
"node_modules/@0no-co/graphql.web": { "node_modules/@0no-co/graphql.web": {
"version": "1.0.13", "version": "1.1.2",
"resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.0.13.tgz", "resolved": "https://registry.npmjs.org/@0no-co/graphql.web/-/graphql.web-1.1.2.tgz",
"integrity": "sha512-jqYxOevheVTU1S36ZdzAkJIdvRp2m3OYIG5SEoKDw5NI8eVwkoI0D/Q3DYNGmXCxkA6CQuoa7zvMiDPTLqUNuw==", "integrity": "sha512-N2NGsU5FLBhT8NZ+3l2YrzZSHITjNXNuDhC4iDiikv0IujaJ0Xc6xIxQZ/Ek3Cb+rgPjnLHYyJm11tInuJn+cw==",
"license": "MIT", "license": "MIT",
"peerDependencies": { "peerDependencies": {
"graphql": "^14.0.0 || ^15.0.0 || ^16.0.0" "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0"
@ -58,13 +58,10 @@
} }
}, },
"node_modules/@babel/runtime": { "node_modules/@babel/runtime": {
"version": "7.26.7", "version": "7.27.1",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.7.tgz", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.1.tgz",
"integrity": "sha512-AOPI3D+a8dXnja+iwsUqGRjr1BbZIe771sXdapOtYI531gSqpi92vXivKcq2asu/DFpdl1ceFAKZyRzK2PCVcQ==", "integrity": "sha512-1x3D2xEk2fRo3PAhwQwu5UubzgiVWSXTBfWpVd2Mx2AzRqJuDJCsgaDVZ7HB5iGzDW1Hl1sWN2mFyKjmR9uAog==",
"license": "MIT", "license": "MIT",
"dependencies": {
"regenerator-runtime": "^0.14.0"
},
"engines": { "engines": {
"node": ">=6.9.0" "node": ">=6.9.0"
} }
@ -134,19 +131,6 @@
"integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==", "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/@lambdatest/node-tunnel": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/@lambdatest/node-tunnel/-/node-tunnel-4.0.8.tgz",
"integrity": "sha512-IY42aDD4Ryqjug9V4wpCjckKpHjC2zrU/XhhorR5ztX088XITRFKUo8U6+gOjy/V8kAB+EgDuIXfK0izXbt9Ow==",
"license": "ISC",
"dependencies": {
"adm-zip": "^0.5.10",
"axios": "^1.6.2",
"get-port": "^1.0.0",
"https-proxy-agent": "^5.0.0",
"split": "^1.0.1"
}
},
"node_modules/@popperjs/core": { "node_modules/@popperjs/core": {
"version": "2.11.8", "version": "2.11.8",
"resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz", "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz",
@ -158,9 +142,9 @@
} }
}, },
"node_modules/@rollup/plugin-commonjs": { "node_modules/@rollup/plugin-commonjs": {
"version": "28.0.2", "version": "28.0.3",
"resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-28.0.2.tgz", "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-28.0.3.tgz",
"integrity": "sha512-BEFI2EDqzl+vA1rl97IDRZ61AIwGH093d9nz8+dThxJNH8oSoB7MjWvPCX3dkaK1/RCJ/1v/R1XB15FuSs0fQw==", "integrity": "sha512-pyltgilam1QPdn+Zd9gaCfOLcnjMEJ9gV+bTw6/r73INdvzf1ah9zLIJBm+kW7R6IUFIQ1YO+VqZtYxZNWFPEQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -185,9 +169,9 @@
} }
}, },
"node_modules/@rollup/plugin-node-resolve": { "node_modules/@rollup/plugin-node-resolve": {
"version": "16.0.0", "version": "16.0.1",
"resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-16.0.0.tgz", "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-16.0.1.tgz",
"integrity": "sha512-0FPvAeVUT/zdWoO0jnb/V5BlBsUSNfkIOtFHzMO4H9MOklrmQFY6FduVHKucNb/aTFxvnGhj4MNj/T1oNdDfNg==", "integrity": "sha512-tk5YCxJWIG81umIvNkSod2qK5KyQW19qcBF/B78n1bjtOON6gzKoVeSzAE8yHCZEDmqkHKkxplExA8KzdJLJpA==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@ -276,9 +260,9 @@
} }
}, },
"node_modules/@rollup/rollup-android-arm-eabi": { "node_modules/@rollup/rollup-android-arm-eabi": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.40.2.tgz",
"integrity": "sha512-kwctwVlswSEsr4ljpmxKrRKp1eG1v2NAhlzFzDf1x1OdYaMjBYjDCbHkzWm57ZXzTwqn8stMXgROrnMw8dJK3w==", "integrity": "sha512-JkdNEq+DFxZfUwxvB58tHMHBHVgX23ew41g1OQinthJ+ryhdRk67O31S7sYw8u2lTjHUPFxwar07BBt1KHp/hg==",
"cpu": [ "cpu": [
"arm" "arm"
], ],
@ -290,9 +274,9 @@
] ]
}, },
"node_modules/@rollup/rollup-android-arm64": { "node_modules/@rollup/rollup-android-arm64": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.40.2.tgz",
"integrity": "sha512-4H5ZtZitBPlbPsTv6HBB8zh1g5d0T8TzCmpndQdqq20Ugle/nroOyDMf9p7f88Gsu8vBLU78/cuh8FYHZqdXxw==", "integrity": "sha512-13unNoZ8NzUmnndhPTkWPWbX3vtHodYmy+I9kuLxN+F+l+x3LdVF7UCu8TWVMt1POHLh6oDHhnOA04n8oJZhBw==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@ -304,9 +288,9 @@
] ]
}, },
"node_modules/@rollup/rollup-darwin-arm64": { "node_modules/@rollup/rollup-darwin-arm64": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.40.2.tgz",
"integrity": "sha512-f2AJ7Qwx9z25hikXvg+asco8Sfuc5NCLg8rmqQBIOUoWys5sb/ZX9RkMZDPdnnDevXAMJA5AWLnRBmgdXGEUiA==", "integrity": "sha512-Gzf1Hn2Aoe8VZzevHostPX23U7N5+4D36WJNHK88NZHCJr7aVMG4fadqkIf72eqVPGjGc0HJHNuUaUcxiR+N/w==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@ -318,9 +302,9 @@
] ]
}, },
"node_modules/@rollup/rollup-darwin-x64": { "node_modules/@rollup/rollup-darwin-x64": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.40.2.tgz",
"integrity": "sha512-+/2JBrRfISCsWE4aEFXxd+7k9nWGXA8+wh7ZUHn/u8UDXOU9LN+QYKKhd57sIn6WRcorOnlqPMYFIwie/OHXWw==", "integrity": "sha512-47N4hxa01a4x6XnJoskMKTS8XZ0CZMd8YTbINbi+w03A2w4j1RTlnGHOz/P0+Bg1LaVL6ufZyNprSg+fW5nYQQ==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@ -332,9 +316,9 @@
] ]
}, },
"node_modules/@rollup/rollup-freebsd-arm64": { "node_modules/@rollup/rollup-freebsd-arm64": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.40.2.tgz",
"integrity": "sha512-SUeB0pYjIXwT2vfAMQ7E4ERPq9VGRrPR7Z+S4AMssah5EHIilYqjWQoTn5dkDtuIJUSTs8H+C9dwoEcg3b0sCA==", "integrity": "sha512-8t6aL4MD+rXSHHZUR1z19+9OFJ2rl1wGKvckN47XFRVO+QL/dUSpKA2SLRo4vMg7ELA8pzGpC+W9OEd1Z/ZqoQ==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@ -346,9 +330,9 @@
] ]
}, },
"node_modules/@rollup/rollup-freebsd-x64": { "node_modules/@rollup/rollup-freebsd-x64": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.40.2.tgz",
"integrity": "sha512-L3T66wAZiB/ooiPbxz0s6JEX6Sr2+HfgPSK+LMuZkaGZFAFCQAHiP3dbyqovYdNaiUXcl9TlgnIbcsIicAnOZg==", "integrity": "sha512-C+AyHBzfpsOEYRFjztcYUFsH4S7UsE9cDtHCtma5BK8+ydOZYgMmWg1d/4KBytQspJCld8ZIujFMAdKG1xyr4Q==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@ -360,9 +344,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-arm-gnueabihf": { "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.40.2.tgz",
"integrity": "sha512-UBXdQ4+ATARuFgsFrQ+tAsKvBi/Hly99aSVdeCUiHV9dRTTpMU7OrM3WXGys1l40wKVNiOl0QYY6cZQJ2xhKlQ==", "integrity": "sha512-de6TFZYIvJwRNjmW3+gaXiZ2DaWL5D5yGmSYzkdzjBDS3W+B9JQ48oZEsmMvemqjtAFzE16DIBLqd6IQQRuG9Q==",
"cpu": [ "cpu": [
"arm" "arm"
], ],
@ -374,9 +358,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-arm-musleabihf": { "node_modules/@rollup/rollup-linux-arm-musleabihf": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.40.2.tgz",
"integrity": "sha512-m/yfZ25HGdcCSwmopEJm00GP7xAUyVcBPjttGLRAqZ60X/bB4Qn6gP7XTwCIU6bITeKmIhhwZ4AMh2XLro+4+w==", "integrity": "sha512-urjaEZubdIkacKc930hUDOfQPysezKla/O9qV+O89enqsqUmQm8Xj8O/vh0gHg4LYfv7Y7UsE3QjzLQzDYN1qg==",
"cpu": [ "cpu": [
"arm" "arm"
], ],
@ -388,9 +372,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-arm64-gnu": { "node_modules/@rollup/rollup-linux-arm64-gnu": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.40.2.tgz",
"integrity": "sha512-Wy+cUmFuvziNL9qWRRzboNprqSQ/n38orbjRvd6byYWridp5TJ3CD+0+HUsbcWVSNz9bxkDUkyASGP0zS7GAvg==", "integrity": "sha512-KlE8IC0HFOC33taNt1zR8qNlBYHj31qGT1UqWqtvR/+NuCVhfufAq9fxO8BMFC22Wu0rxOwGVWxtCMvZVLmhQg==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@ -402,9 +386,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-arm64-musl": { "node_modules/@rollup/rollup-linux-arm64-musl": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.40.2.tgz",
"integrity": "sha512-CQ3MAGgiFmQW5XJX5W3wnxOBxKwFlUAgSXFA2SwgVRjrIiVt5LHfcQLeNSHKq5OEZwv+VCBwlD1+YKCjDG8cpg==", "integrity": "sha512-j8CgxvfM0kbnhu4XgjnCWJQyyBOeBI1Zq91Z850aUddUmPeQvuAy6OiMdPS46gNFgy8gN1xkYyLgwLYZG3rBOg==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@ -416,9 +400,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-loongarch64-gnu": { "node_modules/@rollup/rollup-linux-loongarch64-gnu": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.40.2.tgz",
"integrity": "sha512-rSzb1TsY4lSwH811cYC3OC2O2mzNMhM13vcnA7/0T6Mtreqr3/qs6WMDriMRs8yvHDI54qxHgOk8EV5YRAHFbw==", "integrity": "sha512-Ybc/1qUampKuRF4tQXc7G7QY9YRyeVSykfK36Y5Qc5dmrIxwFhrOzqaVTNoZygqZ1ZieSWTibfFhQ5qK8jpWxw==",
"cpu": [ "cpu": [
"loong64" "loong64"
], ],
@ -430,9 +414,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-powerpc64le-gnu": { "node_modules/@rollup/rollup-linux-powerpc64le-gnu": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.40.2.tgz",
"integrity": "sha512-fwr0n6NS0pG3QxxlqVYpfiY64Fd1Dqd8Cecje4ILAV01ROMp4aEdCj5ssHjRY3UwU7RJmeWd5fi89DBqMaTawg==", "integrity": "sha512-3FCIrnrt03CCsZqSYAOW/k9n625pjpuMzVfeI+ZBUSDT3MVIFDSPfSUgIl9FqUftxcUXInvFah79hE1c9abD+Q==",
"cpu": [ "cpu": [
"ppc64" "ppc64"
], ],
@ -444,9 +428,23 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-riscv64-gnu": { "node_modules/@rollup/rollup-linux-riscv64-gnu": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.40.2.tgz",
"integrity": "sha512-4uJb9qz7+Z/yUp5RPxDGGGUcoh0PnKF33QyWgEZ3X/GocpWb6Mb+skDh59FEt5d8+Skxqs9mng6Swa6B2AmQZg==", "integrity": "sha512-QNU7BFHEvHMp2ESSY3SozIkBPaPBDTsfVNGx3Xhv+TdvWXFGOSH2NJvhD1zKAT6AyuuErJgbdvaJhYVhVqrWTg==",
"cpu": [
"riscv64"
],
"dev": true,
"license": "MIT",
"optional": true,
"os": [
"linux"
]
},
"node_modules/@rollup/rollup-linux-riscv64-musl": {
"version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.40.2.tgz",
"integrity": "sha512-5W6vNYkhgfh7URiXTO1E9a0cy4fSgfE4+Hl5agb/U1sa0kjOLMLC1wObxwKxecE17j0URxuTrYZZME4/VH57Hg==",
"cpu": [ "cpu": [
"riscv64" "riscv64"
], ],
@ -458,9 +456,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-s390x-gnu": { "node_modules/@rollup/rollup-linux-s390x-gnu": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.40.2.tgz",
"integrity": "sha512-QlIo8ndocWBEnfmkYqj8vVtIUpIqJjfqKggjy7IdUncnt8BGixte1wDON7NJEvLg3Kzvqxtbo8tk+U1acYEBlw==", "integrity": "sha512-B7LKIz+0+p348JoAL4X/YxGx9zOx3sR+o6Hj15Y3aaApNfAshK8+mWZEf759DXfRLeL2vg5LYJBB7DdcleYCoQ==",
"cpu": [ "cpu": [
"s390x" "s390x"
], ],
@ -472,9 +470,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-x64-gnu": { "node_modules/@rollup/rollup-linux-x64-gnu": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.40.2.tgz",
"integrity": "sha512-hzpleiKtq14GWjz3ahWvJXgU1DQC9DteiwcsY4HgqUJUGxZThlL66MotdUEK9zEo0PK/2ADeZGM9LIondE302A==", "integrity": "sha512-lG7Xa+BmBNwpjmVUbmyKxdQJ3Q6whHjMjzQplOs5Z+Gj7mxPtWakGHqzMqNER68G67kmCX9qX57aRsW5V0VOng==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@ -486,9 +484,9 @@
] ]
}, },
"node_modules/@rollup/rollup-linux-x64-musl": { "node_modules/@rollup/rollup-linux-x64-musl": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.40.2.tgz",
"integrity": "sha512-jqtKrO715hDlvUcEsPn55tZt2TEiBvBtCMkUuU0R6fO/WPT7lO9AONjPbd8II7/asSiNVQHCMn4OLGigSuxVQA==", "integrity": "sha512-tD46wKHd+KJvsmije4bUskNuvWKFcTOIM9tZ/RrmIvcXnbi0YK/cKS9FzFtAm7Oxi2EhV5N2OpfFB348vSQRXA==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@ -500,9 +498,9 @@
] ]
}, },
"node_modules/@rollup/rollup-win32-arm64-msvc": { "node_modules/@rollup/rollup-win32-arm64-msvc": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.40.2.tgz",
"integrity": "sha512-RnHy7yFf2Wz8Jj1+h8klB93N0NHNHXFhNwAmiy9zJdpY7DE01VbEVtPdrK1kkILeIbHGRJjvfBDBhnxBr8kD4g==", "integrity": "sha512-Bjv/HG8RRWLNkXwQQemdsWw4Mg+IJ29LK+bJPW2SCzPKOUaMmPEppQlu/Fqk1d7+DX3V7JbFdbkh/NMmurT6Pg==",
"cpu": [ "cpu": [
"arm64" "arm64"
], ],
@ -514,9 +512,9 @@
] ]
}, },
"node_modules/@rollup/rollup-win32-ia32-msvc": { "node_modules/@rollup/rollup-win32-ia32-msvc": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.40.2.tgz",
"integrity": "sha512-i7aT5HdiZIcd7quhzvwQ2oAuX7zPYrYfkrd1QFfs28Po/i0q6kas/oRrzGlDhAEyug+1UfUtkWdmoVlLJj5x9Q==", "integrity": "sha512-dt1llVSGEsGKvzeIO76HToiYPNPYPkmjhMHhP00T9S4rDern8P2ZWvWAQUEJ+R1UdMWJ/42i/QqJ2WV765GZcA==",
"cpu": [ "cpu": [
"ia32" "ia32"
], ],
@ -528,9 +526,9 @@
] ]
}, },
"node_modules/@rollup/rollup-win32-x64-msvc": { "node_modules/@rollup/rollup-win32-x64-msvc": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.34.1.tgz", "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.40.2.tgz",
"integrity": "sha512-k3MVFD9Oq+laHkw2N2v7ILgoa9017ZMF/inTtHzyTVZjYs9cSH18sdyAf6spBAJIGwJ5UaC7et2ZH1WCdlhkMw==", "integrity": "sha512-bwspbWB04XJpeElvsp+DCylKfF4trJDa2Y9Go8O6A7YLX2LIKGcNK/CYImJN6ZP4DcuOHB4Utl3iCbnR62DudA==",
"cpu": [ "cpu": [
"x64" "x64"
], ],
@ -541,10 +539,19 @@
"win32" "win32"
] ]
}, },
"node_modules/@sveltejs/acorn-typescript": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/@sveltejs/acorn-typescript/-/acorn-typescript-1.0.5.tgz",
"integrity": "sha512-IwQk4yfwLdibDlrXVE04jTZYlLnwsTT2PIOQQGNLWfjavGifnk1JD1LcZjZaBTRcxZu2FfPfNLOE04DSu9lqtQ==",
"license": "MIT",
"peerDependencies": {
"acorn": "^8.9.0"
}
},
"node_modules/@sveltestrap/sveltestrap": { "node_modules/@sveltestrap/sveltestrap": {
"version": "7.0.3", "version": "7.1.0",
"resolved": "https://registry.npmjs.org/@sveltestrap/sveltestrap/-/sveltestrap-7.0.3.tgz", "resolved": "https://registry.npmjs.org/@sveltestrap/sveltestrap/-/sveltestrap-7.1.0.tgz",
"integrity": "sha512-lvZpVlq7pHVxJbjq2d6JAAr/Z1mkSaPOw3pwpZiuQ9FK97/Pr66m5Bf9qZIc1FUkLnbNiDtRAbhVyR8LVdr3FQ==", "integrity": "sha512-TpIx25kqLV+z+VD3yfqYayOI1IaCeWFbT0uqM6NfA4vQgDs9PjFwmjkU4YEAlV/ngs9e7xPmaRWE7lkrg4Miow==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@popperjs/core": "^2.11.8" "@popperjs/core": "^2.11.8"
@ -561,9 +568,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/@types/estree": { "node_modules/@types/estree": {
"version": "1.0.6", "version": "1.0.7",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz",
"integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/@types/resolve": { "node_modules/@types/resolve": {
@ -574,9 +581,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/@urql/core": { "node_modules/@urql/core": {
"version": "5.1.0", "version": "5.1.1",
"resolved": "https://registry.npmjs.org/@urql/core/-/core-5.1.0.tgz", "resolved": "https://registry.npmjs.org/@urql/core/-/core-5.1.1.tgz",
"integrity": "sha512-yC3sw8yqjbX45GbXxfiBY8GLYCiyW/hLBbQF9l3TJrv4ro00Y0ChkKaD9I2KntRxAVm9IYBqh0awX8fwWAe/Yw==", "integrity": "sha512-aGh024z5v2oINGD/In6rAtVKTm4VmQ2TxKQBAtk2ZSME5dunZFcjltw4p5ENQg+5CBhZ3FHMzl0Oa+rwqiWqlg==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@0no-co/graphql.web": "^1.0.5", "@0no-co/graphql.web": "^1.0.5",
@ -584,12 +591,12 @@
} }
}, },
"node_modules/@urql/svelte": { "node_modules/@urql/svelte": {
"version": "4.2.2", "version": "4.2.3",
"resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.2.2.tgz", "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.2.3.tgz",
"integrity": "sha512-6ntLGsWcnNtaMZVmFpePfFTSpYxYpznCAqnuvLDjt7Oa7YqHcFiyPnz7IIsiPD9VE6hZSi0+RwmRk5BMba/teQ==", "integrity": "sha512-v3eArfymhdjaM5VQFp3QZxq9veYPadmDfX7ueid/kD4DlRplIycPakJ2FrKigh46SXa5mWqJ3QWuWyRKVu61sw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@urql/core": "^5.0.0", "@urql/core": "^5.1.1",
"wonka": "^6.3.2" "wonka": "^6.3.2"
}, },
"peerDependencies": { "peerDependencies": {
@ -598,9 +605,9 @@
} }
}, },
"node_modules/acorn": { "node_modules/acorn": {
"version": "8.14.0", "version": "8.14.1",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz",
"integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==",
"license": "MIT", "license": "MIT",
"bin": { "bin": {
"acorn": "bin/acorn" "acorn": "bin/acorn"
@ -609,36 +616,6 @@
"node": ">=0.4.0" "node": ">=0.4.0"
} }
}, },
"node_modules/acorn-typescript": {
"version": "1.4.13",
"resolved": "https://registry.npmjs.org/acorn-typescript/-/acorn-typescript-1.4.13.tgz",
"integrity": "sha512-xsc9Xv0xlVfwp2o7sQ+GCQ1PgbkdcpWdTzrwXxO3xDMTAywVS3oXVOcOHuRjAPkS4P9b+yc/qNF15460v+jp4Q==",
"license": "MIT",
"peerDependencies": {
"acorn": ">=8.9.0"
}
},
"node_modules/adm-zip": {
"version": "0.5.16",
"resolved": "https://registry.npmjs.org/adm-zip/-/adm-zip-0.5.16.tgz",
"integrity": "sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ==",
"license": "MIT",
"engines": {
"node": ">=12.0"
}
},
"node_modules/agent-base": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
"integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
"license": "MIT",
"dependencies": {
"debug": "4"
},
"engines": {
"node": ">= 6.0.0"
}
},
"node_modules/aria-query": { "node_modules/aria-query": {
"version": "5.3.2", "version": "5.3.2",
"resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz",
@ -648,23 +625,6 @@
"node": ">= 0.4" "node": ">= 0.4"
} }
}, },
"node_modules/asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"license": "MIT"
},
"node_modules/axios": {
"version": "1.7.9",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.9.tgz",
"integrity": "sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw==",
"license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
}
},
"node_modules/axobject-query": { "node_modules/axobject-query": {
"version": "4.1.0", "version": "4.1.0",
"resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz",
@ -682,9 +642,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/chart.js": { "node_modules/chart.js": {
"version": "4.4.7", "version": "4.4.9",
"resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.7.tgz", "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.9.tgz",
"integrity": "sha512-pwkcKfdzTMAU/+jNosKhNL2bHtJc/sSmYgVbuGTEDhzkrhmyihmP7vUc/5ZK9WopidMDHNe3Wm7jOd/WhuHWuw==", "integrity": "sha512-EyZ9wWKgpAU0fLJ43YAEIF8sr5F2W3LqbS40ZJyHIner2lY14ufqv2VMp69MAiZ2rpwxEUxEhIH/0U3xyRynxg==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@kurkle/color": "^0.3.0" "@kurkle/color": "^0.3.0"
@ -702,18 +662,6 @@
"node": ">=6" "node": ">=6"
} }
}, },
"node_modules/combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"license": "MIT",
"dependencies": {
"delayed-stream": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/commander": { "node_modules/commander": {
"version": "2.20.3", "version": "2.20.3",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
@ -751,23 +699,6 @@
"url": "https://github.com/sponsors/kossnocorp" "url": "https://github.com/sponsors/kossnocorp"
} }
}, },
"node_modules/debug": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz",
"integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==",
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/decimal.js": { "node_modules/decimal.js": {
"version": "10.5.0", "version": "10.5.0",
"resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.5.0.tgz", "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.5.0.tgz",
@ -784,15 +715,6 @@
"node": ">=0.10.0" "node": ">=0.10.0"
} }
}, },
"node_modules/delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"license": "MIT",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/escape-latex": { "node_modules/escape-latex": {
"version": "1.2.0", "version": "1.2.0",
"resolved": "https://registry.npmjs.org/escape-latex/-/escape-latex-1.2.0.tgz", "resolved": "https://registry.npmjs.org/escape-latex/-/escape-latex-1.2.0.tgz",
@ -806,9 +728,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/esrap": { "node_modules/esrap": {
"version": "1.4.3", "version": "1.4.6",
"resolved": "https://registry.npmjs.org/esrap/-/esrap-1.4.3.tgz", "resolved": "https://registry.npmjs.org/esrap/-/esrap-1.4.6.tgz",
"integrity": "sha512-Xddc1RsoFJ4z9nR7W7BFaEPIp4UXoeQ0+077UdWLxbafMQFyU79sQJMk7kxNgRwQ9/aVgaKacCHC2pUACGwmYw==", "integrity": "sha512-F/D2mADJ9SHY3IwksD4DAXjTt7qt7GWUf3/8RhCNWmC/67tyb55dpimHmy7EplakFaflV0R/PC+fdSPqrRHAQw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@jridgewell/sourcemap-codec": "^1.4.15" "@jridgewell/sourcemap-codec": "^1.4.15"
@ -821,9 +743,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/fdir": { "node_modules/fdir": {
"version": "6.4.3", "version": "6.4.4",
"resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.3.tgz", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz",
"integrity": "sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw==", "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"peerDependencies": { "peerDependencies": {
@ -835,44 +757,10 @@
} }
} }
}, },
"node_modules/follow-redirects": {
"version": "1.15.9",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz",
"integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/RubenVerborgh"
}
],
"license": "MIT",
"engines": {
"node": ">=4.0"
},
"peerDependenciesMeta": {
"debug": {
"optional": true
}
}
},
"node_modules/form-data": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
"integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/fraction.js": { "node_modules/fraction.js": {
"version": "5.2.1", "version": "5.2.2",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.2.1.tgz", "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.2.2.tgz",
"integrity": "sha512-Ah6t/7YCYjrPUFUFsOsRLMXAdnYM+aQwmojD2Ayb/Ezr82SwES0vuyQ8qZ3QO8n9j7W14VJuVZZet8U3bhSdQQ==", "integrity": "sha512-uXBDv5knpYmv/2gLzWQ5mBHGBRk9wcKTeWu6GLTUEQfjCxO09uM/mHDrojlL+Q1mVGIIFo149Gba7od1XPgSzQ==",
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">= 12" "node": ">= 12"
@ -907,22 +795,10 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/get-port": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/get-port/-/get-port-1.0.0.tgz",
"integrity": "sha512-vg59F3kcXBOtcIijwtdAyCxFocyv/fVkGQvw1kVGrxFO1U4SSGkGjrbASg5DN3TVekVle/jltwOjYRnZWc1YdA==",
"license": "MIT",
"bin": {
"get-port": "cli.js"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/graphql": { "node_modules/graphql": {
"version": "16.10.0", "version": "16.11.0",
"resolved": "https://registry.npmjs.org/graphql/-/graphql-16.10.0.tgz", "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.11.0.tgz",
"integrity": "sha512-AjqGKbDGUFRKIRCP9tCKiIGHyriz2oHEbPIbEtcSLSs4YjReZOIPQQWek4+6hjw62H9QShXHyaGivGiYVLeYFQ==", "integrity": "sha512-mS1lbMsxgQj6hge1XZ6p7GPhbrtFwUFYi3wRzXAC/FmYnyXMTvvI3td3rjmQ2u8ewXueaSvRPWaEcgVVOT9Jnw==",
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
@ -941,19 +817,6 @@
"node": ">= 0.4" "node": ">= 0.4"
} }
}, },
"node_modules/https-proxy-agent": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
"integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
"license": "MIT",
"dependencies": {
"agent-base": "6",
"debug": "4"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/is-core-module": { "node_modules/is-core-module": {
"version": "2.16.1", "version": "2.16.1",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
@ -1009,13 +872,12 @@
} }
}, },
"node_modules/mathjs": { "node_modules/mathjs": {
"version": "14.2.0", "version": "14.4.0",
"resolved": "https://registry.npmjs.org/mathjs/-/mathjs-14.2.0.tgz", "resolved": "https://registry.npmjs.org/mathjs/-/mathjs-14.4.0.tgz",
"integrity": "sha512-CcJV1cQwRSrQIAAX3sWejFPUvUsQnTZYisEEuoMBw3gMDJDQzvKQlrul/vjKAbdtW7zaDzPCl04h1sf0wh41TA==", "integrity": "sha512-CpoYDhNENefjIG9wU9epr+0pBHzlaySfpWcblZdAf5qXik/j/U8eSmx/oNbmXO0F5PyfwPGVD/wK4VWsTho1SA==",
"license": "Apache-2.0", "license": "Apache-2.0",
"dependencies": { "dependencies": {
"@babel/runtime": "^7.25.7", "@babel/runtime": "^7.26.10",
"@lambdatest/node-tunnel": "^4.0.8",
"complex.js": "^2.2.5", "complex.js": "^2.2.5",
"decimal.js": "^10.4.3", "decimal.js": "^10.4.3",
"escape-latex": "^1.2.0", "escape-latex": "^1.2.0",
@ -1032,33 +894,6 @@
"node": ">= 18" "node": ">= 18"
} }
}, },
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/path-parse": { "node_modules/path-parse": {
"version": "1.0.7", "version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
@ -1078,12 +913,6 @@
"url": "https://github.com/sponsors/jonschlinkert" "url": "https://github.com/sponsors/jonschlinkert"
} }
}, },
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
"license": "MIT"
},
"node_modules/randombytes": { "node_modules/randombytes": {
"version": "2.1.0", "version": "2.1.0",
"resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
@ -1094,12 +923,6 @@
"safe-buffer": "^5.1.0" "safe-buffer": "^5.1.0"
} }
}, },
"node_modules/regenerator-runtime": {
"version": "0.14.1",
"resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz",
"integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==",
"license": "MIT"
},
"node_modules/resolve": { "node_modules/resolve": {
"version": "1.22.10", "version": "1.22.10",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz",
@ -1132,13 +955,13 @@
} }
}, },
"node_modules/rollup": { "node_modules/rollup": {
"version": "4.34.1", "version": "4.40.2",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.34.1.tgz", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.40.2.tgz",
"integrity": "sha512-iYZ/+PcdLYSGfH3S+dGahlW/RWmsqDhLgj1BT9DH/xXJ0ggZN7xkdP9wipPNjjNLczI+fmMLmTB9pye+d2r4GQ==", "integrity": "sha512-tfUOg6DTP4rhQ3VjOO6B4wyrJnGOX85requAXvqYTHsOgb2TFJdZ3aWpT8W2kPoypSGP7dZUyzxJ9ee4buM5Fg==",
"devOptional": true, "devOptional": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@types/estree": "1.0.6" "@types/estree": "1.0.7"
}, },
"bin": { "bin": {
"rollup": "dist/bin/rollup" "rollup": "dist/bin/rollup"
@ -1148,25 +971,26 @@
"npm": ">=8.0.0" "npm": ">=8.0.0"
}, },
"optionalDependencies": { "optionalDependencies": {
"@rollup/rollup-android-arm-eabi": "4.34.1", "@rollup/rollup-android-arm-eabi": "4.40.2",
"@rollup/rollup-android-arm64": "4.34.1", "@rollup/rollup-android-arm64": "4.40.2",
"@rollup/rollup-darwin-arm64": "4.34.1", "@rollup/rollup-darwin-arm64": "4.40.2",
"@rollup/rollup-darwin-x64": "4.34.1", "@rollup/rollup-darwin-x64": "4.40.2",
"@rollup/rollup-freebsd-arm64": "4.34.1", "@rollup/rollup-freebsd-arm64": "4.40.2",
"@rollup/rollup-freebsd-x64": "4.34.1", "@rollup/rollup-freebsd-x64": "4.40.2",
"@rollup/rollup-linux-arm-gnueabihf": "4.34.1", "@rollup/rollup-linux-arm-gnueabihf": "4.40.2",
"@rollup/rollup-linux-arm-musleabihf": "4.34.1", "@rollup/rollup-linux-arm-musleabihf": "4.40.2",
"@rollup/rollup-linux-arm64-gnu": "4.34.1", "@rollup/rollup-linux-arm64-gnu": "4.40.2",
"@rollup/rollup-linux-arm64-musl": "4.34.1", "@rollup/rollup-linux-arm64-musl": "4.40.2",
"@rollup/rollup-linux-loongarch64-gnu": "4.34.1", "@rollup/rollup-linux-loongarch64-gnu": "4.40.2",
"@rollup/rollup-linux-powerpc64le-gnu": "4.34.1", "@rollup/rollup-linux-powerpc64le-gnu": "4.40.2",
"@rollup/rollup-linux-riscv64-gnu": "4.34.1", "@rollup/rollup-linux-riscv64-gnu": "4.40.2",
"@rollup/rollup-linux-s390x-gnu": "4.34.1", "@rollup/rollup-linux-riscv64-musl": "4.40.2",
"@rollup/rollup-linux-x64-gnu": "4.34.1", "@rollup/rollup-linux-s390x-gnu": "4.40.2",
"@rollup/rollup-linux-x64-musl": "4.34.1", "@rollup/rollup-linux-x64-gnu": "4.40.2",
"@rollup/rollup-win32-arm64-msvc": "4.34.1", "@rollup/rollup-linux-x64-musl": "4.40.2",
"@rollup/rollup-win32-ia32-msvc": "4.34.1", "@rollup/rollup-win32-arm64-msvc": "4.40.2",
"@rollup/rollup-win32-x64-msvc": "4.34.1", "@rollup/rollup-win32-ia32-msvc": "4.40.2",
"@rollup/rollup-win32-x64-msvc": "4.40.2",
"fsevents": "~2.3.2" "fsevents": "~2.3.2"
} }
}, },
@ -1296,18 +1120,6 @@
"source-map": "^0.6.0" "source-map": "^0.6.0"
} }
}, },
"node_modules/split": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
"integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
"license": "MIT",
"dependencies": {
"through": "2"
},
"engines": {
"node": "*"
}
},
"node_modules/supports-preserve-symlinks-flag": { "node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0", "version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
@ -1322,21 +1134,21 @@
} }
}, },
"node_modules/svelte": { "node_modules/svelte": {
"version": "5.19.6", "version": "5.28.6",
"resolved": "https://registry.npmjs.org/svelte/-/svelte-5.19.6.tgz", "resolved": "https://registry.npmjs.org/svelte/-/svelte-5.28.6.tgz",
"integrity": "sha512-6ydekB3qyqUal+UhfMjmVOjRGtxysR8vuiMhi2nwuBtPJWnctVlsGspjVFB05qmR+TXI1emuqtZt81c0XiFleA==", "integrity": "sha512-9qqr7mw8YR9PAnxGFfzCK6PUlNGtns7wVavrhnxyf3fpB1mP/Ol55Z2UnIapsSzNNl3k9qw7cZ22PdE8+xT/jQ==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@ampproject/remapping": "^2.3.0", "@ampproject/remapping": "^2.3.0",
"@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/sourcemap-codec": "^1.5.0",
"@sveltejs/acorn-typescript": "^1.0.5",
"@types/estree": "^1.0.5", "@types/estree": "^1.0.5",
"acorn": "^8.12.1", "acorn": "^8.12.1",
"acorn-typescript": "^1.4.13",
"aria-query": "^5.3.1", "aria-query": "^5.3.1",
"axobject-query": "^4.1.0", "axobject-query": "^4.1.0",
"clsx": "^2.1.1", "clsx": "^2.1.1",
"esm-env": "^1.2.1", "esm-env": "^1.2.1",
"esrap": "^1.4.3", "esrap": "^1.4.6",
"is-reference": "^3.0.3", "is-reference": "^3.0.3",
"locate-character": "^3.0.0", "locate-character": "^3.0.0",
"magic-string": "^0.30.11", "magic-string": "^0.30.11",
@ -1356,9 +1168,9 @@
} }
}, },
"node_modules/terser": { "node_modules/terser": {
"version": "5.37.0", "version": "5.39.1",
"resolved": "https://registry.npmjs.org/terser/-/terser-5.37.0.tgz", "resolved": "https://registry.npmjs.org/terser/-/terser-5.39.1.tgz",
"integrity": "sha512-B8wRRkmre4ERucLM/uXx4MOV5cbnOlVAqUst+1+iLKPI0dOgFO28f84ptoQt9HEI537PMzfYa/d+GEPKTRXmYA==", "integrity": "sha512-Mm6+uad0ZuDtcV8/4uOZQDQ8RuiC5Pu+iZRedJtF7yA/27sPL7d++In/AJKpWZlU3SYMPPkVfwetn6sgZ66pUA==",
"dev": true, "dev": true,
"license": "BSD-2-Clause", "license": "BSD-2-Clause",
"dependencies": { "dependencies": {
@ -1374,12 +1186,6 @@
"node": ">=10" "node": ">=10"
} }
}, },
"node_modules/through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==",
"license": "MIT"
},
"node_modules/tiny-emitter": { "node_modules/tiny-emitter": {
"version": "2.1.0", "version": "2.1.0",
"resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz",
@ -1396,15 +1202,15 @@
} }
}, },
"node_modules/uplot": { "node_modules/uplot": {
"version": "1.6.31", "version": "1.6.32",
"resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.31.tgz", "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.32.tgz",
"integrity": "sha512-sQZqSwVCbJGnFB4IQjQYopzj5CoTZJ4Br1fG/xdONimqgHmsacvCjNesdGDypNKFbrhLGIeshYhy89FxPF+H+w==", "integrity": "sha512-KIMVnG68zvu5XXUbC4LQEPnhwOxBuLyW1AHtpm6IKTXImkbLgkMy+jabjLgSLMasNuGGzQm/ep3tOkyTxpiQIw==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/wonka": { "node_modules/wonka": {
"version": "6.3.4", "version": "6.3.5",
"resolved": "https://registry.npmjs.org/wonka/-/wonka-6.3.4.tgz", "resolved": "https://registry.npmjs.org/wonka/-/wonka-6.3.5.tgz",
"integrity": "sha512-CjpbqNtBGNAeyNS/9W6q3kSkKE52+FjIj7AkFlLr11s/VWGUu6a2CdYSdGxocIhIVjaW/zchesBQUKPVU69Cqg==", "integrity": "sha512-SSil+ecw6B4/Dm7Pf2sAshKQ5hWFvfyGlfPbEd6A14dOH6VDjrmbY86u6nZvy9omGwwIPFR8V41+of1EezgoUw==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/zimmerframe": { "node_modules/zimmerframe": {

View File

@ -62,6 +62,7 @@ export default [
entrypoint('jobs', 'src/jobs.entrypoint.js'), entrypoint('jobs', 'src/jobs.entrypoint.js'),
entrypoint('user', 'src/user.entrypoint.js'), entrypoint('user', 'src/user.entrypoint.js'),
entrypoint('list', 'src/list.entrypoint.js'), entrypoint('list', 'src/list.entrypoint.js'),
entrypoint('taglist', 'src/tags.entrypoint.js'),
entrypoint('job', 'src/job.entrypoint.js'), entrypoint('job', 'src/job.entrypoint.js'),
entrypoint('systems', 'src/systems.entrypoint.js'), entrypoint('systems', 'src/systems.entrypoint.js'),
entrypoint('node', 'src/node.entrypoint.js'), entrypoint('node', 'src/node.entrypoint.js'),

View File

@ -26,6 +26,8 @@
init, init,
convert2uplot, convert2uplot,
binsFromFootprint, binsFromFootprint,
scramble,
scrambleNames,
} from "./generic/utils.js"; } from "./generic/utils.js";
import PlotSelection from "./analysis/PlotSelection.svelte"; import PlotSelection from "./analysis/PlotSelection.svelte";
import Filters from "./generic/Filters.svelte"; import Filters from "./generic/Filters.svelte";
@ -396,7 +398,7 @@
quantities={$topQuery.data.topList.map( quantities={$topQuery.data.topList.map(
(t) => t[sortSelection.key], (t) => t[sortSelection.key],
)} )}
entities={$topQuery.data.topList.map((t) => t.id)} entities={$topQuery.data.topList.map((t) => scrambleNames ? scramble(t.id) : t.id)}
/> />
{/if} {/if}
{/key} {/key}
@ -429,21 +431,21 @@
{#if groupSelection.key == "user"} {#if groupSelection.key == "user"}
<th scope="col" id="topName-{te.id}" <th scope="col" id="topName-{te.id}"
><a href="/monitoring/user/{te.id}?cluster={clusterName}" ><a href="/monitoring/user/{te.id}?cluster={clusterName}"
>{te.id}</a >{scrambleNames ? scramble(te.id) : te.id}</a
></th ></th
> >
{#if te?.name} {#if te?.name}
<Tooltip <Tooltip
target={`topName-${te.id}`} target={`topName-${te.id}`}
placement="left" placement="left"
>{te.name}</Tooltip >{scrambleNames ? scramble(te.name) : te.name}</Tooltip
> >
{/if} {/if}
{:else} {:else}
<th scope="col" <th scope="col"
><a ><a
href="/monitoring/jobs/?cluster={clusterName}&project={te.id}&projectMatch=eq" href="/monitoring/jobs/?cluster={clusterName}&project={te.id}&projectMatch=eq"
>{te.id}</a >{scrambleNames ? scramble(te.id) : te.id}</a
></th ></th
> >
{/if} {/if}

View File

@ -40,7 +40,7 @@
import JobRoofline from "./job/JobRoofline.svelte"; import JobRoofline from "./job/JobRoofline.svelte";
import EnergySummary from "./job/EnergySummary.svelte"; import EnergySummary from "./job/EnergySummary.svelte";
import PlotGrid from "./generic/PlotGrid.svelte"; import PlotGrid from "./generic/PlotGrid.svelte";
import StatsTable from "./job/StatsTable.svelte"; import StatsTab from "./job/StatsTab.svelte";
export let dbid; export let dbid;
export let username; export let username;
@ -53,10 +53,8 @@
let isMetricsSelectionOpen = false, let isMetricsSelectionOpen = false,
selectedMetrics = [], selectedMetrics = [],
selectedScopes = []; selectedScopes = [],
plots = {};
let plots = {},
statsTable
let availableMetrics = new Set(), let availableMetrics = new Set(),
missingMetrics = [], missingMetrics = [],
@ -127,19 +125,16 @@
let job = $initq.data.job; let job = $initq.data.job;
if (!job) return; if (!job) return;
const pendingMetrics = [ const pendingMetrics = (
...(ccconfig[`job_view_selectedMetrics:${job.cluster}`] || ccconfig[`job_view_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
$initq.data.globalMetrics.reduce((names, gm) => { ccconfig[`job_view_selectedMetrics:${job.cluster}`]
if (gm.availability.find((av) => av.cluster === job.cluster)) { ) ||
names.push(gm.name); $initq.data.globalMetrics.reduce((names, gm) => {
} if (gm.availability.find((av) => av.cluster === job.cluster && av.subClusters.includes(job.subCluster))) {
return names; names.push(gm.name);
}, []) }
), return names;
...(ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] || }, [])
ccconfig[`job_view_nodestats_selectedMetrics`]
),
];
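      // Fallback order above: the per-subcluster config key wins, then the
      // per-cluster key, then all globalMetrics available on this subcluster.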
// Select default Scopes to load: Check before if any metric has accelerator scope by default // Select default Scopes to load: Check before if any metric has accelerator scope by default
const accScopeDefault = [...pendingMetrics].some(function (m) { const accScopeDefault = [...pendingMetrics].some(function (m) {
@ -222,7 +217,7 @@
<Col xs={12} md={6} xl={3} class="mb-3 mb-xxl-0"> <Col xs={12} md={6} xl={3} class="mb-3 mb-xxl-0">
{#if $initq.error} {#if $initq.error}
<Card body color="danger">{$initq.error.message}</Card> <Card body color="danger">{$initq.error.message}</Card>
{:else if $initq.data} {:else if $initq?.data}
<Card class="overflow-auto" style="height: 400px;"> <Card class="overflow-auto" style="height: 400px;">
<TabContent> <!-- on:tab={(e) => (status = e.detail)} --> <TabContent> <!-- on:tab={(e) => (status = e.detail)} -->
{#if $initq.data?.job?.metaData?.message} {#if $initq.data?.job?.metaData?.message}
@ -296,7 +291,7 @@
<Card class="mb-3"> <Card class="mb-3">
<CardBody> <CardBody>
<Row class="mb-2"> <Row class="mb-2">
{#if $initq.data} {#if $initq?.data}
<Col xs="auto"> <Col xs="auto">
<Button outline on:click={() => (isMetricsSelectionOpen = true)} color="primary"> <Button outline on:click={() => (isMetricsSelectionOpen = true)} color="primary">
Select Metrics (Selected {selectedMetrics.length} of {availableMetrics.size} available) Select Metrics (Selected {selectedMetrics.length} of {availableMetrics.size} available)
@ -309,7 +304,7 @@
{#if $jobMetrics.error} {#if $jobMetrics.error}
<Row class="mt-2"> <Row class="mt-2">
<Col> <Col>
{#if $initq.data.job.monitoringStatus == 0 || $initq.data.job.monitoringStatus == 2} {#if $initq?.data && ($initq.data.job?.monitoringStatus == 0 || $initq.data.job?.monitoringStatus == 2)}
<Card body color="warning">Not monitored or archiving failed</Card> <Card body color="warning">Not monitored or archiving failed</Card>
<br /> <br />
{/if} {/if}
@ -334,7 +329,6 @@
{#if item.data} {#if item.data}
<Metric <Metric
bind:this={plots[item.metric]} bind:this={plots[item.metric]}
on:more-loaded={({ detail }) => statsTable.moreLoaded(detail)}
job={$initq.data.job} job={$initq.data.job}
metricName={item.metric} metricName={item.metric}
metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit} metricUnit={$initq.data.globalMetrics.find((gm) => gm.name == item.metric)?.unit}
@ -343,10 +337,25 @@
scopes={item.data.map((x) => x.scope)} scopes={item.data.map((x) => x.scope)}
isShared={$initq.data.job.exclusive != 1} isShared={$initq.data.job.exclusive != 1}
/> />
{:else if item.disabled == true}
<Card color="info">
<CardHeader class="mb-0">
<b>Disabled Metric</b>
</CardHeader>
<CardBody>
<p>Metric <b>{item.metric}</b> is disabled for subcluster <b>{$initq.data.job.subCluster}</b>.</p>
<p class="mb-1">To remove this card, open metric selection and press "Close and Apply".</p>
</CardBody>
</Card>
{:else} {:else}
<Card body color="warning" class="mt-2" <Card color="warning" class="mt-2">
>No dataset returned for <code>{item.metric}</code></Card <CardHeader class="mb-0">
> <b>Missing Metric</b>
</CardHeader>
<CardBody>
<p class="mb-1">No dataset returned for <b>{item.metric}</b>.</p>
</CardBody>
</Card>
{/if} {/if}
</PlotGrid> </PlotGrid>
{/if} {/if}
@ -356,7 +365,7 @@
<!-- Statistics Table --> <!-- Statistics Table -->
<Row class="mb-3"> <Row class="mb-3">
<Col> <Col>
{#if $initq.data} {#if $initq?.data}
<Card> <Card>
<TabContent> <TabContent>
{#if somethingMissing} {#if somethingMissing}
@ -389,22 +398,8 @@
</div> </div>
</TabPane> </TabPane>
{/if} {/if}
<TabPane <!-- Includes <TabPane> Statistics Table with Independent GQL Query -->
tabId="stats" <StatsTab job={$initq.data.job} clusters={$initq.data.clusters} tabActive={!somethingMissing}/>
tab="Statistics Table"
class="overflow-x-auto"
active={!somethingMissing}
>
{#if $jobMetrics?.data?.jobMetrics}
{#key $jobMetrics.data.jobMetrics}
<StatsTable
bind:this={statsTable}
job={$initq.data.job}
jobMetrics={$jobMetrics.data.jobMetrics}
/>
{/key}
{/if}
</TabPane>
<TabPane tabId="job-script" tab="Job Script"> <TabPane tabId="job-script" tab="Job Script">
<div class="pre-wrapper"> <div class="pre-wrapper">
{#if $initq.data.job.metaData?.jobScript} {#if $initq.data.job.metaData?.jobScript}
@ -431,9 +426,10 @@
</Col> </Col>
</Row> </Row>
{#if $initq.data} {#if $initq?.data}
<MetricSelection <MetricSelection
cluster={$initq.data.job.cluster} cluster={$initq.data.job.cluster}
subCluster={$initq.data.job.subCluster}
configName="job_view_selectedMetrics" configName="job_view_selectedMetrics"
bind:metrics={selectedMetrics} bind:metrics={selectedMetrics}
bind:isOpen={isMetricsSelectionOpen} bind:isOpen={isMetricsSelectionOpen}

View File

@ -21,6 +21,7 @@
import { init } from "./generic/utils.js"; import { init } from "./generic/utils.js";
import Filters from "./generic/Filters.svelte"; import Filters from "./generic/Filters.svelte";
import JobList from "./generic/JobList.svelte"; import JobList from "./generic/JobList.svelte";
import JobCompare from "./generic/JobCompare.svelte";
import TextFilter from "./generic/helper/TextFilter.svelte"; import TextFilter from "./generic/helper/TextFilter.svelte";
import Refresher from "./generic/helper/Refresher.svelte"; import Refresher from "./generic/helper/Refresher.svelte";
import Sorting from "./generic/select/SortSelection.svelte"; import Sorting from "./generic/select/SortSelection.svelte";
@ -35,8 +36,12 @@
export let roles; export let roles;
let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the let filterComponent; // see why here: https://stackoverflow.com/questions/58287729/how-can-i-export-a-function-from-a-svelte-component-that-changes-a-value-in-the
let filterBuffer = [];
let selectedJobs = [];
let jobList, let jobList,
matchedJobs = null; jobCompare,
matchedListJobs,
matchedCompareJobs = null;
let sorting = { field: "startTime", type: "col", order: "DESC" }, let sorting = { field: "startTime", type: "col", order: "DESC" },
isSortingOpen = false, isSortingOpen = false,
isMetricsSelectionOpen = false; isMetricsSelectionOpen = false;
@ -49,11 +54,16 @@
: !!ccconfig.plot_list_showFootprint; : !!ccconfig.plot_list_showFootprint;
let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null; let selectedCluster = filterPresets?.cluster ? filterPresets.cluster : null;
let presetProject = filterPresets?.project ? filterPresets.project : "" let presetProject = filterPresets?.project ? filterPresets.project : ""
let showCompare = false;
// The filterPresets are handled by the Filters component, // The filterPresets are handled by the Filters component,
// so we need to wait for it to be ready before we can start a query. // so we need to wait for it to be ready before we can start a query.
// This is also why JobList component starts out with a paused query. // This is also why JobList component starts out with a paused query.
onMount(() => filterComponent.updateFilters()); onMount(() => filterComponent.updateFilters());
$: if (filterComponent && selectedJobs.length == 0) {
filterComponent.updateFilters({dbId: []})
}
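  // Intended flow (as assumed from this diff): the "Clear" button empties
  // selectedJobs, which re-triggers the reactive statement above and resets
  // the dbId filter, e.g. filterComponent.updateFilters({dbId: []}).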
</script> </script>
<!-- ROW1: Status--> <!-- ROW1: Status-->
@ -72,10 +82,10 @@
{/if} {/if}
<!-- ROW2: Tools--> <!-- ROW2: Tools-->
<Row cols={{ xs: 1, md: 2, lg: 4}} class="mb-3"> <Row cols={{ xs: 1, md: 2, lg: 5}} class="mb-3">
<Col lg="2" class="mb-2 mb-lg-0"> <Col lg="2" class="mb-2 mb-lg-0">
<ButtonGroup class="w-100"> <ButtonGroup class="w-100">
<Button outline color="primary" on:click={() => (isSortingOpen = true)}> <Button outline color="primary" on:click={() => (isSortingOpen = true)} disabled={showCompare}>
<Icon name="sort-up" /> Sorting <Icon name="sort-up" /> Sorting
</Button> </Button>
<Button <Button
@ -87,49 +97,88 @@
</Button> </Button>
</ButtonGroup> </ButtonGroup>
</Col> </Col>
<Col lg="4" xl="{(presetProject !== '') ? 5 : 6}" class="mb-1 mb-lg-0"> <Col lg="4" class="mb-1 mb-lg-0">
<Filters <Filters
showFilter={!showCompare}
{filterPresets} {filterPresets}
{matchedJobs} matchedJobs={showCompare? matchedCompareJobs: matchedListJobs}
bind:this={filterComponent} bind:this={filterComponent}
on:update-filters={({ detail }) => { on:update-filters={({ detail }) => {
selectedCluster = detail.filters[0]?.cluster selectedCluster = detail.filters[0]?.cluster
? detail.filters[0].cluster.eq ? detail.filters[0].cluster.eq
: null; : null;
jobList.queryJobs(detail.filters); filterBuffer = [...detail.filters]
if (showCompare) {
jobCompare.queryJobs(detail.filters);
} else {
jobList.queryJobs(detail.filters);
}
}} }}
/> />
</Col> </Col>
<Col lg="3" xl="{(presetProject !== '') ? 3 : 2}" class="mb-2 mb-lg-0"> <Col lg="2" class="mb-2 mb-lg-0">
<TextFilter {#if !showCompare}
{presetProject} <TextFilter
bind:authlevel {presetProject}
bind:roles bind:authlevel
on:set-filter={({ detail }) => filterComponent.updateFilters(detail)} bind:roles
/> on:set-filter={({ detail }) => filterComponent.updateFilters(detail)}
/>
{/if}
</Col> </Col>
<Col lg="3" xl="2" class="mb-1 mb-lg-0"> <Col lg="2" class="mb-1 mb-lg-0">
<Refresher on:refresh={() => { {#if !showCompare}
jobList.refreshJobs() <Refresher on:refresh={() => {
jobList.refreshAllMetrics() jobList.refreshJobs()
}} /> jobList.refreshAllMetrics()
}} />
{/if}
</Col>
<Col lg="2" class="mb-2 mb-lg-0">
<ButtonGroup class="w-100">
<Button color="primary" disabled={matchedListJobs >= 500 && !(selectedJobs.length != 0)} on:click={() => {
if (selectedJobs.length != 0) filterComponent.updateFilters({dbId: selectedJobs}, true)
showCompare = !showCompare
}} >
{showCompare ? 'Return to List' :
'Compare Jobs' + (selectedJobs.length != 0 ? ` (${selectedJobs.length} selected)` : matchedListJobs >= 500 ? ` (Too Many)` : ``)}
</Button>
{#if !showCompare && selectedJobs.length != 0}
<Button color="warning" on:click={() => {
selectedJobs = [] // Only empty the array; the filters are handled by the reactive reset
}}>
Clear
</Button>
{/if}
</ButtonGroup>
</Col> </Col>
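      <!-- "Compare Jobs" above stays disabled while 500 or more jobs match and
           none are hand-selected; selecting jobs force-replaces all filters with
           their dbIds (see updateFilters(..., force) in Filters.svelte). -->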
</Row> </Row>
<!-- ROW3: Job List--> <!-- ROW3: Job List / Job Compare-->
<Row> <Row>
<Col> <Col>
<JobList {#if !showCompare}
bind:this={jobList} <JobList
bind:metrics bind:this={jobList}
bind:sorting bind:metrics
bind:matchedJobs bind:sorting
bind:showFootprint bind:matchedListJobs
/> bind:showFootprint
bind:selectedJobs
{filterBuffer}
/>
{:else}
<JobCompare
bind:this={jobCompare}
bind:metrics
bind:matchedCompareJobs
{filterBuffer}
/>
{/if}
</Col> </Col>
</Row> </Row>
<Sorting bind:sorting bind:isOpen={isSortingOpen} /> <Sorting bind:sorting bind:isOpen={isSortingOpen}/>
<MetricSelection <MetricSelection
bind:cluster={selectedCluster} bind:cluster={selectedCluster}
@ -137,5 +186,5 @@
bind:metrics bind:metrics
bind:isOpen={isMetricsSelectionOpen} bind:isOpen={isMetricsSelectionOpen}
bind:showFootprint bind:showFootprint
footprintSelect={true} footprintSelect
/> />

View File

@ -31,6 +31,8 @@
init, init,
convert2uplot, convert2uplot,
transformPerNodeDataForRoofline, transformPerNodeDataForRoofline,
scramble,
scrambleNames,
} from "./generic/utils.js"; } from "./generic/utils.js";
import { scaleNumbers } from "./generic/units.js"; import { scaleNumbers } from "./generic/units.js";
import PlotGrid from "./generic/PlotGrid.svelte"; import PlotGrid from "./generic/PlotGrid.svelte";
@ -487,7 +489,7 @@
quantities={$topUserQuery.data.topUser.map( quantities={$topUserQuery.data.topUser.map(
(tu) => tu[topUserSelection.key], (tu) => tu[topUserSelection.key],
)} )}
entities={$topUserQuery.data.topUser.map((tu) => tu.id)} entities={$topUserQuery.data.topUser.map((tu) => scrambleNames ? scramble(tu.id) : tu.id)}
/> />
{/if} {/if}
{/key} {/key}
@ -521,14 +523,14 @@
<th scope="col" id="topName-{tu.id}" <th scope="col" id="topName-{tu.id}"
><a ><a
href="/monitoring/user/{tu.id}?cluster={cluster}&state=running" href="/monitoring/user/{tu.id}?cluster={cluster}&state=running"
>{tu.id}</a >{scrambleNames ? scramble(tu.id) : tu.id}</a
></th ></th
> >
{#if tu?.name} {#if tu?.name}
<Tooltip <Tooltip
target={`topName-${tu.id}`} target={`topName-${tu.id}`}
placement="left" placement="left"
>{tu.name}</Tooltip >{scrambleNames ? scramble(tu.name) : tu.name}</Tooltip
> >
{/if} {/if}
<td>{tu[topUserSelection.key]}</td> <td>{tu[topUserSelection.key]}</td>
@ -555,7 +557,7 @@
quantities={$topProjectQuery.data.topProjects.map( quantities={$topProjectQuery.data.topProjects.map(
(tp) => tp[topProjectSelection.key], (tp) => tp[topProjectSelection.key],
)} )}
entities={$topProjectQuery.data.topProjects.map((tp) => tp.id)} entities={$topProjectQuery.data.topProjects.map((tp) => scrambleNames ? scramble(tp.id) : tp.id)}
/> />
{/if} {/if}
{/key} {/key}
@ -588,7 +590,7 @@
<th scope="col" <th scope="col"
><a ><a
href="/monitoring/jobs/?cluster={cluster}&state=running&project={tp.id}&projectMatch=eq" href="/monitoring/jobs/?cluster={cluster}&state=running&project={tp.id}&projectMatch=eq"
>{tp.id}</a >{scrambleNames ? scramble(tp.id) : tp.id}</a
></th ></th
> >
<td>{tp[topProjectSelection.key]}</td> <td>{tp[topProjectSelection.key]}</td>

View File

@ -29,8 +29,8 @@
import Refresher from "./generic/helper/Refresher.svelte"; import Refresher from "./generic/helper/Refresher.svelte";
export let displayType; export let displayType;
export let cluster; export let cluster = null;
export let subCluster = ""; export let subCluster = null;
export let from = null; export let from = null;
export let to = null; export let to = null;
@ -60,7 +60,10 @@
let hostnameFilter = ""; let hostnameFilter = "";
let pendingHostnameFilter = ""; let pendingHostnameFilter = "";
let selectedMetric = ccconfig.system_view_selectedMetric || ""; let selectedMetric = ccconfig.system_view_selectedMetric || "";
let selectedMetrics = ccconfig[`node_list_selectedMetrics:${cluster}`] || [ccconfig.system_view_selectedMetric]; let selectedMetrics = (
ccconfig[`node_list_selectedMetrics:${cluster}:${subCluster}`] ||
ccconfig[`node_list_selectedMetrics:${cluster}`]
) || [ccconfig.system_view_selectedMetric];
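  // Above: falls back from the per-subcluster key to the per-cluster key, then
  // to a one-element list built from the global system_view_selectedMetric.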
let isMetricsSelectionOpen = false; let isMetricsSelectionOpen = false;
/* /*
@ -191,6 +194,7 @@
<MetricSelection <MetricSelection
{cluster} {cluster}
{subCluster}
configName="node_list_selectedMetrics" configName="node_list_selectedMetrics"
metrics={selectedMetrics} metrics={selectedMetrics}
bind:isOpen={isMetricsSelectionOpen} bind:isOpen={isMetricsSelectionOpen}

View File

@ -0,0 +1,110 @@
<!--
@component Tag List Svelte Component. Displays all tags and allows deletion.
Properties:
- `username String!`: User's username.
- `isAdmin Bool!`: Whether the user has admin authorization.
- `tagmap Object!`: Map of accessible, app-wide tags, prefiltered in the backend.
-->
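<!--
  Minimal mount sketch (hypothetical file and prop names; the new 'taglist'
  entrypoint registered in rollup.config.mjs would do something along these lines):

    import { mount } from 'svelte';
    import Tags from './Tags.root.svelte';

    mount(Tags, {
      target: document.getElementById('svelte-app'),
      props: { username: 'jdoe', isAdmin: false, tagmap: {} },
    });
-->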
<script>
import {
gql,
getContextClient,
mutationStore,
} from "@urql/svelte";
import {
Badge,
InputGroup,
Icon,
Button,
Spinner,
} from "@sveltestrap/sveltestrap";
import {
init,
} from "./generic/utils.js";
export let username;
export let isAdmin;
export let tagmap;
const {} = init();
const client = getContextClient();
let pendingChange = "none";
const removeTagMutation = ({ tagIds }) => {
return mutationStore({
client: client,
query: gql`
mutation ($tagIds: [ID!]!) {
removeTagFromList(tagIds: $tagIds)
}
`,
variables: { tagIds },
});
};
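  // Note: removeTagFromList is expected to return the ids it actually removed;
  // removeTag() below filters tagmap against exactly that returned list.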
function removeTag(tag, tagType) {
if (confirm("Are you sure you want to completely remove this tag?\n\n" + tagType + ':' + tag.name)) {
pendingChange = tagType;
removeTagMutation({tagIds: [tag.id] }).subscribe(
(res) => {
if (res.fetching === false && !res.error) {
tagmap[tagType] = tagmap[tagType].filter((t) => !res.data.removeTagFromList.includes(t.id));
if (tagmap[tagType].length === 0) {
delete tagmap[tagType]
}
pendingChange = "none";
} else if (res.fetching === false && res.error) {
throw res.error;
}
},
);
}
}
</script>
<div class="container">
<div class="row justify-content-center">
<div class="col-10">
{#each Object.entries(tagmap) as [tagType, tagList]}
<div class="my-3 p-2 bg-secondary rounded text-white"> <!-- text-capitalize -->
Tag Type: <b>{tagType}</b>
{#if pendingChange === tagType}
<Spinner size="sm" secondary />
{/if}
<span style="float: right; padding-bottom: 0.4rem; padding-top: 0.4rem;" class="badge bg-light text-secondary">
{tagList.length} Tag{(tagList.length != 1)?'s':''}
</span>
</div>
<div class="d-inline-flex flex-wrap">
{#each tagList as tag (tag.id)}
<InputGroup class="w-auto flex-nowrap" style="margin-right: 0.5rem; margin-bottom: 0.5rem;">
<Button outline color="secondary" href="/monitoring/jobs/?tag={tag.id}" target="_blank">
<Badge color="light" style="font-size:medium;" border>{tag.name}</Badge> :
<Badge color="primary" pill>{tag.count} Job{(tag.count != 1)?'s':''}</Badge>
{#if tag.scope == "global"}
<Badge style="background-color:#c85fc8 !important;" pill>Global</Badge>
{:else if tag.scope == "admin"}
<Badge style="background-color:#19e5e6 !important;" pill>Admin</Badge>
{:else}
<Badge color="warning" pill>Private</Badge>
{/if}
</Button>
{#if (isAdmin && (tag.scope == "admin" || tag.scope == "global")) || tag.scope == username }
<Button
size="sm"
color="danger"
on:click={() => removeTag(tag, tagType)}
>
<Icon name="x" />
</Button>
{/if}
</InputGroup>
{/each}
</div>
{/each}
</div>
</div>
</div>

View File

@ -352,7 +352,7 @@
bind:metrics bind:metrics
bind:isOpen={isMetricsSelectionOpen} bind:isOpen={isMetricsSelectionOpen}
bind:showFootprint bind:showFootprint
footprintSelect={true} footprintSelect
/> />
<HistogramSelection <HistogramSelection

View File

@ -44,11 +44,59 @@
export let disableClusterSelection = false; export let disableClusterSelection = false;
export let startTimeQuickSelect = false; export let startTimeQuickSelect = false;
export let matchedJobs = -2; export let matchedJobs = -2;
export let showFilter = true;
const startTimeSelectOptions = [
{ range: "", rangeLabel: "No Selection"},
{ range: "last6h", rangeLabel: "Last 6hrs"},
{ range: "last24h", rangeLabel: "Last 24hrs"},
{ range: "last7d", rangeLabel: "Last 7 days"},
{ range: "last30d", rangeLabel: "Last 30 days"}
];
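  // The options above feed the "Start Time Quick Selection" dropdown below,
  // which filters out the empty "No Selection" entry.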
const nodeMatchLabels = {
eq: "",
contains: " Contains",
}
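  // The leading space in " Contains" is presumably intentional: the label is
  // appended directly to the node match-type display.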
const filterReset = {
projectMatch: "contains",
userMatch: "contains",
jobIdMatch: "eq",
nodeMatch: "eq",
cluster: null,
partition: null,
states: allJobStates,
startTime: { from: null, to: null, range: ""},
tags: [],
duration: {
lessThan: null,
moreThan: null,
from: null,
to: null,
},
dbId: [],
jobId: "",
arrayJobId: null,
user: "",
project: "",
jobName: "",
node: null,
energy: { from: null, to: null },
numNodes: { from: null, to: null },
numHWThreads: { from: null, to: null },
numAccelerators: { from: null, to: null },
stats: [],
};
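  // Pristine defaults: this object is spread over 'filters' when updateFilters()
  // is called with force = true (see below).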
let filters = { let filters = {
projectMatch: filterPresets.projectMatch || "contains", projectMatch: filterPresets.projectMatch || "contains",
userMatch: filterPresets.userMatch || "contains", userMatch: filterPresets.userMatch || "contains",
jobIdMatch: filterPresets.jobIdMatch || "eq", jobIdMatch: filterPresets.jobIdMatch || "eq",
nodeMatch: filterPresets.nodeMatch || "eq",
cluster: filterPresets.cluster || null, cluster: filterPresets.cluster || null,
partition: filterPresets.partition || null, partition: filterPresets.partition || null,
@ -56,7 +104,7 @@
filterPresets.states || filterPresets.state filterPresets.states || filterPresets.state
? [filterPresets.state].flat() ? [filterPresets.state].flat()
: allJobStates, : allJobStates,
startTime: filterPresets.startTime || { from: null, to: null }, startTime: filterPresets.startTime || { from: null, to: null, range: ""},
tags: filterPresets.tags || [], tags: filterPresets.tags || [],
duration: filterPresets.duration || { duration: filterPresets.duration || {
lessThan: null, lessThan: null,
@ -64,6 +112,7 @@
from: null, from: null,
to: null, to: null,
}, },
dbId: filterPresets.dbId || [],
jobId: filterPresets.jobId || "", jobId: filterPresets.jobId || "",
arrayJobId: filterPresets.arrayJobId || null, arrayJobId: filterPresets.arrayJobId || null,
user: filterPresets.user || "", user: filterPresets.user || "",
@ -92,13 +141,20 @@
isAccsModified = false; isAccsModified = false;
// Can be called from the outside to trigger an 'update' event from this component. // Can be called from the outside to trigger an 'update' event from this component.
export function updateFilters(additionalFilters = null) { // 'force' option empties existing filters and then applies only 'additionalFilters'
if (additionalFilters != null) export function updateFilters(additionalFilters = null, force = false) {
// When 'force' is set, empty the current filters first
if (additionalFilters != null && force) {
filters = {...filterReset}
}
// Add Additional Filters
if (additionalFilters != null) {
for (let key in additionalFilters) filters[key] = additionalFilters[key]; for (let key in additionalFilters) filters[key] = additionalFilters[key];
}
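    // Usage sketch (as wired up in the jobs view of this PR):
    //   filterComponent.updateFilters({dbId: selectedJobs}, true)  // replace all filters
    //   filterComponent.updateFilters({dbId: []})                  // merge a reset of one key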
// Construct New Filter
let items = []; let items = [];
if (filters.cluster) items.push({ cluster: { eq: filters.cluster } }); if (filters.cluster) items.push({ cluster: { eq: filters.cluster } });
if (filters.node) items.push({ node: { contains: filters.node } }); if (filters.node) items.push({ node: { [filters.nodeMatch]: filters.node } });
if (filters.partition) items.push({ partition: { eq: filters.partition } }); if (filters.partition) items.push({ partition: { eq: filters.partition } });
if (filters.states.length != allJobStates.length) if (filters.states.length != allJobStates.length)
items.push({ state: filters.states }); items.push({ state: filters.states });
@ -123,6 +179,8 @@
items.push({ items.push({
energy: { from: filters.energy.from, to: filters.energy.to }, energy: { from: filters.energy.from, to: filters.energy.to },
}); });
if (filters.dbId.length != 0)
items.push({ dbId: filters.dbId });
if (filters.jobId) if (filters.jobId)
items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } }); items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } });
if (filters.arrayJobId != null) if (filters.arrayJobId != null)
@ -166,10 +224,12 @@
function changeURL() { function changeURL() {
const dateToUnixEpoch = (rfc3339) => Math.floor(Date.parse(rfc3339) / 1000); const dateToUnixEpoch = (rfc3339) => Math.floor(Date.parse(rfc3339) / 1000);
let opts = []; let opts = [];
if (filters.cluster) opts.push(`cluster=${filters.cluster}`); if (filters.cluster) opts.push(`cluster=${filters.cluster}`);
if (filters.node) opts.push(`node=${filters.node}`); if (filters.node) opts.push(`node=${filters.node}`);
if (filters.node && filters.nodeMatch != "eq") // "eq" is default-case
opts.push(`nodeMatch=${filters.nodeMatch}`);
if (filters.partition) opts.push(`partition=${filters.partition}`); if (filters.partition) opts.push(`partition=${filters.partition}`);
if (filters.states.length != allJobStates.length) if (filters.states.length != allJobStates.length)
for (let state of filters.states) opts.push(`state=${state}`); for (let state of filters.states) opts.push(`state=${state}`);
@ -180,6 +240,11 @@
if (filters.startTime.range) { if (filters.startTime.range) {
opts.push(`startTime=${filters.startTime.range}`) opts.push(`startTime=${filters.startTime.range}`)
} }
if (filters.dbId.length != 0) {
for (let dbi of filters.dbId) {
opts.push(`dbId=${dbi}`);
}
}
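      // e.g. filters.dbId = [101, 102] above serializes to "&dbId=101&dbId=102",
      // one query parameter per id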
if (filters.jobId.length != 0) if (filters.jobId.length != 0)
if (filters.jobIdMatch != "in") { if (filters.jobIdMatch != "in") {
opts.push(`jobId=${filters.jobId}`); opts.push(`jobId=${filters.jobId}`);
@ -188,7 +253,7 @@
opts.push(`jobId=${singleJobId}`); opts.push(`jobId=${singleJobId}`);
} }
if (filters.jobIdMatch != "eq") if (filters.jobIdMatch != "eq")
opts.push(`jobIdMatch=${filters.jobIdMatch}`); opts.push(`jobIdMatch=${filters.jobIdMatch}`); // "eq" is default-case
for (let tag of filters.tags) opts.push(`tag=${tag}`); for (let tag of filters.tags) opts.push(`tag=${tag}`);
if (filters.duration.from && filters.duration.to) if (filters.duration.from && filters.duration.to)
opts.push(`duration=${filters.duration.from}-${filters.duration.to}`); opts.push(`duration=${filters.duration.from}-${filters.duration.to}`);
@ -210,19 +275,19 @@
} else { } else {
for (let singleUser of filters.user) opts.push(`user=${singleUser}`); for (let singleUser of filters.user) opts.push(`user=${singleUser}`);
} }
if (filters.userMatch != "contains") if (filters.userMatch != "contains") // "contains" is default-case
opts.push(`userMatch=${filters.userMatch}`); opts.push(`userMatch=${filters.userMatch}`);
if (filters.project) opts.push(`project=${filters.project}`); if (filters.project) opts.push(`project=${filters.project}`);
if (filters.project && filters.projectMatch != "contains") // "contains" is default-case
opts.push(`projectMatch=${filters.projectMatch}`);
if (filters.jobName) opts.push(`jobName=${filters.jobName}`); if (filters.jobName) opts.push(`jobName=${filters.jobName}`);
if (filters.arrayJobId) opts.push(`arrayJobId=${filters.arrayJobId}`); if (filters.arrayJobId) opts.push(`arrayJobId=${filters.arrayJobId}`);
if (filters.project && filters.projectMatch != "contains")
opts.push(`projectMatch=${filters.projectMatch}`);
if (filters.stats.length != 0) if (filters.stats.length != 0)
for (let stat of filters.stats) { for (let stat of filters.stats) {
opts.push(`stat=${stat.field}-${stat.from}-${stat.to}`); opts.push(`stat=${stat.field}-${stat.from}-${stat.to}`);
} }
if (opts.length == 0 && window.location.search.length <= 1) return; if (opts.length == 0 && window.location.search.length <= 1) return;
let newurl = `${window.location.pathname}?${opts.join("&")}`; let newurl = `${window.location.pathname}?${opts.join("&")}`;
window.history.replaceState(null, "", newurl); window.history.replaceState(null, "", newurl);
} }
@@ -230,59 +295,63 @@
<!-- Dropdown-Button -->
<ButtonGroup>
+ {#if showFilter}
    <ButtonDropdown class="cc-dropdown-on-hover mb-1" style="{(matchedJobs >= -1) ? '' : 'margin-right: 0.5rem;'}">
      <DropdownToggle outline caret color="success">
        <Icon name="sliders" />
        Filters
      </DropdownToggle>
      <DropdownMenu>
        <DropdownItem header>Manage Filters</DropdownItem>
        {#if menuText}
          <DropdownItem disabled>{menuText}</DropdownItem>
          <DropdownItem divider />
        {/if}
        <DropdownItem on:click={() => (isClusterOpen = true)}>
          <Icon name="cpu" /> Cluster/Partition
        </DropdownItem>
        <DropdownItem on:click={() => (isJobStatesOpen = true)}>
          <Icon name="gear-fill" /> Job States
        </DropdownItem>
        <DropdownItem on:click={() => (isStartTimeOpen = true)}>
          <Icon name="calendar-range" /> Start Time
        </DropdownItem>
        <DropdownItem on:click={() => (isDurationOpen = true)}>
          <Icon name="stopwatch" /> Duration
        </DropdownItem>
        <DropdownItem on:click={() => (isTagsOpen = true)}>
          <Icon name="tags" /> Tags
        </DropdownItem>
        <DropdownItem on:click={() => (isResourcesOpen = true)}>
          <Icon name="hdd-stack" /> Resources
        </DropdownItem>
        <DropdownItem on:click={() => (isEnergyOpen = true)}>
          <Icon name="lightning-charge-fill" /> Energy
        </DropdownItem>
        <DropdownItem on:click={() => (isStatsOpen = true)}>
          <Icon name="bar-chart" on:click={() => (isStatsOpen = true)} /> Statistics
        </DropdownItem>
        {#if startTimeQuickSelect}
          <DropdownItem divider />
          <DropdownItem disabled>Start Time Quick Selection</DropdownItem>
-         {#each [{ text: "Last 6hrs", range: "last6h" }, { text: "Last 24hrs", range: "last24h" }, { text: "Last 7 days", range: "last7d" }, { text: "Last 30 days", range: "last30d" }] as { text, range }}
+         {#each startTimeSelectOptions.filter((stso) => stso.range !== "") as { rangeLabel, range }}
            <DropdownItem
              on:click={() => {
+               filters.startTime.from = null
+               filters.startTime.to = null
                filters.startTime.range = range;
-               filters.startTime.text = text;
                updateFilters();
              }}
            >
              <Icon name="calendar-range" />
-             {text}
+             {rangeLabel}
            </DropdownItem>
          {/each}
        {/if}
      </DropdownMenu>
    </ButtonDropdown>
+ {/if}
  {#if matchedJobs >= -1}
    <Button class="mb-1" style="margin-right: 0.5rem;" disabled outline>
      {matchedJobs == -1 ? 'Loading ...' : `${matchedJobs} jobs`}
@@ -290,109 +359,111 @@
  {/if}
</ButtonGroup>

+ {#if showFilter}
  <!-- SELECTED FILTER PILLS -->
  {#if filters.cluster}
    <Info icon="cpu" on:click={() => (isClusterOpen = true)}>
      {filters.cluster}
      {#if filters.partition}
        ({filters.partition})
      {/if}
    </Info>
  {/if}
  {#if filters.states.length != allJobStates.length}
    <Info icon="gear-fill" on:click={() => (isJobStatesOpen = true)}>
      {filters.states.join(", ")}
    </Info>
  {/if}
  {#if filters.startTime.from || filters.startTime.to}
    <Info icon="calendar-range" on:click={() => (isStartTimeOpen = true)}>
      {new Date(filters.startTime.from).toLocaleString()} - {new Date(
        filters.startTime.to,
      ).toLocaleString()}
    </Info>
  {/if}
  {#if filters.startTime.range}
    <Info icon="calendar-range" on:click={() => (isStartTimeOpen = true)}>
-     {filters?.startTime?.text ? filters.startTime.text : filters.startTime.range }
+     {startTimeSelectOptions.find((stso) => stso.range === filters.startTime.range).rangeLabel }
    </Info>
  {/if}
  {#if filters.duration.from || filters.duration.to}
    <Info icon="stopwatch" on:click={() => (isDurationOpen = true)}>
      {Math.floor(filters.duration.from / 3600)}h:{Math.floor(
        (filters.duration.from % 3600) / 60,
      )}m -
      {Math.floor(filters.duration.to / 3600)}h:{Math.floor(
        (filters.duration.to % 3600) / 60,
      )}m
    </Info>
  {/if}
  {#if filters.duration.lessThan}
    <Info icon="stopwatch" on:click={() => (isDurationOpen = true)}>
      Duration less than {Math.floor(
        filters.duration.lessThan / 3600,
      )}h:{Math.floor((filters.duration.lessThan % 3600) / 60)}m
    </Info>
  {/if}
  {#if filters.duration.moreThan}
    <Info icon="stopwatch" on:click={() => (isDurationOpen = true)}>
      Duration more than {Math.floor(
        filters.duration.moreThan / 3600,
      )}h:{Math.floor((filters.duration.moreThan % 3600) / 60)}m
    </Info>
  {/if}
  {#if filters.tags.length != 0}
    <Info icon="tags" on:click={() => (isTagsOpen = true)}>
      {#each filters.tags as tagId}
        {#key tagId}
          <Tag id={tagId} clickable={false} />
        {/key}
      {/each}
    </Info>
  {/if}
  {#if filters.numNodes.from != null || filters.numNodes.to != null || filters.numHWThreads.from != null || filters.numHWThreads.to != null || filters.numAccelerators.from != null || filters.numAccelerators.to != null}
    <Info icon="hdd-stack" on:click={() => (isResourcesOpen = true)}>
      {#if isNodesModified}
        Nodes: {filters.numNodes.from} - {filters.numNodes.to}
      {/if}
      {#if isNodesModified && isHwthreadsModified},
      {/if}
      {#if isHwthreadsModified}
        HWThreads: {filters.numHWThreads.from} - {filters.numHWThreads.to}
      {/if}
      {#if (isNodesModified || isHwthreadsModified) && isAccsModified},
      {/if}
      {#if isAccsModified}
        Accelerators: {filters.numAccelerators.from} - {filters.numAccelerators.to}
      {/if}
    </Info>
  {/if}
  {#if filters.node != null}
    <Info icon="hdd-stack" on:click={() => (isResourcesOpen = true)}>
-     Node: {filters.node}
+     Node{nodeMatchLabels[filters.nodeMatch]}: {filters.node}
    </Info>
  {/if}
  {#if filters.energy.from || filters.energy.to}
    <Info icon="lightning-charge-fill" on:click={() => (isEnergyOpen = true)}>
      Total Energy: {filters.energy.from} - {filters.energy.to}
    </Info>
  {/if}
  {#if filters.stats.length > 0}
    <Info icon="bar-chart" on:click={() => (isStatsOpen = true)}>
      {filters.stats
        .map((stat) => `${stat.field}: ${stat.from} - ${stat.to}`)
        .join(", ")}
    </Info>
  {/if}
+ {/if}

<Cluster
@@ -414,11 +485,8 @@
  bind:from={filters.startTime.from}
  bind:to={filters.startTime.to}
  bind:range={filters.startTime.range}
- on:set-filter={() => {
-   delete filters.startTime["text"];
-   delete filters.startTime["range"];
-   updateFilters();
- }}
+ {startTimeSelectOptions}
+ on:set-filter={() => updateFilters()}
/>

<Duration
@@ -443,6 +511,7 @@
  bind:numHWThreads={filters.numHWThreads}
  bind:numAccelerators={filters.numAccelerators}
  bind:namedNode={filters.node}
+ bind:nodeMatch={filters.nodeMatch}
  bind:isNodesModified
  bind:isHwthreadsModified
  bind:isAccsModified

View File

@@ -0,0 +1,394 @@
<!--
@component jobCompare component; compares jobs according to set filters or job selection
Properties:
 - `matchedCompareJobs Number?`: Number of matched jobs for selected filters [Default: 0]
 - `metrics [String]?`: The currently selected metrics [Default: User-Configured Selection]
 - `filterBuffer [Object]?`: Initial filter set applied on load [Default: []]
 Functions:
 - `queryJobs(filters?: [JobFilter])`: Load jobs data with new filters
-->
<script>
import { getContext } from "svelte";
import uPlot from "uplot";
import {
queryStore,
gql,
getContextClient,
// mutationStore,
} from "@urql/svelte";
import { Row, Col, Card, Spinner, Table, Input, InputGroup, InputGroupText, Icon } from "@sveltestrap/sveltestrap";
import { formatTime, roundTwoDigits } from "./units.js";
import Comparogram from "./plots/Comparogram.svelte";
const ccconfig = getContext("cc-config"),
// initialized = getContext("initialized"),
globalMetrics = getContext("globalMetrics");
export let matchedCompareJobs = 0;
export let metrics = ccconfig.plot_list_selectedMetrics;
export let filterBuffer = [];
  let filter = [...filterBuffer];
let comparePlotData = {};
let compareTableData = [];
let compareTableSorting = {};
let jobIds = [];
let jobClusters = [];
let tableJobIDFilter = "";
/*uPlot*/
let plotSync = uPlot.sync("compareJobsView");
/* GQL */
const client = getContextClient();
// Pull All Series For Metrics Statistics Only On Node Scope
const compareQuery = gql`
query ($filter: [JobFilter!]!, $metrics: [String!]!) {
jobsMetricStats(filter: $filter, metrics: $metrics) {
id
jobId
startTime
duration
cluster
subCluster
numNodes
numHWThreads
numAccelerators
stats {
name
data {
min
avg
max
}
}
}
}
`;
/* REACTIVES */
$: compareData = queryStore({
client: client,
query: compareQuery,
variables:{ filter, metrics },
});
$: matchedCompareJobs = $compareData.data != null ? $compareData.data.jobsMetricStats.length : -1;
$: if ($compareData.data != null) {
jobIds = [];
jobClusters = [];
comparePlotData = {};
compareTableData = [...$compareData.data.jobsMetricStats];
jobs2uplot($compareData.data.jobsMetricStats, metrics);
}
$: if ((!$compareData.fetching && !$compareData.error) && metrics) {
// Meta
compareTableSorting['meta'] = {
startTime: { dir: "down", active: true },
duration: { dir: "up", active: false },
cluster: { dir: "up", active: false },
};
// Resources
compareTableSorting['resources'] = {
Nodes: { dir: "up", active: false },
Threads: { dir: "up", active: false },
Accs: { dir: "up", active: false },
};
// Metrics
for (let metric of metrics) {
compareTableSorting[metric] = {
min: { dir: "up", active: false },
avg: { dir: "up", active: false },
max: { dir: "up", active: false },
};
}
}
/* FUNCTIONS */
// (Re-)query and optionally set new filters; Query will be started reactively.
export function queryJobs(filters) {
if (filters != null) {
let minRunningFor = ccconfig.plot_list_hideShortRunningJobs;
if (minRunningFor && minRunningFor > 0) {
filters.push({ minRunningFor });
}
filter = filters;
}
}
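  // Illustrative call (names assumed, not defined here): a parent view holding a
  // bound component reference could restrict the comparison to a job selection via
  //   jobCompare.queryJobs([{ dbId: selectedJobs }]);
  // which re-triggers the reactive compareQuery above with the new filter set.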
function sortBy(key, field) {
let s = compareTableSorting[key][field];
if (s.active) {
s.dir = s.dir == "up" ? "down" : "up";
} else {
for (let key in compareTableSorting)
for (let field in compareTableSorting[key]) compareTableSorting[key][field].active = false;
s.active = true;
}
compareTableSorting = { ...compareTableSorting };
if (key == 'resources') {
let longField = "";
switch (field) {
case "Nodes":
longField = "numNodes"
break
case "Threads":
longField = "numHWThreads"
break
case "Accs":
longField = "numAccelerators"
break
default:
console.log("Unknown Res Field", field)
}
compareTableData = compareTableData.sort((j1, j2) => {
if (j1[longField] == null || j2[longField] == null) return -1;
return s.dir != "up" ? j1[longField] - j2[longField] : j2[longField] - j1[longField];
});
} else if (key == 'meta') {
compareTableData = compareTableData.sort((j1, j2) => {
if (j1[field] == null || j2[field] == null) return -1;
if (field == 'cluster') {
let c1 = `${j1.cluster} (${j1.subCluster})`
let c2 = `${j2.cluster} (${j2.subCluster})`
return s.dir != "up" ? c1.localeCompare(c2) : c2.localeCompare(c1)
} else {
return s.dir != "up" ? j1[field] - j2[field] : j2[field] - j1[field];
}
});
} else {
compareTableData = compareTableData.sort((j1, j2) => {
let s1 = j1.stats.find((m) => m.name == key)?.data;
let s2 = j2.stats.find((m) => m.name == key)?.data;
if (s1 == null || s2 == null) return -1;
return s.dir != "up" ? s1[field] - s2[field] : s2[field] - s1[field];
});
}
}
function jobs2uplot(jobs, metrics) {
// Resources Init
comparePlotData['resources'] = {unit:'', data: [[],[],[],[],[],[]]} // data: [X, XST, XRT, YNODES, YTHREADS, YACCS]
// Metric Init
for (let m of metrics) {
// Get Unit
const rawUnit = globalMetrics.find((gm) => gm.name == m)?.unit
const metricUnit = (rawUnit?.prefix ? rawUnit.prefix : "") + (rawUnit?.base ? rawUnit.base : "")
comparePlotData[m] = {unit: metricUnit, data: [[],[],[],[],[],[]]} // data: [X, XST, XRT, YMIN, YAVG, YMAX]
}
// Iterate jobs if exists
if (jobs) {
let plotIndex = 0
jobs.forEach((j) => {
// Collect JobIDs & Clusters for X-Ticks and Legend
jobIds.push(j.jobId)
jobClusters.push(`${j.cluster} ${j.subCluster}`)
// Resources
comparePlotData['resources'].data[0].push(plotIndex)
comparePlotData['resources'].data[1].push(j.startTime)
comparePlotData['resources'].data[2].push(j.duration)
comparePlotData['resources'].data[3].push(j.numNodes)
comparePlotData['resources'].data[4].push(j?.numHWThreads?j.numHWThreads:0)
comparePlotData['resources'].data[5].push(j?.numAccelerators?j.numAccelerators:0)
// Metrics
for (let s of j.stats) {
comparePlotData[s.name].data[0].push(plotIndex)
comparePlotData[s.name].data[1].push(j.startTime)
comparePlotData[s.name].data[2].push(j.duration)
comparePlotData[s.name].data[3].push(s.data.min)
comparePlotData[s.name].data[4].push(s.data.avg)
comparePlotData[s.name].data[5].push(s.data.max)
}
plotIndex++
})
}
}
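  // Illustrative shape (assumed values): for two jobs started at t=100/t=200 with
  // durations 60/120 on 2/4 nodes, 128/256 threads and no accelerators, this yields
  //   comparePlotData['resources'].data == [[0,1], [100,200], [60,120], [2,4], [128,256], [0,0]]
  // matching the [X, XST, XRT, YNODES, YTHREADS, YACCS] layout noted above.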
// Adapt for Persisting Job Selections in DB later down the line
// const updateConfigurationMutation = ({ name, value }) => {
// return mutationStore({
// client: client,
// query: gql`
// mutation ($name: String!, $value: String!) {
// updateConfiguration(name: $name, value: $value)
// }
// `,
// variables: { name, value },
// });
// };
// function updateConfiguration(value, page) {
// updateConfigurationMutation({
// name: "plot_list_jobsPerPage",
// value: value,
// }).subscribe((res) => {
// if (res.fetching === false && !res.error) {
// jobs = [] // Empty List
// paging = { itemsPerPage: value, page: page }; // Trigger reload of jobList
// } else if (res.fetching === false && res.error) {
// throw res.error;
// }
// });
// }
</script>
{#if $compareData.fetching}
<Row>
<Col>
<Spinner secondary />
</Col>
</Row>
{:else if $compareData.error}
<Row>
<Col>
<Card body color="danger" class="mb-3"
><h2>{$compareData.error.message}</h2></Card
>
</Col>
</Row>
{:else}
{#key comparePlotData}
<Row>
<Col>
<Comparogram
title={'Compare Resources'}
xlabel="JobIDs"
xticks={jobIds}
xinfo={jobClusters}
ylabel={'Resource Counts'}
data={comparePlotData['resources'].data}
{plotSync}
forResources
/>
</Col>
</Row>
{#each metrics as m}
<Row>
<Col>
<Comparogram
title={`Compare Metric '${m}'`}
xlabel="JobIDs"
xticks={jobIds}
xinfo={jobClusters}
ylabel={m}
metric={m}
yunit={comparePlotData[m].unit}
data={comparePlotData[m].data}
{plotSync}
/>
</Col>
</Row>
{/each}
{/key}
<hr/>
<Card>
<Table hover>
<thead>
<!-- Header Row 1 -->
<tr>
<th style="width:8%; max-width:10%;">JobID</th>
<th>StartTime</th>
<th>Duration</th>
<th>Cluster</th>
<th colspan="3">Resources</th>
{#each metrics as metric}
<th colspan="3">{metric} {comparePlotData[metric]?.unit? `(${comparePlotData[metric]?.unit})` : ''}</th>
{/each}
</tr>
<!-- Header Row 2: Fields -->
<tr>
<th>
<InputGroup size="sm">
<Input type="text" bind:value={tableJobIDFilter}/>
<InputGroupText>
<Icon name="search"></Icon>
</InputGroupText>
</InputGroup>
</th>
<th on:click={() => sortBy('meta', 'startTime')}>
Sort
<Icon
name="caret-{compareTableSorting['meta']['startTime'].dir}{compareTableSorting['meta']['startTime']
.active
? '-fill'
: ''}"
/>
</th>
<th on:click={() => sortBy('meta', 'duration')}>
Sort
<Icon
name="caret-{compareTableSorting['meta']['duration'].dir}{compareTableSorting['meta']['duration']
.active
? '-fill'
: ''}"
/>
</th>
<th on:click={() => sortBy('meta', 'cluster')}>
Sort
<Icon
name="caret-{compareTableSorting['meta']['cluster'].dir}{compareTableSorting['meta']['cluster']
.active
? '-fill'
: ''}"
/>
</th>
{#each ["Nodes", "Threads", "Accs"] as res}
<th on:click={() => sortBy('resources', res)}>
{res}
<Icon
name="caret-{compareTableSorting['resources'][res].dir}{compareTableSorting['resources'][res]
.active
? '-fill'
: ''}"
/>
</th>
{/each}
{#each metrics as metric}
{#each ["min", "avg", "max"] as stat}
<th on:click={() => sortBy(metric, stat)}>
{stat.charAt(0).toUpperCase() + stat.slice(1)}
<Icon
name="caret-{compareTableSorting[metric][stat].dir}{compareTableSorting[metric][stat]
.active
? '-fill'
: ''}"
/>
</th>
{/each}
{/each}
</tr>
</thead>
<tbody>
{#each compareTableData.filter((j) => j.jobId.includes(tableJobIDFilter)) as job (job.id)}
<tr>
<td><b><a href="/monitoring/job/{job.id}" target="_blank">{job.jobId}</a></b></td>
<td>{new Date(job.startTime * 1000).toLocaleString()}</td>
<td>{formatTime(job.duration)}</td>
<td>{job.cluster} ({job.subCluster})</td>
<td>{job.numNodes}</td>
<td>{job.numHWThreads}</td>
<td>{job.numAccelerators}</td>
{#each metrics as metric}
<td>{roundTwoDigits(job.stats.find((s) => s.name == metric).data.min)}</td>
<td>{roundTwoDigits(job.stats.find((s) => s.name == metric).data.avg)}</td>
<td>{roundTwoDigits(job.stats.find((s) => s.name == metric).data.max)}</td>
{/each}
</tr>
{:else}
<tr>
<td colspan={7 + (metrics.length * 3)}><b>No jobs found.</b></td>
</tr>
{/each}
</tbody>
</Table>
</Card>
{/if}
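A minimal usage sketch for this new component (parent markup, import path, and variable names are assumptions, not taken from the repository):

    <script>
      import JobCompare from "./generic/JobCompare.svelte";
      let jobCompare;             // bound component reference
      let matchedCompareJobs = 0;
    </script>

    <JobCompare bind:this={jobCompare} bind:matchedCompareJobs filterBuffer={[]} />
    <!-- later, e.g. from a filter event: jobCompare.queryJobs(detail.filters) -->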

View File

@@ -35,15 +35,17 @@
    }
  export let sorting = { field: "startTime", type: "col", order: "DESC" };
- export let matchedJobs = 0;
+ export let matchedListJobs = 0;
  export let metrics = ccconfig.plot_list_selectedMetrics;
  export let showFootprint;
+ export let filterBuffer = [];
+ export let selectedJobs = [];

  let usePaging = ccconfig.job_list_usePaging
  let itemsPerPage = usePaging ? ccconfig.plot_list_jobsPerPage : 10;
  let page = 1;
  let paging = { itemsPerPage, page };
- let filter = [];
+ let filter = [...filterBuffer];
  let lastFilter = [];
  let lastSorting = null;
  let triggerMetricRefresh = false;
@@ -141,7 +143,7 @@
      }
    }
- $: matchedJobs = $jobsStore.data != null ? $jobsStore.data.jobs.count : -1;
+ $: matchedListJobs = $jobsStore.data != null ? $jobsStore.data.jobs.count : -1;

  // Force refresh list with existing unchanged variables (== usually would not trigger reactivity)
  export function refreshJobs() {
@@ -284,7 +286,10 @@
        </tr>
      {:else}
        {#each jobs as job (job)}
-         <JobListRow bind:triggerMetricRefresh {job} {metrics} {plotWidth} {showFootprint} />
+         <JobListRow bind:triggerMetricRefresh {job} {metrics} {plotWidth} {showFootprint} previousSelect={selectedJobs.includes(job.id)}
+           on:select-job={({detail}) => selectedJobs = [...selectedJobs, detail]}
+           on:unselect-job={({detail}) => selectedJobs = selectedJobs.filter(item => item !== detail)}
+         />
        {:else}
          <tr>
            <td colspan={metrics.length + 1}> No jobs found </td>
@@ -310,7 +315,7 @@
    bind:page
    {itemsPerPage}
    itemText="Jobs"
-   totalItems={matchedJobs}
+   totalItems={matchedListJobs}
    on:update-paging={({ detail }) => {
      if (detail.itemsPerPage != itemsPerPage) {
        updateConfiguration(detail.itemsPerPage.toString(), detail.page);

View File

@@ -24,6 +24,7 @@
    ModalBody,
    ModalHeader,
    ModalFooter,
+   Input
  } from "@sveltestrap/sveltestrap";
  import DoubleRangeSlider from "../select/DoubleRangeSlider.svelte";
@@ -40,11 +41,18 @@
  export let isHwthreadsModified = false;
  export let isAccsModified = false;
  export let namedNode = null;
+ export let nodeMatch = "eq"

  let pendingNumNodes = numNodes,
    pendingNumHWThreads = numHWThreads,
    pendingNumAccelerators = numAccelerators,
-   pendingNamedNode = namedNode;
+   pendingNamedNode = namedNode,
+   pendingNodeMatch = nodeMatch;

+ const nodeMatchLabels = {
+   eq: "Equal To",
+   contains: "Contains",
+ }

  const findMaxNumAccels = (clusters) =>
    clusters.reduce(
@@ -145,7 +153,17 @@
  <ModalHeader>Select number of utilized Resources</ModalHeader>
  <ModalBody>
    <h6>Named Node</h6>
-   <input type="text" class="form-control" bind:value={pendingNamedNode} />
+   <div class="d-flex">
+     <Input type="text" class="w-75" bind:value={pendingNamedNode} />
+     <div class="mx-1"></div>
+     <Input type="select" class="w-25" bind:value={pendingNodeMatch}>
+       {#each Object.entries(nodeMatchLabels) as [nodeMatchKey, nodeMatchLabel]}
+         <option value={nodeMatchKey}>
+           {nodeMatchLabel}
+         </option>
+       {/each}
+     </Input>
+   </div>
    <h6 style="margin-top: 1rem;">Number of Nodes</h6>
    <DoubleRangeSlider
      on:change={({ detail }) => {
@@ -215,11 +233,13 @@
        to: pendingNumAccelerators.to,
      };
      namedNode = pendingNamedNode;
+     nodeMatch = pendingNodeMatch;
      dispatch("set-filter", {
        numNodes,
        numHWThreads,
        numAccelerators,
        namedNode,
+       nodeMatch
      });
    }}
  >
@@ -233,6 +253,7 @@
      pendingNumHWThreads = { from: null, to: null };
      pendingNumAccelerators = { from: null, to: null };
      pendingNamedNode = null;
+     pendingNodeMatch = null;
      numNodes = { from: pendingNumNodes.from, to: pendingNumNodes.to };
      numHWThreads = {
        from: pendingNumHWThreads.from,
@@ -246,11 +267,13 @@
      isHwthreadsModified = false;
      isAccsModified = false;
      namedNode = pendingNamedNode;
+     nodeMatch = pendingNodeMatch;
      dispatch("set-filter", {
        numNodes,
        numHWThreads,
        numAccelerators,
        namedNode,
+       nodeMatch
      });
    }}>Reset</Button
  >

View File

@@ -17,7 +17,6 @@
  import { parse, format, sub } from "date-fns";
  import {
    Row,
-   Col,
    Button,
    Input,
    Modal,
@@ -34,8 +33,7 @@
  export let from = null;
  export let to = null;
  export let range = "";
+ export let startTimeSelectOptions;

- let pendingFrom, pendingTo;
  const now = new Date(Date.now());
  const ago = sub(now, { months: 1 });
@@ -48,12 +46,24 @@
    time: format(now, "HH:mm"),
  };

- function reset() {
-   pendingFrom = from == null ? defaultFrom : fromRFC3339(from);
-   pendingTo = to == null ? defaultTo : fromRFC3339(to);
- }
- reset();
+ $: pendingFrom = (from == null) ? defaultFrom : fromRFC3339(from)
+ $: pendingTo = (to == null) ? defaultTo : fromRFC3339(to)
+ $: pendingRange = range

+ $: isModified =
+   (from != toRFC3339(pendingFrom) || to != toRFC3339(pendingTo, "59")) &&
+   (range != pendingRange) &&
+   !(
+     from == null &&
+     pendingFrom.date == "0000-00-00" &&
+     pendingFrom.time == "00:00"
+   ) &&
+   !(
+     to == null &&
+     pendingTo.date == "0000-00-00" &&
+     pendingTo.time == "00:00"
+   ) &&
+   !( range == "" && pendingRange == "");

  function toRFC3339({ date, time }, secs = "00") {
    const parsedDate = parse(
@@ -71,19 +81,6 @@
      time: format(parsedDate, "HH:mm"),
    };
  }

- $: isModified =
-   (from != toRFC3339(pendingFrom) || to != toRFC3339(pendingTo, "59")) &&
-   !(
-     from == null &&
-     pendingFrom.date == "0000-00-00" &&
-     pendingFrom.time == "00:00"
-   ) &&
-   !(
-     to == null &&
-     pendingTo.date == "0000-00-00" &&
-     pendingTo.time == "00:00"
-   );
</script>

<Modal {isOpen} toggle={() => (isOpen = !isOpen)}>
@@ -92,52 +89,82 @@
  {#if range !== ""}
    <h4>Current Range</h4>
    <Row>
-     <Col>
-       <Input type="text" value={range} disabled/>
-     </Col>
+     <FormGroup class="col">
+       <Input type ="select" bind:value={pendingRange} >
+         {#each startTimeSelectOptions as { rangeLabel, range }}
+           <option label={rangeLabel} value={range}/>
+         {/each}
+       </Input>
+     </FormGroup>
    </Row>
  {/if}
  <h4>From</h4>
  <Row>
    <FormGroup class="col">
-     <Input type="date" bind:value={pendingFrom.date} />
+     <Input type="date" bind:value={pendingFrom.date} disabled={pendingRange !== ""}/>
    </FormGroup>
    <FormGroup class="col">
-     <Input type="time" bind:value={pendingFrom.time} />
+     <Input type="time" bind:value={pendingFrom.time} disabled={pendingRange !== ""}/>
    </FormGroup>
  </Row>
  <h4>To</h4>
  <Row>
    <FormGroup class="col">
-     <Input type="date" bind:value={pendingTo.date} />
+     <Input type="date" bind:value={pendingTo.date} disabled={pendingRange !== ""}/>
    </FormGroup>
    <FormGroup class="col">
-     <Input type="time" bind:value={pendingTo.time} />
+     <Input type="time" bind:value={pendingTo.time} disabled={pendingRange !== ""}/>
    </FormGroup>
  </Row>
</ModalBody>
<ModalFooter>
- <Button
-   color="primary"
-   disabled={pendingFrom.date == "0000-00-00" ||
-     pendingTo.date == "0000-00-00"}
-   on:click={() => {
-     isOpen = false;
-     from = toRFC3339(pendingFrom);
-     to = toRFC3339(pendingTo, "59");
-     dispatch("set-filter", { from, to });
-   }}
- >
-   Close & Apply
- </Button>
+ {#if pendingRange !== ""}
+   <Button
+     color="warning"
+     disabled={pendingRange === ""}
+     on:click={() => {
+       pendingRange = ""
+     }}
+   >
+     Reset Range
+   </Button>
+   <Button
+     color="primary"
+     disabled={pendingRange === ""}
+     on:click={() => {
+       isOpen = false;
+       from = null;
+       to = null;
+       range = pendingRange;
+       dispatch("set-filter", { from, to, range });
+     }}
+   >
+     Close & Apply Range
+   </Button>
+ {:else}
+   <Button
+     color="primary"
+     disabled={pendingFrom.date == "0000-00-00" ||
+       pendingTo.date == "0000-00-00"}
+     on:click={() => {
+       isOpen = false;
+       from = toRFC3339(pendingFrom);
+       to = toRFC3339(pendingTo, "59");
+       range = "";
+       dispatch("set-filter", { from, to, range });
+     }}
+   >
+     Close & Apply Dates
+   </Button>
+ {/if}
  <Button
    color="danger"
    on:click={() => {
      isOpen = false;
      from = null;
      to = null;
-     reset();
-     dispatch("set-filter", { from, to });
+     range = "";
+     dispatch("set-filter", { from, to, range });
    }}>Reset</Button
  >
  <Button on:click={() => (isOpen = false)}>Close</Button>
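To summarize the two apply paths above: the `set-filter` event now always carries all three fields, with a named range and explicit dates being mutually exclusive. Sketched payloads (values assumed for illustration):

    // "Close & Apply Range":
    dispatch("set-filter", { from: null, to: null, range: "last7d" });
    // "Close & Apply Dates":
    dispatch("set-filter", { from: "2025-05-01T00:00:00Z", to: "2025-05-13T23:59:59Z", range: "" });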

View File

@@ -18,6 +18,8 @@
  export let username = null;
  export let authlevel= null;
  export let roles = null;
+ export let isSelected = null;
+ export let showSelect = false;

  function formatDuration(duration) {
    const hours = Math.floor(duration / 3600);
@@ -76,18 +78,39 @@
      <a href="/monitoring/job/{job.id}" target="_blank">{job.jobId}</a>
      ({job.cluster})
    </span>
-   <Button id={`${job.cluster}-${job.jobId}-clipboard`} outline color="secondary" size="sm" on:click={() => clipJobId(job.jobId)} >
-     {#if displayCheck}
-       <Icon name="clipboard2-check-fill"/>
-     {:else}
-       <Icon name="clipboard2"/>
-     {/if}
-   </Button>
-   <Tooltip
-     target={`${job.cluster}-${job.jobId}-clipboard`}
-     placement="right">
-     { displayCheck ? 'Copied!' : 'Copy Job ID to Clipboard' }
-   </Tooltip>
+   <span>
+     {#if showSelect}
+       <Button id={`${job.cluster}-${job.jobId}-select`} outline={!isSelected} color={isSelected? `success`: `secondary`} size="sm" class="mr-2"
+         on:click={() => {
+           isSelected = !isSelected
+         }}>
+         {#if isSelected}
+           <Icon name="check-square"/>
+         {:else if isSelected == false}
+           <Icon name="square"/>
+         {:else}
+           <Icon name="plus-square-dotted" />
+         {/if}
+       </Button>
+       <Tooltip
+         target={`${job.cluster}-${job.jobId}-select`}
+         placement="left">
+         { 'Add or Remove Job to/from Comparison Selection' }
+       </Tooltip>
+     {/if}
+     <Button id={`${job.cluster}-${job.jobId}-clipboard`} outline color="secondary" size="sm" on:click={clipJobId(job.jobId)} >
+       {#if displayCheck}
+         <Icon name="clipboard2-check-fill"/>
+       {:else}
+         <Icon name="clipboard2"/>
+       {/if}
+     </Button>
+     <Tooltip
+       target={`${job.cluster}-${job.jobId}-clipboard`}
+       placement="right">
+       { displayCheck ? 'Copied!' : 'Copy Job ID to Clipboard' }
+     </Tooltip>
+   </span>
  </span>
{#if job.metaData?.jobName}
  {#if job.metaData?.jobName.length <= 25}

View File

@@ -12,7 +12,7 @@
<script>
  import { queryStore, gql, getContextClient } from "@urql/svelte";
- import { getContext } from "svelte";
+ import { getContext, createEventDispatcher } from "svelte";
  import { Card, Spinner } from "@sveltestrap/sveltestrap";
  import { maxScope, checkMetricDisabled } from "../utils.js";
  import JobInfo from "./JobInfo.svelte";
@@ -25,7 +25,9 @@
  export let plotHeight = 275;
  export let showFootprint;
  export let triggerMetricRefresh = false;
+ export let previousSelect = false;

+ const dispatch = createEventDispatcher();
  const resampleConfig = getContext("resampling") || null;
  const resampleDefault = resampleConfig ? Math.max(...resampleConfig.resolutions) : 0;
@@ -39,6 +41,8 @@
  let zoomStates = {};
  let thresholdStates = {};

+ $: isSelected = previousSelect || null;

  const cluster = getContext("clusters").find((c) => c.name == job.cluster);
  const client = getContextClient();
  const query = gql`
@@ -112,6 +116,12 @@
      refreshMetrics();
    }

+ $: if (isSelected == true && previousSelect == false) {
+   dispatch("select-job", job.id)
+ } else if (isSelected == false && previousSelect == true) {
+   dispatch("unselect-job", job.id)
+ }

  // Helper
  const selectScope = (jobMetrics) =>
    jobMetrics.reduce(
@@ -152,7 +162,7 @@
<tr>
  <td>
-   <JobInfo {job} />
+   <JobInfo {job} bind:isSelected showSelect/>
  </td>
  {#if job.monitoringStatus == 0 || job.monitoringStatus == 2}
    <td colspan={metrics.length}>
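Taken together with the JobList changes above, the selection round trip works roughly like this (condensed sketch, not verbatim from either file):

    // JobListRow: clicking the select button in JobInfo flips isSelected, and the
    // reactive block above dispatches accordingly:
    //   isSelected == true  && previousSelect == false -> dispatch("select-job", job.id)
    //   isSelected == false && previousSelect == true  -> dispatch("unselect-job", job.id)
    // JobList: its handlers add/remove job.id in selectedJobs, which flows back into
    // each row as previousSelect={selectedJobs.includes(job.id)}.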

View File

@@ -0,0 +1,314 @@
<!--
    @component Comparison plot component, based on uPlot; plots resource counts or metric statistics for multiple jobs side by side
    Only width/height should change reactively.

    Properties:
    - `metric String?`: The metric name [Default: ""]
    - `width Number?`: The plot width [Default: 0]
    - `height Number?`: The plot height [Default: 300]
    - `data [Array]`: The uPlot-formatted data array
    - `xlabel String?`: X-axis label [Default: ""]
    - `xticks [String]?`: Job IDs used as X-axis tick labels [Default: []]
    - `xinfo [String]?`: Per-job cluster info shown in the tooltip [Default: []]
    - `ylabel String?`: Y-axis label [Default: ""]
    - `yunit String?`: Unit shown with Y-axis values [Default: ""]
    - `title String?`: The plot title [Default: ""]
    - `forResources Bool?`: Render resource counts instead of metric min/avg/max [Default: false]
    - `plotSync Object`: uPlot cursor sync group shared by all compare plots
-->
<script>
import uPlot from "uplot";
import { roundTwoDigits, formatTime, formatNumber } from "../units.js";
import { getContext, onMount, onDestroy } from "svelte";
import { Card } from "@sveltestrap/sveltestrap";
export let metric = "";
export let width = 0;
export let height = 300;
export let data = null;
export let xlabel = "";
export let xticks = [];
export let xinfo = [];
export let ylabel = "";
export let yunit = "";
export let title = "";
export let forResources = false;
export let plotSync;
// NOTE: Metric Thresholds non-required, Cluster Mixing Allowed
const clusterCockpitConfig = getContext("cc-config");
const lineWidth = clusterCockpitConfig.plot_general_lineWidth / window.devicePixelRatio;
const cbmode = clusterCockpitConfig?.plot_general_colorblindMode || false;
// UPLOT PLUGIN // converts the legend into a simple tooltip
function legendAsTooltipPlugin({
className,
style = { backgroundColor: "rgba(255, 249, 196, 0.92)", color: "black" },
} = {}) {
let legendEl;
function init(u, opts) {
legendEl = u.root.querySelector(".u-legend");
legendEl.classList.remove("u-inline");
className && legendEl.classList.add(className);
uPlot.assign(legendEl.style, {
minWidth: "100px",
textAlign: "left",
pointerEvents: "none",
display: "none",
position: "absolute",
left: 0,
top: 0,
zIndex: 100,
boxShadow: "2px 2px 10px rgba(0,0,0,0.5)",
...style,
});
// hide series color markers:
const idents = legendEl.querySelectorAll(".u-marker");
for (let i = 0; i < idents.length; i++)
idents[i].style.display = "none";
const overEl = u.over;
overEl.style.overflow = "visible";
// move legend into plot bounds
overEl.appendChild(legendEl);
// show/hide tooltip on enter/exit
overEl.addEventListener("mouseenter", () => {
legendEl.style.display = null;
});
overEl.addEventListener("mouseleave", () => {
legendEl.style.display = "none";
});
}
function update(u) {
const { left, top } = u.cursor;
const width = u?.over?.querySelector(".u-legend")?.offsetWidth ? u.over.querySelector(".u-legend").offsetWidth : 0;
legendEl.style.transform =
"translate(" + (left - width - 15) + "px, " + (top + 15) + "px)";
}
return {
hooks: {
init: init,
setCursor: update,
},
};
}
const plotSeries = [
{
label: "JobID",
scale: "x",
value: (u, ts, sidx, didx) => {
return `${xticks[didx]} | ${xinfo[didx]}`;
},
},
{
label: "Starttime",
scale: "xst",
value: (u, ts, sidx, didx) => {
return new Date(ts * 1000).toLocaleString();
},
},
{
label: "Duration",
scale: "xrt",
value: (u, ts, sidx, didx) => {
return formatTime(ts);
},
},
]
if (forResources) {
const resSeries = [
{
label: "Nodes",
scale: "y",
width: lineWidth,
stroke: "black",
},
{
label: "Threads",
scale: "y",
width: lineWidth,
stroke: "rgb(0,0,255)",
},
{
label: "Accelerators",
scale: "y",
width: lineWidth,
stroke: cbmode ? "rgb(0,255,0)" : "red",
}
];
plotSeries.push(...resSeries)
} else {
const statsSeries = [
{
label: "Min",
scale: "y",
width: lineWidth,
stroke: cbmode ? "rgb(0,255,0)" : "red",
value: (u, ts, sidx, didx) => {
return `${roundTwoDigits(ts)} ${yunit}`;
},
},
{
label: "Avg",
scale: "y",
width: lineWidth,
stroke: "black",
value: (u, ts, sidx, didx) => {
return `${roundTwoDigits(ts)} ${yunit}`;
},
},
{
label: "Max",
scale: "y",
width: lineWidth,
stroke: cbmode ? "rgb(0,0,255)" : "green",
value: (u, ts, sidx, didx) => {
return `${roundTwoDigits(ts)} ${yunit}`;
},
}
];
plotSeries.push(...statsSeries)
};
const plotBands = [
{ series: [5, 4], fill: cbmode ? "rgba(0,0,255,0.1)" : "rgba(0,255,0,0.1)" },
{ series: [4, 3], fill: cbmode ? "rgba(0,255,0,0.1)" : "rgba(255,0,0,0.1)" },
];
const opts = {
width,
height,
title,
plugins: [legendAsTooltipPlugin()],
series: plotSeries,
axes: [
{
scale: "x",
space: 25, // Tick Spacing
rotate: 30,
show: true,
label: xlabel,
values(self, splits) {
return splits.map(s => xticks[s]);
}
},
{
scale: "xst",
show: false,
},
{
scale: "xrt",
show: false,
},
{
scale: "y",
grid: { show: true },
labelFont: "sans-serif",
label: ylabel + (yunit ? ` (${yunit})` : ''),
values: (u, vals) => vals.map((v) => formatNumber(v)),
},
],
bands: forResources ? [] : plotBands,
padding: [5, 10, 0, 0],
hooks: {
draw: [
(u) => {
// Draw plot type label:
let textl = forResources ? "Job Resources by Type" : "Metric Min/Avg/Max for Job Duration";
let textr = "Earlier <- StartTime -> Later";
u.ctx.save();
u.ctx.textAlign = "start";
u.ctx.fillStyle = "black";
u.ctx.fillText(textl, u.bbox.left + 10, u.bbox.top + 10);
u.ctx.textAlign = "end";
u.ctx.fillStyle = "black";
u.ctx.fillText(
textr,
u.bbox.left + u.bbox.width - 10,
u.bbox.top + 10,
);
u.ctx.restore();
return;
},
]
},
scales: {
x: { time: false },
xst: { time: false },
xrt: { time: false },
y: {auto: true, distr: forResources ? 3 : 1},
},
legend: {
// Display legend
show: true,
live: true,
},
cursor: {
drag: { x: true, y: true },
sync: {
key: plotSync.key,
scales: ["x", null],
}
}
};
// RENDER HANDLING
let plotWrapper = null;
let uplot = null;
let timeoutId = null;
function render(ren_width, ren_height) {
if (!uplot) {
opts.width = ren_width;
opts.height = ren_height;
uplot = new uPlot(opts, data, plotWrapper); // Data is uplot formatted [[X][Ymin][Yavg][Ymax]]
plotSync.sub(uplot)
} else {
uplot.setSize({ width: ren_width, height: ren_height });
}
}
function onSizeChange(chg_width, chg_height) {
if (!uplot) return;
if (timeoutId != null) clearTimeout(timeoutId);
timeoutId = setTimeout(() => {
timeoutId = null;
render(chg_width, chg_height);
}, 200);
}
onMount(() => {
if (plotWrapper) {
render(width, height);
}
});
onDestroy(() => {
if (timeoutId != null) clearTimeout(timeoutId);
if (uplot) uplot.destroy();
});
// This updates plot on all size changes if wrapper (== data) exists
$: if (plotWrapper) {
onSizeChange(width, height);
}
</script>
<!-- Define $width Wrapper and NoData Card -->
{#if data && data[0].length > 0}
<div bind:this={plotWrapper} bind:clientWidth={width}
style="background-color: rgba(255, 255, 255, 1.0);" class="rounded"
/>
{:else}
<Card body color="warning" class="mx-4 my-2"
>Cannot render plot: No series data returned for <code>{metric?metric:'job resources'}</code></Card
>
{/if}
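For context, a minimal standalone sketch of the uPlot cursor synchronization used above via `plotSync` (target element and data are assumed):

    import uPlot from "uplot";

    const plotSync = uPlot.sync("compareJobsView"); // shared group, created once in JobCompare
    const opts = {
      /* title, series, axes, ... */
      cursor: { drag: { x: true, y: true }, sync: { key: plotSync.key, scales: ["x", null] } },
    };
    // every plot constructed with these opts then joins the group:
    const plot = new uPlot(opts, data, document.body);
    plotSync.sub(plot);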

View File

@@ -16,7 +16,7 @@
<script>
  import uPlot from "uplot";
  import { onMount, onDestroy } from "svelte";
- import { formatNumber } from "../units.js";
+ import { formatNumber, formatTime } from "../units.js";
  import { Card } from "@sveltestrap/sveltestrap";

  export let data;
@@ -36,21 +36,6 @@
    points: 2,
  };

- function formatTime(t) {
-   if (t !== null) {
-     if (isNaN(t)) {
-       return t;
-     } else {
-       const tAbs = Math.abs(t);
-       const h = Math.floor(tAbs / 3600);
-       const m = Math.floor((tAbs % 3600) / 60);
-       if (h == 0) return `${m}m`;
-       else if (m == 0) return `${h}h`;
-       else return `${h}:${m}h`;
-     }
-   }
- }

  function paths(u, seriesIdx, idx0, idx1, extendGap, buildClip) {
    let s = u.series[seriesIdx];
    let style = s.drawStyle;

View File

@@ -21,22 +21,6 @@
-->
<script context="module">
- function formatTime(t, forNode = false) {
-   if (t !== null) {
-     if (isNaN(t)) {
-       return t;
-     } else {
-       const tAbs = Math.abs(t);
-       const h = Math.floor(tAbs / 3600);
-       const m = Math.floor((tAbs % 3600) / 60);
-       // Re-Add "negativity" to time ticks only as string, so that if-cases work as intended
-       if (h == 0) return `${forNode && m != 0 ? "-" : ""}${m}m`;
-       else if (m == 0) return `${forNode ? "-" : ""}${h}h`;
-       else return `${forNode ? "-" : ""}${h}:${m}h`;
-     }
-   }
- }

  function timeIncrs(timestep, maxX, forNode) {
    if (forNode === true) {
      return [60, 120, 240, 300, 360, 480, 600, 900, 1800, 3600, 7200, 14400, 21600]; // forNode fixed increments
@@ -118,7 +102,7 @@
<script>
  import uPlot from "uplot";
- import { formatNumber } from "../units.js";
+ import { formatNumber, formatTime } from "../units.js";
  import { getContext, onMount, onDestroy, createEventDispatcher } from "svelte";
  import { Card } from "@sveltestrap/sveltestrap";

View File

@@ -58,7 +58,7 @@
  const getValues = (type) => labels.map(name => {
    // Peak is adapted and scaled for job shared state
    const peak = polarMetrics.find(m => m?.name == name)?.peak
-   const metric = polarData.find(m => m?.name == name)?.stats
+   const metric = polarData.find(m => m?.name == name)?.data
    const value = (peak && metric) ? (metric[type] / peak) : 0
    return value <= 1. ? value : 1.
  })

View File

@@ -28,6 +28,7 @@
  export let configName;
  export let allMetrics = null;
  export let cluster = null;
+ export let subCluster = null;
  export let showFootprint = false;
  export let footprintSelect = false;
@@ -44,25 +45,29 @@
    for (let metric of globalMetrics) allMetrics.add(metric.name);
  });

- $: if (newMetricsOrder.length === 0) {
+ $: {
    if (allMetrics != null) {
-     if (cluster == null) {
+     if (!cluster) {
        for (let metric of globalMetrics) allMetrics.add(metric.name);
      } else {
        allMetrics.clear();
        for (let gm of globalMetrics) {
+         if (!subCluster) {
            if (gm.availability.find((av) => av.cluster === cluster)) allMetrics.add(gm.name);
+         } else {
+           if (gm.availability.find((av) => av.cluster === cluster && av.subClusters.includes(subCluster))) allMetrics.add(gm.name);
+         }
        }
-       newMetricsOrder = [...allMetrics].filter((m) => !metrics.includes(m));
-       newMetricsOrder.unshift(...metrics.filter((m) => allMetrics.has(m)));
-       unorderedMetrics = unorderedMetrics.filter((m) => allMetrics.has(m));
      }
+     newMetricsOrder = [...allMetrics].filter((m) => !metrics.includes(m));
+     newMetricsOrder.unshift(...metrics.filter((m) => allMetrics.has(m)));
+     unorderedMetrics = unorderedMetrics.filter((m) => allMetrics.has(m));
    }
+ }

  function printAvailability(metric, cluster) {
    const avail = globalMetrics.find((gm) => gm.name === metric)?.availability
-   if (cluster == null) {
+   if (!cluster) {
      return avail.map((av) => av.cluster).join(',')
    } else {
      return avail.find((av) => av.cluster === cluster).subClusters.join(',')
@@ -110,10 +115,17 @@
    metrics = newMetricsOrder.filter((m) => unorderedMetrics.includes(m));
    isOpen = false;

-   showFootprint = !!pendingShowFootprint;
+   let configKey;
+   if (cluster && subCluster) {
+     configKey = `${configName}:${cluster}:${subCluster}`;
+   } else if (cluster && !subCluster) {
+     configKey = `${configName}:${cluster}`;
+   } else {
+     configKey = `${configName}`;
+   }

    updateConfigurationMutation({
-     name: cluster == null ? configName : `${configName}:${cluster}`,
+     name: configKey,
      value: JSON.stringify(metrics),
    }).subscribe((res) => {
      if (res.fetching === false && res.error) {
@@ -121,17 +133,20 @@
      }
    });

-   updateConfigurationMutation({
-     name:
-       cluster == null
-         ? "plot_list_showFootprint"
-         : `plot_list_showFootprint:${cluster}`,
-     value: JSON.stringify(showFootprint),
-   }).subscribe((res) => {
-     if (res.fetching === false && res.error) {
-       throw res.error;
-     }
-   });
+   if (footprintSelect) {
+     showFootprint = !!pendingShowFootprint;
+     updateConfigurationMutation({
+       name:
+         !cluster
+           ? "plot_list_showFootprint"
+           : `plot_list_showFootprint:${cluster}`,
+       value: JSON.stringify(showFootprint),
+     }).subscribe((res) => {
+       if (res.fetching === false && res.error) {
+         throw res.error;
+       }
+     });
+   };

    dispatch('update-metrics', metrics);
  }
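For illustration, the configuration keys this now produces (cluster and subCluster values assumed):

    // configName = "job_view_nodestats_selectedMetrics"
    // cluster + subCluster -> "job_view_nodestats_selectedMetrics:fritz:main"
    // cluster only         -> "job_view_nodestats_selectedMetrics:fritz"
    // neither              -> "job_view_nodestats_selectedMetrics"

The new StatsTab component below reads these keys back with the same most-specific-first fallback.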

View File

@@ -17,6 +17,10 @@ export function formatNumber(x) {
    }
  }

+ export function roundTwoDigits(x) {
+   return Math.round(x * 100) / 100
+ }

  export function scaleNumbers(x, y , p = '') {
    const oldPower = power[prefix.indexOf(p)]
    const rawXValue = x * oldPower
@@ -31,4 +35,20 @@ export function scaleNumbers(x, y , p = '') {
    return Math.abs(rawYValue) >= 1000 ? `${rawXValue.toExponential()} / ${rawYValue.toExponential()}` : `${rawYValue.toString()} / ${rawYValue.toString()}`
  }

+ export function formatTime(t, forNode = false) {
+   if (t !== null) {
+     if (isNaN(t)) {
+       return t;
+     } else {
+       const tAbs = Math.abs(t);
+       const h = Math.floor(tAbs / 3600);
+       const m = Math.floor((tAbs % 3600) / 60);
+       // Re-Add "negativity" to time ticks only as string, so that if-cases work as intended
+       if (h == 0) return `${forNode && m != 0 ? "-" : ""}${m}m`;
+       else if (m == 0) return `${forNode ? "-" : ""}${h}h`;
+       else return `${forNode ? "-" : ""}${h}:${m}h`;
+     }
+   }
+ }

  // export const dateToUnixEpoch = (rfc3339) => Math.floor(Date.parse(rfc3339) / 1000);
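Worked examples for the now-shared helpers, derived from the code above:

    roundTwoDigits(3.14159) // -> 3.14
    formatTime(120)         // -> "2m"
    formatTime(7200)        // -> "2h"
    formatTime(3660)        // -> "1:1h"
    formatTime(-3600, true) // -> "-1h" (forNode re-adds the sign as a string)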

View File

@@ -461,11 +461,11 @@ export function convert2uplot(canvasData, secondsToMinutes = false, secondsToHou
    } else { // Default -> Fill Histodata with zero values on unused value placing -> maybe allows zoom trigger as known
      if (secondsToHours) {
        let hours = cd.value / 3600
-       console.log("x seconds to y hours", cd.value, hours)
+       // console.log("x seconds to y hours", cd.value, hours)
        uplotData[0].push(hours)
      } else if (secondsToMinutes) {
        let minutes = cd.value / 60
-       console.log("x seconds to y minutes", cd.value, minutes)
+       // console.log("x seconds to y minutes", cd.value, minutes)
        uplotData[0].push(minutes)
      } else {
        uplotData[0].push(cd.value)

View File

@@ -14,7 +14,6 @@
<script>
  import {
    getContext,
-   createEventDispatcher
  } from "svelte";
  import {
    queryStore,
@@ -56,7 +55,6 @@
  let pendingZoomState = null;
  let thresholdState = null;

- const dispatch = createEventDispatcher();
  const statsPattern = /(.*)-stat$/;
  const unit = (metricUnit?.prefix ? metricUnit.prefix : "") + (metricUnit?.base ? metricUnit.base : "");
  const client = getContextClient();
@@ -150,11 +148,6 @@
    // On additional scope request
    if (selectedScope == "load-all") {
-     // Push scope to statsTable (Needs to be in this case, else newly selected 'Metric.svelte' renders cause statsTable race condition)
-     const statsTableData = $metricData.data.singleUpdate.filter((x) => x.scope !== "node")
-     if (statsTableData.length > 0) {
-       dispatch("more-loaded", statsTableData);
-     }
      // Set selected scope to min of returned scopes
      selectedScope = minScope(scopes)
      nodeOnly = (selectedScope == "node") // "node" still only scope after load-all

View File

@@ -0,0 +1,145 @@
<!--
@component Job-View subcomponent; Wraps the statsTable in a TabPane and contains GQL query for scoped statsData
Properties:
- `job Object`: The job object
- `clusters Object`: The clusters object
    - `tabActive bool`: Boolean if StatsTable tab is active on creation
-->
<script>
import {
queryStore,
gql,
getContextClient
} from "@urql/svelte";
import { getContext } from "svelte";
import {
Card,
Button,
Row,
Col,
TabPane,
Spinner,
Icon
} from "@sveltestrap/sveltestrap";
import MetricSelection from "../generic/select/MetricSelection.svelte";
import StatsTable from "./statstab/StatsTable.svelte";
export let job;
export let clusters;
export let tabActive;
let loadScopes = false;
let selectedScopes = [];
let selectedMetrics = [];
let availableMetrics = new Set(); // For Info Only, filled by MetricSelection Component
let isMetricSelectionOpen = false;
const client = getContextClient();
const query = gql`
query ($dbid: ID!, $selectedMetrics: [String!]!, $selectedScopes: [MetricScope!]!) {
scopedJobStats(id: $dbid, metrics: $selectedMetrics, scopes: $selectedScopes) {
name
scope
stats {
hostname
id
data {
min
avg
max
}
}
}
}
`;
$: scopedStats = queryStore({
client: client,
query: query,
variables: { dbid: job.id, selectedMetrics, selectedScopes },
});
$: if (loadScopes) {
selectedScopes = ["node", "socket", "core", "hwthread", "accelerator"];
}
// Handle Job Query on Init -> is not executed anymore
getContext("on-init")(() => {
if (!job) return;
const pendingMetrics = (
getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}:${job.subCluster}`] ||
getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`]
) || getContext("cc-config")["job_view_nodestats_selectedMetrics"];
// Select default Scopes to load: Check before if any metric has accelerator scope by default
const accScopeDefault = [...pendingMetrics].some(function (m) {
const cluster = clusters.find((c) => c.name == job.cluster);
const subCluster = cluster.subClusters.find((sc) => sc.name == job.subCluster);
return subCluster.metricConfig.find((smc) => smc.name == m)?.scope === "accelerator";
});
const pendingScopes = ["node"]
if (job.numNodes === 1) {
pendingScopes.push("socket")
pendingScopes.push("core")
pendingScopes.push("hwthread")
if (accScopeDefault) { pendingScopes.push("accelerator") }
}
selectedMetrics = [...pendingMetrics];
selectedScopes = [...pendingScopes];
});
</script>
<TabPane tabId="stats" tab="Statistics Table" class="overflow-x-auto" active={tabActive}>
<Row>
<Col class="m-2">
<Button outline on:click={() => (isMetricSelectionOpen = true)} class="px-2" color="primary" style="margin-right:0.5rem">
Select Metrics (Selected {selectedMetrics.length} of {availableMetrics.size} available)
</Button>
{#if job.numNodes > 1}
<Button class="px-2 ml-auto" color="success" outline on:click={() => (loadScopes = !loadScopes)} disabled={loadScopes}>
{#if !loadScopes}
<Icon name="plus-square-fill" style="margin-right:0.25rem"/> Add More Scopes
{:else}
<Icon name="check-square-fill" style="margin-right:0.25rem"/> OK: Scopes Added
{/if}
</Button>
{/if}
</Col>
</Row>
<hr class="mb-1 mt-1"/>
<!-- ROW1: Status-->
{#if $scopedStats.fetching}
<Row>
<Col class="m-3" style="text-align: center;">
<Spinner secondary/>
</Col>
</Row>
{:else if $scopedStats.error}
<Row>
<Col class="m-2">
<Card body color="danger">{$scopedStats.error.message}</Card>
</Col>
</Row>
{:else}
<StatsTable
hosts={job.resources.map((r) => r.hostname).sort()}
data={$scopedStats?.data?.scopedJobStats}
{selectedMetrics}
/>
{/if}
</TabPane>
<MetricSelection
cluster={job.cluster}
subCluster={job.subCluster}
configName="job_view_nodestats_selectedMetrics"
bind:allMetrics={availableMetrics}
bind:metrics={selectedMetrics}
bind:isOpen={isMetricSelectionOpen}
/>
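A sketch of how the scopedJobStats query above is exercised (ids, hostnames, and values assumed for illustration):

    // variables: { dbid: "1234", selectedMetrics: ["flops_any"], selectedScopes: ["node", "core"] }
    // $scopedStats.data.scopedJobStats ~ [
    //   { name: "flops_any", scope: "node",
    //     stats: [{ hostname: "node001", id: null, data: { min: 0.2, avg: 42.1, max: 88.5 } }] },
    //   { name: "flops_any", scope: "core",
    //     stats: [{ hostname: "node001", id: "0", data: { min: 0.1, avg: 1.3, max: 2.8 } }] },
    // ]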

View File

@@ -1,176 +0,0 @@
<!--
@component Job-View subcomponent; display table of metric data statistics with selectable scopes
Properties:
- `job Object`: The job object
- `jobMetrics [Object]`: The jobs metricdata
Exported:
- `moreLoaded`: Adds additional scopes requested from Metric.svelte in Job-View
-->
<script>
import { getContext } from "svelte";
import {
Button,
Table,
Input,
InputGroup,
InputGroupText,
Icon,
Row,
Col
} from "@sveltestrap/sveltestrap";
import { maxScope } from "../generic/utils.js";
import StatsTableEntry from "./StatsTableEntry.svelte";
import MetricSelection from "../generic/select/MetricSelection.svelte";
export let job;
export let jobMetrics;
const sortedJobMetrics = [...new Set(jobMetrics.map((m) => m.name))].sort()
const scopesForMetric = (metric) =>
jobMetrics.filter((jm) => jm.name == metric).map((jm) => jm.scope);
let hosts = job.resources.map((r) => r.hostname).sort(),
selectedScopes = {},
sorting = {},
isMetricSelectionOpen = false,
availableMetrics = new Set(),
selectedMetrics =
getContext("cc-config")[`job_view_nodestats_selectedMetrics:${job.cluster}`] ||
getContext("cc-config")["job_view_nodestats_selectedMetrics"];
for (let metric of sortedJobMetrics) {
// Shared (non-exclusive) or single-node job: load the smallest available granularity as default
// -> Else (exclusive multi-node): use maxScope directly (mostly: node)
const availableScopes = scopesForMetric(metric);
if (job.exclusive != 1 || job.numNodes == 1) {
if (availableScopes.includes("accelerator")) {
selectedScopes[metric] = "accelerator";
} else if (availableScopes.includes("core")) {
selectedScopes[metric] = "core";
} else if (availableScopes.includes("socket")) {
selectedScopes[metric] = "socket";
} else {
selectedScopes[metric] = "node";
}
} else {
selectedScopes[metric] = maxScope(availableScopes);
}
sorting[metric] = {
min: { dir: "up", active: false },
avg: { dir: "up", active: false },
max: { dir: "up", active: false },
};
}
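// Sort hosts by one statistic of a metric's node-scope series; clicking the same column again flips the direction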
function sortBy(metric, stat) {
let s = sorting[metric][stat];
if (s.active) {
s.dir = s.dir == "up" ? "down" : "up";
} else {
for (let metric in sorting)
for (let stat in sorting[metric]) sorting[metric][stat].active = false;
s.active = true;
}
let series = jobMetrics.find(
(jm) => jm.name == metric && jm.scope == "node",
)?.metric.series;
sorting = { ...sorting };
hosts = hosts.sort((h1, h2) => {
let s1 = series.find((s) => s.hostname == h1)?.statistics;
let s2 = series.find((s) => s.hostname == h2)?.statistics;
if (s1 == null || s2 == null) return -1;
return s.dir != "up" ? s1[stat] - s2[stat] : s2[stat] - s1[stat];
});
}
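// Merge additionally loaded scopes (requested from Metric.svelte) into the local metric data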
export function moreLoaded(moreJobMetrics) {
moreJobMetrics.forEach(function (newMetric) {
if (!jobMetrics.some((m) => m.scope == newMetric.scope)) {
jobMetrics = [...jobMetrics, newMetric]
}
});
};
</script>
<Row>
<Col class="m-2">
<Button outline on:click={() => (isMetricSelectionOpen = true)} class="w-auto px-2" color="primary">
Select Metrics (Selected {selectedMetrics.length} of {availableMetrics.size} available)
</Button>
</Col>
</Row>
<hr class="mb-1 mt-1"/>
<Table class="mb-0">
<thead>
<!-- Header Row 1: Selectors -->
<tr>
<th/>
{#each selectedMetrics as metric}
<!-- To Match Row-2 Header Field Count-->
<th colspan={selectedScopes[metric] == "node" ? 3 : 4}>
<InputGroup>
<InputGroupText>
{metric}
</InputGroupText>
<Input type="select" bind:value={selectedScopes[metric]}>
{#each scopesForMetric(metric, jobMetrics) as scope}
<option value={scope}>{scope}</option>
{/each}
</Input>
</InputGroup>
</th>
{/each}
</tr>
<!-- Header Row 2: Fields -->
<tr>
<th>Node</th>
{#each selectedMetrics as metric}
{#if selectedScopes[metric] != "node"}
<th>Id</th>
{/if}
{#each ["min", "avg", "max"] as stat}
<th on:click={() => sortBy(metric, stat)}>
{stat}
{#if selectedScopes[metric] == "node"}
<Icon
name="caret-{sorting[metric][stat].dir}{sorting[metric][stat]
.active
? '-fill'
: ''}"
/>
{/if}
</th>
{/each}
{/each}
</tr>
</thead>
<tbody>
{#each hosts as host (host)}
<tr>
<th scope="col">{host}</th>
{#each selectedMetrics as metric (metric)}
<StatsTableEntry
{host}
{metric}
scope={selectedScopes[metric]}
{jobMetrics}
/>
{/each}
</tr>
{/each}
</tbody>
</Table>
<MetricSelection
cluster={job.cluster}
configName="job_view_nodestats_selectedMetrics"
bind:allMetrics={availableMetrics}
bind:metrics={selectedMetrics}
bind:isOpen={isMetricSelectionOpen}
/>

@@ -40,14 +40,14 @@
   const client = getContextClient();
   const polarQuery = gql`
     query ($dbid: ID!, $selectedMetrics: [String!]!) {
-      jobMetricStats(id: $dbid, metrics: $selectedMetrics) {
+      jobStats(id: $dbid, metrics: $selectedMetrics) {
         name
-        stats {
+        data {
           min
           avg
           max
         }
       }
     }
   `;
@@ -66,7 +66,7 @@
   {:else}
     <Polar
       {polarMetrics}
-      polarData={$polarData.data.jobMetricStats}
+      polarData={$polarData.data.jobStats}
     />
   {/if}
 </CardBody>
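(For orientation, a minimal sketch of the response shape the renamed jobStats query yields with the fields selected above; the metric name and values are illustrative, not taken from this commit:)

{
  "data": {
    "jobStats": [
      { "name": "flops_any", "data": { "min": 0.1, "avg": 42.0, "max": 96.5 } }
    ]
  }
}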

@@ -0,0 +1,139 @@
<!--
@component Job-View subcomponent; display table of metric data statistics with selectable scopes
Properties:
- `data [Object]`: The scoped job statistics data
- `selectedMetrics [String]`: The selected metrics
- `hosts [String]`: The list of hostnames of this job
-->
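<!-- Usage sketch (matches how the statistics tab above instantiates this component):
<StatsTable
  hosts={job.resources.map((r) => r.hostname).sort()}
  data={$scopedStats?.data?.scopedJobStats}
  {selectedMetrics}
/>
-->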
<script>
import {
Table,
Input,
InputGroup,
InputGroupText,
Icon,
} from "@sveltestrap/sveltestrap";
import StatsTableEntry from "./StatsTableEntry.svelte";
export let data = [];
export let selectedMetrics = [];
export let hosts = [];
let sorting = {};
let availableScopes = {};
let selectedScopes = {};
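// Helpers: list the scopes present in the queried data for a metric, and set a metric's scope without self-triggering reactivity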
const scopesForMetric = (metric) =>
data?.filter((jm) => jm.name == metric)?.map((jm) => jm.scope) || [];
const setScopeForMetric = (metric, scope) =>
selectedScopes[metric] = scope
$: if (data && selectedMetrics) {
for (let metric of selectedMetrics) {
availableScopes[metric] = scopesForMetric(metric);
// Set Initial Selection, but do not use selectedScopes: Skips reactivity
if (availableScopes[metric].includes("accelerator")) {
setScopeForMetric(metric, "accelerator");
} else if (availableScopes[metric].includes("core")) {
setScopeForMetric(metric, "core");
} else if (availableScopes[metric].includes("socket")) {
setScopeForMetric(metric, "socket");
} else {
setScopeForMetric(metric, "node");
}
sorting[metric] = {
min: { dir: "up", active: false },
avg: { dir: "up", active: false },
max: { dir: "up", active: false },
};
}
}
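// Sort hosts by one statistic of a metric's node-scope stats; clicking the same column again flips the direction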
function sortBy(metric, stat) {
let s = sorting[metric][stat];
if (s.active) {
s.dir = s.dir == "up" ? "down" : "up";
} else {
for (let metric in sorting)
for (let stat in sorting[metric]) sorting[metric][stat].active = false;
s.active = true;
}
let stats = data.find(
(d) => d.name == metric && d.scope == "node",
)?.stats || [];
sorting = { ...sorting };
hosts = hosts.sort((h1, h2) => {
let s1 = stats.find((s) => s.hostname == h1)?.data;
let s2 = stats.find((s) => s.hostname == h2)?.data;
if (s1 == null || s2 == null) return -1;
return s.dir != "up" ? s1[stat] - s2[stat] : s2[stat] - s1[stat];
});
}
</script>
<Table class="mb-0">
<thead>
<!-- Header Row 1: Selectors -->
<tr>
<th/>
{#each selectedMetrics as metric}
<!-- To Match Row-2 Header Field Count-->
<th colspan={selectedScopes[metric] == "node" ? 3 : 4}>
<InputGroup>
<InputGroupText>
{metric}
</InputGroupText>
<Input type="select" bind:value={selectedScopes[metric]} disabled={availableScopes[metric].length === 1}>
{#each (availableScopes[metric] || []) as scope}
<option value={scope}>{scope}</option>
{/each}
</Input>
</InputGroup>
</th>
{/each}
</tr>
<!-- Header Row 2: Fields -->
<tr>
<th>Node</th>
{#each selectedMetrics as metric}
{#if selectedScopes[metric] != "node"}
<th>Id</th>
{/if}
{#each ["min", "avg", "max"] as stat}
<th on:click={() => sortBy(metric, stat)}>
{stat}
{#if selectedScopes[metric] == "node"}
<Icon
name="caret-{sorting[metric][stat].dir}{sorting[metric][stat]
.active
? '-fill'
: ''}"
/>
{/if}
</th>
{/each}
{/each}
</tr>
</thead>
<tbody>
{#each hosts as host (host)}
<tr>
<th scope="col">{host}</th>
{#each selectedMetrics as metric (metric)}
<StatsTableEntry
{data}
{host}
{metric}
scope={selectedScopes[metric]}
/>
{/each}
</tr>
{/each}
</tbody>
</Table>

@@ -1,11 +1,11 @@
 <!--
-  @component Job-View subcomponent; Single Statistics entry component fpr statstable
+  @component Job-View subcomponent; Single Statistics entry component for statstable

   Properties:
   - `host String`: The hostname (== node)
   - `metric String`: The metric name
   - `scope String`: The selected scope
-  - `jobMetrics [Object]`: The jobs metricdata
+  - `data [Object]`: The jobs statsdata
 -->

 <script>
@@ -14,59 +14,61 @@
   export let host;
   export let metric;
   export let scope;
-  export let jobMetrics;
+  export let data;

-  function compareNumbers(a, b) {
-    return a.id - b.id;
-  }
-
-  function sortByField(field) {
-    let s = sorting[field];
-    if (s.active) {
-      s.dir = s.dir == "up" ? "down" : "up";
-    } else {
-      for (let field in sorting) sorting[field].active = false;
-      s.active = true;
-    }
-    sorting = { ...sorting };
-    series = series.sort((a, b) => {
-      if (a == null || b == null) return -1;
-      if (field === "id") {
-        return s.dir != "up" ? a[field] - b[field] : b[field] - a[field];
-      } else {
-        return s.dir != "up"
-          ? a.statistics[field] - b.statistics[field]
-          : b.statistics[field] - a.statistics[field];
-      }
-    });
-  }
-
-  let sorting = {
+  let entrySorting = {
     id: { dir: "down", active: true },
     min: { dir: "up", active: false },
     avg: { dir: "up", active: false },
     max: { dir: "up", active: false },
   };

-  $: series = jobMetrics
-    .find((jm) => jm.name == metric && jm.scope == scope)
-    ?.metric.series.filter((s) => s.hostname == host && s.statistics != null)
-    ?.sort(compareNumbers);
+  function compareNumbers(a, b) {
+    return a.id - b.id;
+  }
+
+  function sortByField(field) {
+    let s = entrySorting[field];
+    if (s.active) {
+      s.dir = s.dir == "up" ? "down" : "up";
+    } else {
+      for (let field in entrySorting) entrySorting[field].active = false;
+      s.active = true;
+    }
+    entrySorting = { ...entrySorting };
+    stats = stats.sort((a, b) => {
+      if (a == null || b == null) return -1;
+      if (field === "id") {
+        return s.dir != "up" ?
+          a[field].localeCompare(b[field], undefined, {numeric: true, sensitivity: 'base'}) :
+          b[field].localeCompare(a[field], undefined, {numeric: true, sensitivity: 'base'})
+      } else {
+        return s.dir != "up"
+          ? a.data[field] - b.data[field]
+          : b.data[field] - a.data[field];
+      }
+    });
+  }
+
+  $: stats = data
+    ?.find((d) => d.name == metric && d.scope == scope)
+    ?.stats.filter((s) => s.hostname == host && s.data != null)
+    ?.sort(compareNumbers) || [];
 </script>

-{#if series == null || series.length == 0}
+{#if stats == null || stats.length == 0}
   <td colspan={scope == "node" ? 3 : 4}><i>No data</i></td>
-{:else if series.length == 1 && scope == "node"}
+{:else if stats.length == 1 && scope == "node"}
   <td>
-    {series[0].statistics.min}
+    {stats[0].data.min}
   </td>
   <td>
-    {series[0].statistics.avg}
+    {stats[0].data.avg}
   </td>
   <td>
-    {series[0].statistics.max}
+    {stats[0].data.max}
   </td>
 {:else}
   <td colspan="4">
@@ -77,19 +79,19 @@
         <th on:click={() => sortByField(field)}>
           Sort
           <Icon
-            name="caret-{sorting[field].dir}{sorting[field].active
+            name="caret-{entrySorting[field].dir}{entrySorting[field].active
              ? '-fill'
              : ''}"
          />
        </th>
      {/each}
    </tr>
-    {#each series as s, i}
+    {#each stats as s, i}
      <tr>
        <th>{s.id ?? i}</th>
-        <td>{s.statistics.min}</td>
-        <td>{s.statistics.avg}</td>
-        <td>{s.statistics.max}</td>
+        <td>{s.data.min}</td>
+        <td>{s.data.avg}</td>
+        <td>{s.data.max}</td>
      </tr>
    {/each}
  </tbody>

@@ -205,7 +205,7 @@
     </Col>
   </Row>
 {:else}
-  {#each nodes as nodeData}
+  {#each nodes as nodeData (nodeData.host)}
     <NodeListRow {nodeData} {cluster} {selectedMetrics}/>
   {:else}
     <tr>
@@ -221,7 +221,7 @@
   <p><b>
     Loading nodes {nodes.length + 1} to
     { matchedNodes
-      ? `${((nodes.length + paging.itemsPerPage) > matchedNodes) ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total`
+      ? `${(nodes.length + paging.itemsPerPage) > matchedNodes ? matchedNodes : (nodes.length + paging.itemsPerPage)} of ${matchedNodes} total`
       : (nodes.length + paging.itemsPerPage)
     }
   </b></p>

@@ -17,6 +17,9 @@
     Input,
     InputGroup,
     InputGroupText, } from "@sveltestrap/sveltestrap";
+  import {
+    scramble,
+    scrambleNames, } from "../../generic/utils.js";

   export let cluster;
   export let subCluster
@@ -32,8 +35,8 @@
   let userList;
   let projectList;

   $: if (nodeJobsData) {
-    userList = Array.from(new Set(nodeJobsData.jobs.items.map((j) => j.user))).sort((a, b) => a.localeCompare(b));
-    projectList = Array.from(new Set(nodeJobsData.jobs.items.map((j) => j.project))).sort((a, b) => a.localeCompare(b));
+    userList = Array.from(new Set(nodeJobsData.jobs.items.map((j) => scrambleNames ? scramble(j.user) : j.user))).sort((a, b) => a.localeCompare(b));
+    projectList = Array.from(new Set(nodeJobsData.jobs.items.map((j) => scrambleNames ? scramble(j.project) : j.project))).sort((a, b) => a.localeCompare(b));
   }
 </script>
@@ -102,6 +105,19 @@
         Shared
       </Button>
     </InputGroup>
+  <!-- Fallback -->
+  {:else if nodeJobsData.jobs.count >= 1}
+    <InputGroup>
+      <InputGroupText>
+        <Icon name="circle-fill"/>
+      </InputGroupText>
+      <InputGroupText>
+        Status
+      </InputGroupText>
+      <Button color="success" disabled>
+        Allocated Jobs
+      </Button>
+    </InputGroup>
   {:else}
     <InputGroup>
       <InputGroupText>

@@ -14,7 +14,7 @@
     getContextClient,
   } from "@urql/svelte";
   import { Card, CardBody, Spinner } from "@sveltestrap/sveltestrap";
-  import { maxScope, checkMetricDisabled } from "../../generic/utils.js";
+  import { maxScope, checkMetricDisabled, scramble, scrambleNames } from "../../generic/utils.js";
   import MetricPlot from "../../generic/plots/MetricPlot.svelte";
   import NodeInfo from "./NodeInfo.svelte";
@@ -98,21 +98,24 @@
   let extendedLegendData = null;
   $: if ($nodeJobsData?.data) {
-    // Get Shared State of Node: Only Build extended Legend For Shared Nodes
-    if ($nodeJobsData.data.jobs.count >= 1 && !$nodeJobsData.data.jobs.items[0].exclusive) {
+    // Build Extended for allocated nodes [Commented: Only Build extended Legend For Shared Nodes]
+    if ($nodeJobsData.data.jobs.count >= 1) { // "&& !$nodeJobsData.data.jobs.items[0].exclusive)"
       const accSet = Array.from(new Set($nodeJobsData.data.jobs.items
         .map((i) => i.resources
-          .filter((r) => r.hostname === nodeData.host)
-          .map((r) => r.accelerators)
+          .filter((r) => (r.hostname === nodeData.host) && r?.accelerators)
+          .map((r) => r?.accelerators)
         )
       )).flat(2)

       extendedLegendData = {}
       for (const accId of accSet) {
         const matchJob = $nodeJobsData.data.jobs.items.find((i) => i.resources.find((r) => r.accelerators.includes(accId)))
+        const matchUser = matchJob?.user ? matchJob.user : null
         extendedLegendData[accId] = {
-          user: matchJob?.user ? matchJob?.user : '-',
-          job: matchJob?.jobId ? matchJob?.jobId : '-',
+          user: (scrambleNames && matchUser)
+            ? scramble(matchUser)
+            : (matchUser ? matchUser : '-'),
+          job: matchJob?.jobId ? matchJob.jobId : '-',
         }
       }
       // Theoretically extendable for hwthreadIDs

@@ -0,0 +1,15 @@
import { mount } from 'svelte';
import {} from './header.entrypoint.js'
import Tags from './Tags.root.svelte'
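// Mount the Tags root component into the page; username, isAdmin, tagmap and
// clusterCockpitConfig are globals defined inline by the taglist template below.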
mount(Tags, {
target: document.getElementById('svelte-app'),
props: {
username: username,
isAdmin: isAdmin,
tagmap: tagmap,
},
context: new Map([
['cc-config', clusterCockpitConfig]
])
})

@@ -1,37 +1,15 @@
 {{define "content"}}
-<div class="container">
-    <div class="row justify-content-center">
-        <div class="col-10">
-            {{ range $tagType, $tagList := .Infos.tagmap }}
-            <div class="my-3 p-2 bg-secondary rounded text-white"> <!-- text-capitalize -->
-                Tag Type: <b>{{ $tagType }}</b>
-                <span style="float: right; padding-bottom: 0.4rem; padding-top: 0.4rem;" class="badge bg-light text-secondary">
-                    {{len $tagList}} Tag{{if ne (len $tagList) 1}}s{{end}}
-                </span>
-            </div>
-            {{ range $tagList }}
-            {{if eq .scope "global"}}
-            <a class="btn btn-outline-secondary" href="/monitoring/jobs/?tag={{ .id }}" role="button">
-                {{ .name }}
-                <span class="badge bg-primary mr-1">{{ .count }} Job{{if ne .count 1}}s{{end}}</span>
-                <span style="background-color:#c85fc8;" class="badge text-dark">Global</span>
-            </a>
-            {{else if eq .scope "admin"}}
-            <a class="btn btn-outline-secondary" href="/monitoring/jobs/?tag={{ .id }}" role="button">
-                {{ .name }}
-                <span class="badge bg-primary mr-1">{{ .count }} Job{{if ne .count 1}}s{{end}}</span>
-                <span style="background-color:#19e5e6;" class="badge text-dark">Admin</span>
-            </a>
-            {{else}}
-            <a class="btn btn-outline-secondary" href="/monitoring/jobs/?tag={{ .id }}" role="button">
-                {{ .name }}
-                <span class="badge bg-primary mr-1">{{ .count }} Job{{if ne .count 1}}s{{end}}</span>
-                <span class="badge bg-warning text-dark">Private</span>
-            </a>
-            {{end}}
-            {{end}}
-            {{end}}
-        </div>
-    </div>
-</div>
+<div id="svelte-app"></div>
+{{end}}
+{{define "stylesheets"}}
+<link rel='stylesheet' href='/build/taglist.css'>
+{{end}}
+{{define "javascript"}}
+<script>
+    const username = {{ .User.Username }};
+    const isAdmin = {{ .User.HasRole .Roles.admin }};
+    const tagmap = {{ .Infos.tagmap }};
+    const clusterCockpitConfig = {{ .Config }};
+</script>
+<script src='/build/taglist.js'></script>
 {{end}}

@@ -26,8 +26,7 @@ var frontendFiles embed.FS
 func ServeFiles() http.Handler {
 	publicFiles, err := fs.Sub(frontendFiles, "frontend/public")
 	if err != nil {
-		log.Fatalf("WEB/WEB > cannot find frontend public files")
-		panic(err)
+		log.Abortf("Serve Files: Could not find 'frontend/public' file directory.\nError: %s\n", err.Error())
 	}
 	return http.FileServer(http.FS(publicFiles))
 }
@@ -75,8 +74,7 @@ func init() {
 		templates[strings.TrimPrefix(path, "templates/")] = template.Must(template.Must(base.Clone()).ParseFS(templateFiles, path))
 		return nil
 	}); err != nil {
-		log.Fatalf("WEB/WEB > cannot find frontend template files")
-		panic(err)
+		log.Abortf("Web init(): Could not find frontend template files.\nError: %s\n", err.Error())
 	}

 	_ = base