diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index db99fb2..6c2fc9b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,7 +7,7 @@ jobs: - name: Install Go uses: actions/setup-go@v4 with: - go-version: 1.19.x + go-version: 1.20.x - name: Checkout code uses: actions/checkout@v3 - name: Build, Vet & Test diff --git a/LICENSE b/LICENSE index 790f298..b18d1d9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2022 NHR@FAU, University Erlangen-Nuremberg +Copyright (c) NHR@FAU, University Erlangen-Nuremberg Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile index 77c3a83..c6e1f34 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ TARGET = ./cc-backend VAR = ./var CFG = config.json .env FRONTEND = ./web/frontend -VERSION = 1.2.2 +VERSION = 1.3.0 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development') CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S") LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}' @@ -76,90 +76,3 @@ config.json: $(SVELTE_TARGETS): $(SVELTE_SRC) $(info ===> BUILD frontend) cd web/frontend && npm install && npm run build - -install: $(TARGET) - @WORKSPACE=$(PREFIX) - @if [ -z "$${WORKSPACE}" ]; then exit 1; fi - @mkdir --parents --verbose $${WORKSPACE}/usr/$(BINDIR) - @install -Dpm 755 $(TARGET) $${WORKSPACE}/usr/$(BINDIR)/$(TARGET) - @install -Dpm 600 configs/config.json $${WORKSPACE}/etc/$(TARGET)/$(TARGET).json - -.ONESHELL: -.PHONY: RPM -RPM: build/package/cc-backend.spec - @WORKSPACE="$${PWD}" - @SPECFILE="$${WORKSPACE}/build/package/cc-backend.spec" - # Setup RPM build tree - @eval $$(rpm --eval "ARCH='%{_arch}' RPMDIR='%{_rpmdir}' SOURCEDIR='%{_sourcedir}' SPECDIR='%{_specdir}' SRPMDIR='%{_srcrpmdir}' BUILDDIR='%{_builddir}'") - @mkdir --parents --verbose "$${RPMDIR}" "$${SOURCEDIR}" "$${SPECDIR}" "$${SRPMDIR}" "$${BUILDDIR}" - # Create source tarball - @COMMITISH="HEAD" - @VERS=$$(git describe --tags $${COMMITISH}) - @VERS=$${VERS#v} - @VERS=$$(echo $$VERS | sed -e s+'-'+'_'+g) - @if [ "$${VERS}" = "" ]; then VERS="$(VERSION)"; fi - @eval $$(rpmspec --query --queryformat "NAME='%{name}' VERSION='%{version}' RELEASE='%{release}' NVR='%{NVR}' NVRA='%{NVRA}'" --define="VERS $${VERS}" "$${SPECFILE}") - @PREFIX="$${NAME}-$${VERSION}" - @FORMAT="tar.gz" - @SRCFILE="$${SOURCEDIR}/$${PREFIX}.$${FORMAT}" - @git archive --verbose --format "$${FORMAT}" --prefix="$${PREFIX}/" --output="$${SRCFILE}" $${COMMITISH} - # Build RPM and SRPM - @rpmbuild -ba --define="VERS $${VERS}" --rmsource --clean "$${SPECFILE}" - # Report RPMs and SRPMs when in GitHub Workflow - @if [ "$${GITHUB_ACTIONS}" = true ]; then - @ RPMFILE="$${RPMDIR}/$${ARCH}/$${NVRA}.rpm" - @ SRPMFILE="$${SRPMDIR}/$${NVR}.src.rpm" - @ echo "RPM: $${RPMFILE}" - @ echo "SRPM: $${SRPMFILE}" - @ echo "::set-output name=SRPM::$${SRPMFILE}" - @ echo "::set-output name=RPM::$${RPMFILE}" - @fi - -.ONESHELL: -.PHONY: DEB -DEB: build/package/cc-backend.deb.control - @BASEDIR=$${PWD} - @WORKSPACE=$${PWD}/.dpkgbuild - @DEBIANDIR=$${WORKSPACE}/debian - @DEBIANBINDIR=$${WORKSPACE}/DEBIAN - @mkdir --parents --verbose $$WORKSPACE $$DEBIANBINDIR - #@mkdir --parents --verbose $$DEBIANDIR - @CONTROLFILE="$${BASEDIR}/build/package/cc-backend.deb.control" - @COMMITISH="HEAD" - @VERS=$$(git describe --tags --abbrev=0 $${COMMITISH}) - @VERS=$${VERS#v} - @VERS=$$(echo $$VERS 
| sed -e s+'-'+'_'+g) - @if [ "$${VERS}" = "" ]; then VERS="$(VERSION)"; fi - @ARCH=$$(uname -m) - @ARCH=$$(echo $$ARCH | sed -e s+'_'+'-'+g) - @if [ "$${ARCH}" = "x86-64" ]; then ARCH=amd64; fi - @PREFIX="$${NAME}-$${VERSION}_$${ARCH}" - @SIZE_BYTES=$$(du -bcs --exclude=.dpkgbuild "$${WORKSPACE}"/ | awk '{print $$1}' | head -1 | sed -e 's/^0\+//') - @SIZE="$$(awk -v size="$$SIZE_BYTES" 'BEGIN {print (size/1024)+1}' | awk '{print int($$0)}')" - #@sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANDIR}/control - @sed -e s+"{VERSION}"+"$$VERS"+g -e s+"{INSTALLED_SIZE}"+"$$SIZE"+g -e s+"{ARCH}"+"$$ARCH"+g $$CONTROLFILE > $${DEBIANBINDIR}/control - @mkdir --parents --verbose "$${WORKSPACE}"/$(VAR) - @touch "$${WORKSPACE}"/$(VAR)/job.db - @cd web/frontend && yarn install && yarn build && cd - - @go build -ldflags=${LD_FLAGS} ./cmd/cc-backend - @mkdir --parents --verbose $${WORKSPACE}/usr/$(BINDIR) - @cp $(TARGET) $${WORKSPACE}/usr/$(BINDIR)/$(TARGET) - @chmod 0755 $${WORKSPACE}/usr/$(BINDIR)/$(TARGET) - @mkdir --parents --verbose $${WORKSPACE}/etc/$(TARGET) - @cp configs/config.json $${WORKSPACE}/etc/$(TARGET)/$(TARGET).json - @chmod 0600 $${WORKSPACE}/etc/$(TARGET)/$(TARGET).json - @mkdir --parents --verbose $${WORKSPACE}/usr/lib/systemd/system - @cp build/package/$(TARGET).service $${WORKSPACE}/usr/lib/systemd/system/$(TARGET).service - @chmod 0644 $${WORKSPACE}/usr/lib/systemd/system/$(TARGET).service - @mkdir --parents --verbose $${WORKSPACE}/etc/default - @cp build/package/$(TARGET).config $${WORKSPACE}/etc/default/$(TARGET) - @chmod 0600 $${WORKSPACE}/etc/default/$(TARGET) - @mkdir --parents --verbose $${WORKSPACE}/usr/lib/sysusers.d - @cp build/package/$(TARGET).sysusers $${WORKSPACE}/usr/lib/sysusers.d/$(TARGET).conf - @chmod 0644 $${WORKSPACE}/usr/lib/sysusers.d/$(TARGET).conf - @DEB_FILE="cc-metric-store_$${VERS}_$${ARCH}.deb" - @dpkg-deb -b $${WORKSPACE} "$$DEB_FILE" - @rm -r "$${WORKSPACE}" - @if [ "$${GITHUB_ACTIONS}" = "true" ]; then - @ echo "::set-output name=DEB::$${DEB_FILE}" - @fi diff --git a/README.md b/README.md index 36d3c7d..5ce9125 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,16 @@ # NOTE -Please have a look at the [Release Notes](https://github.com/ClusterCockpit/cc-backend/blob/master/ReleaseNotes.md) for breaking changes! + +Please have a look at the [Release +Notes](https://github.com/ClusterCockpit/cc-backend/blob/master/ReleaseNotes.md) +for breaking changes! # ClusterCockpit REST and GraphQL API backend [![Build](https://github.com/ClusterCockpit/cc-backend/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-backend/actions/workflows/test.yml) This is a Golang backend implementation for a REST and GraphQL API according to -the [ClusterCockpit specifications](https://github.com/ClusterCockpit/cc-specifications). It also +the [ClusterCockpit +specifications](https://github.com/ClusterCockpit/cc-specifications). It also includes a web interface for ClusterCockpit. This implementation replaces the previous PHP Symfony based ClusterCockpit web interface. The reasons for switching from PHP Symfony to a Golang based solution are explained @@ -14,31 +18,31 @@ switching from PHP Symfony to a Golang based solution are explained ## Overview +This is a Golang web backend for the ClusterCockpit job-specific performance +monitoring framework. It provides a REST API for integrating ClusterCockpit with +an HPC cluster batch system and external analysis scripts. 
Data exchange between +the web front-end and the back-end is based on a GraphQL API. The web frontend +is also served by the backend using [Svelte](https://svelte.dev/) components. +Layout and styling are based on [Bootstrap 5](https://getbootstrap.com/) using +[Bootstrap Icons](https://icons.getbootstrap.com/). -This is a Golang web backend for the ClusterCockpit job-specific performance monitoring framework. -It provides a REST API for integrating ClusterCockpit with an HPC cluster batch system and external analysis scripts. -Data exchange between the web front-end and the back-end is based on a GraphQL API. -The web frontend is also served by the backend using [Svelte](https://svelte.dev/) components. -Layout and styling are based on [Bootstrap 5](https://getbootstrap.com/) using [Bootstrap Icons](https://icons.getbootstrap.com/). - -The backend uses [SQLite 3](https://sqlite.org/) as a relational SQL database by default. -Optionally it can use a MySQL/MariaDB database server. -While there are metric data backends for the InfluxDB and Prometheus time series databases, the only tested and supported setup is to use cc-metric-store as the metric data backend. -Documentation on how to integrate ClusterCockpit with other time series databases will be added in the future. +The backend uses [SQLite 3](https://sqlite.org/) as a relational SQL database by +default. Optionally it can use a MySQL/MariaDB database server. While there are +metric data backends for the InfluxDB and Prometheus time series databases, the +only tested and supported setup is to use cc-metric-store as the metric data +backend. Documentation on how to integrate ClusterCockpit with other time series +databases will be added in the future. Completed batch jobs are stored in a file-based job archive according to -[this specification] (https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive). +[this specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive). The backend supports authentication via local accounts, an external LDAP directory, and JWT tokens. Authorization for APIs is implemented with [JWT](https://jwt.io/) tokens created with public/private key encryption. -You find more detailed information here: -* `./configs/README.md`: Infos about configuration and setup of cc-backend. -* `./init/README.md`: Infos on how to setup cc-backend as systemd service on Linux. -* `./tools/README.md`: Infos on the JWT authorizatin token workflows in ClusterCockpit. -* `./docs`: You can find further documentation here. There is also a Hands-on tutorial that is recommended to get familiar with the ClusterCockpit setup. +You can find detailed documentation on the [ClusterCockpit +Webpage](https://clustercockpit.org). -**NOTE** +## Build requirements ClusterCockpit requires a current version of the golang toolchain and node.js. You can check `go.mod` to see what is the current minimal golang version needed. @@ -49,7 +53,7 @@ on the Go standard library, it is crucial for security and performance to use a current version of golang. In addition, an old golang toolchain may limit the supported versions of third-party packages. -## How to try ClusterCockpit with a demo setup. +## How to try ClusterCockpit with a demo setup We provide a shell script that downloads demo data and automatically starts the cc-backend. You will need `wget`, `go`, `node`, `npm` in your path to @@ -64,28 +68,32 @@ cd ./cc-backend You can also try the demo using the latest release binary. 
Create a folder and put the release binary `cc-backend` into this folder. Execute the following steps: -``` -$ ./cc-backend -init -$ vim config.json (Add a second cluster entry and name the clusters alex and fritz) -$ wget https://hpc-mover.rrze.uni-erlangen.de/HPC-Data/0x7b58aefb/eig7ahyo6fo2bais0ephuf2aitohv1ai/job-archive-demo.tar -$ tar xf job-archive-demo.tar -$ ./cc-backend -init-db -add-user demo:admin:demo -loglevel info -$ ./cc-backend -server -dev -loglevel info + +``` shell +./cc-backend -init +vim config.json (Add a second cluster entry and name the clusters alex and fritz) +wget https://hpc-mover.rrze.uni-erlangen.de/HPC-Data/0x7b58aefb/eig7ahyo6fo2bais0ephuf2aitohv1ai/job-archive-demo.tar +tar xf job-archive-demo.tar +./cc-backend -init-db -add-user demo:admin:demo -loglevel info +./cc-backend -server -dev -loglevel info ``` -You can access the web interface at http://localhost:8080. +You can access the web interface at [http://localhost:8080](http://localhost:8080). Credentials for login are `demo:demo`. Please note that some views do not work without a metric backend (e.g., the Analysis, Systems and Status views). -## Howto build and run +## How to build and run -There is a Makefile to automate the build of cc-backend. The Makefile supports the following targets: -* `$ make`: Initialize `var` directory and build svelte frontend and backend binary. Note that there is no proper prerequesite handling. Any change of frontend source files will result in a complete rebuild. -* `$ make clean`: Clean go build cache and remove binary. -* `$ make test`: Run the tests that are also run in the GitHub workflow setup. +There is a Makefile to automate the build of cc-backend. The Makefile supports +the following targets: + +* `make`: Initialize `var` directory and build svelte frontend and backend binary. Note that there is no proper prerequisite handling. Any change of frontend source files will result in a complete rebuild. +* `make clean`: Clean go build cache and remove binary. +* `make test`: Run the tests that are also run in the GitHub workflow setup. A common workflow for setting up cc-backend from scratch is: + ```sh git clone https://github.com/ClusterCockpit/cc-backend.git @@ -116,89 +124,45 @@ ln -s ./var/job-archive ./cc-backend -help ``` -### Run as systemd daemon - -To run this program as a daemon, cc-backend comes with a [example systemd setup](./init/README.md). - -## Configuration and setup - -cc-backend can be used as a local web interface for an existing job archive or -as a server for the ClusterCockpit monitoring framework. - -Create your job archive according to [this specification] (https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive). -At least one cluster directory with a valid `cluster.json` file is required. If -you configure the job archive from scratch, you must also create the job -archive version file that contains the job archive version as an integer. -You can retrieve the currently supported version by running the following -command: -``` -$ ./cc-backend -version -``` -It is ok to have no jobs in the job archive. - -### Configuration - -A configuration file in JSON format must be specified with `-config` to override the default settings. -By default, a `config.json` file located in the current directory of the `cc-backend` process will be loaded even without the `-config` flag. -Documentation of all supported configuration and command line options can be found [here](./configs/README.md).
- -## Database initialization and migration - -Each `cc-backend` version supports a specific database version. -At startup, the version of the sqlite database is checked and `cc-backend` terminates if the version does not match. -`cc-backend` supports the migration of the database schema to the required version with the command line option `-migrate-db`. -If the database file does not exist yet, it will be created and initialized with the command line option `-migrate-db`. -If you want to use a newer database version with an older version of cc-backend, you can downgrade a database with the external tool [migrate](https://github.com/golang-migrate/migrate). -In this case, you must specify the path to the migration files in a current source tree: `./internal/repository/migrations/`. - -## Development and testing -When making changes to the REST or GraphQL API, the appropriate code generators must be used. -You must always rebuild `cc-backend` after updating the API files. - -### Update GraphQL schema - -This project uses [gqlgen](https://github.com/99designs/gqlgen) for the GraphQL API. -The schema can be found in `./api/schema.graphqls`. -After changing it, you need to run `go run github.com/99designs/gqlgen`, which will update `./internal/graph/model`. -If new resolvers are needed, they will be added to `./internal/graph/schema.resolvers.go`, where you will then need to implement them. -If you start `cc-backend` with the `-dev` flag, the GraphQL Playground UI is available at http://localhost:8080/playground. - -### Update Swagger UI - -This project integrates [swagger ui] (https://swagger.io/tools/swagger-ui/) to document and test its REST API. -The swagger documentation files can be found in `./api/`. -You can generate the swagger-ui configuration by running `go run github.com/swaggo/swag/cmd/swag init -d ./internal/api,./pkg/schema -g rest.go -o ./api `. -You need to move the created `./api/docs.go` to `./internal/api/docs.go`. -If you start cc-backend with the `-dev` flag, the Swagger interface is available -at http://localhost:8080/swagger/. -You must enter a JWT key for a user with the API role. - -**NOTE** - -The user who owns the JWT key must not be logged into the same browser (have a -running session), or the Swagger requests will not work. It is recommended to -create a separate user that has only the API role. - -## Development and testing -In case the REST or GraphQL API is changed the according code generators have to be used. - ## Project file structure -- [`api/`](https://github.com/ClusterCockpit/cc-backend/tree/master/api) contains the API schema files for the REST and GraphQL APIs. The REST API is documented in the OpenAPI 3.0 format in [./api/openapi.yaml](./api/openapi.yaml). -- [`cmd/cc-backend`](https://github.com/ClusterCockpit/cc-backend/tree/master/cmd/cc-backend) contains `main.go` for the main application. -- [`configs/`](https://github.com/ClusterCockpit/cc-backend/tree/master/configs) contains documentation about configuration and command line options and required environment variables. A sample configuration file is provided. -- [`docs/`](https://github.com/ClusterCockpit/cc-backend/tree/master/docs) contains more in-depth documentation. -- [`init/`](https://github.com/ClusterCockpit/cc-backend/tree/master/init) contains an example of setting up systemd for production use. -- [`internal/`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal) contains library source code that is not intended for use by others. 
- [`pkg/`](https://github.com/ClusterCockpit/cc-backend/tree/master/pkg) contains Go packages that can be used by other projects. -- [`tools/`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools) Additional command line helper tools. - - [`archive-manager`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-manager) Commands for getting infos about and existing job archive. - - [`archive-migration`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-migration) Tool to migrate from previous to current job archive version. - - [`convert-pem-pubkey`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/convert-pem-pubkey) Tool to convert external pubkey for use in `cc-backend`. - - [`gen-keypair`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/gen-keypair) contains a small application to generate a compatible JWT keypair. You find documentation on how to use it [here](https://github.com/ClusterCockpit/cc-backend/blob/master/docs/JWT-Handling.md). -- [`web/`](https://github.com/ClusterCockpit/cc-backend/tree/master/web) Server-side templates and frontend-related files: - - [`frontend`](https://github.com/ClusterCockpit/cc-backend/tree/master/web/frontend) Svelte components and static assets for the frontend UI - - [`templates`](https://github.com/ClusterCockpit/cc-backend/tree/master/web/templates) Server-side Go templates -- [`gqlgen.yml`](https://github.com/ClusterCockpit/cc-backend/blob/master/gqlgen.yml) Configures the behaviour and generation of [gqlgen](https://github.com/99designs/gqlgen). -- [`startDemo.sh`](https://github.com/ClusterCockpit/cc-backend/blob/master/startDemo.sh) is a shell script that sets up demo data, and builds and starts `cc-backend`. - +* [`api/`](https://github.com/ClusterCockpit/cc-backend/tree/master/api) +contains the API schema files for the REST and GraphQL APIs. The REST API is +documented in the OpenAPI 3.0 format in +[./api/openapi.yaml](./api/openapi.yaml). +* [`cmd/cc-backend`](https://github.com/ClusterCockpit/cc-backend/tree/master/cmd/cc-backend) +contains `main.go` for the main application. +* [`configs/`](https://github.com/ClusterCockpit/cc-backend/tree/master/configs) +contains documentation about configuration and command line options and required +environment variables. A sample configuration file is provided. +* [`docs/`](https://github.com/ClusterCockpit/cc-backend/tree/master/docs) +contains more in-depth documentation. +* [`init/`](https://github.com/ClusterCockpit/cc-backend/tree/master/init) +contains an example of setting up systemd for production use. +* [`internal/`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal) +contains library source code that is not intended for use by others. +* [`pkg/`](https://github.com/ClusterCockpit/cc-backend/tree/master/pkg) +contains Go packages that can be used by other projects. +* [`tools/`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools) +Additional command line helper tools. + * [`archive-manager`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-manager) + Commands for getting info about an existing job archive. + * [`archive-migration`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-migration) + Tool to migrate from previous to current job archive version. + * [`convert-pem-pubkey`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/convert-pem-pubkey) + Tool to convert external pubkey for use in `cc-backend`. 
* [`gen-keypair`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/gen-keypair) + contains a small application to generate a compatible JWT keypair. You can find + documentation on how to use it + [here](https://github.com/ClusterCockpit/cc-backend/blob/master/docs/JWT-Handling.md). +* [`web/`](https://github.com/ClusterCockpit/cc-backend/tree/master/web) +Server-side templates and frontend-related files: + * [`frontend`](https://github.com/ClusterCockpit/cc-backend/tree/master/web/frontend) + Svelte components and static assets for the frontend UI + * [`templates`](https://github.com/ClusterCockpit/cc-backend/tree/master/web/templates) + Server-side Go templates +* [`gqlgen.yml`](https://github.com/ClusterCockpit/cc-backend/blob/master/gqlgen.yml) +Configures the behaviour and generation of +[gqlgen](https://github.com/99designs/gqlgen). +* [`startDemo.sh`](https://github.com/ClusterCockpit/cc-backend/blob/master/startDemo.sh) +is a shell script that sets up demo data, and builds and starts `cc-backend`. diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 54aaf90..a4f085e 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -1,40 +1,12 @@ -# `cc-backend` version 1.2.2 +# `cc-backend` version 1.3.0 -Supports job archive version 1 and database version 6. +Supports job archive version 1 and database version 7. This is a minor release of `cc-backend`, the API backend and frontend implementation of ClusterCockpit. +For release-specific notes, visit the [ClusterCockpit Documentation](https://clustercockpit.org/docs/release/). -** Breaking changes ** +## Breaking changes -* The LDAP configuration option `user_filter` was changed and now should not include -the uid wildcard. Example: - - Old: `"user_filter": "(&(objectclass=posixAccount)(uid=*))"` - - New: `"user_filter": "(&(objectclass=posixAccount))"` - -* The aggregate job statistic core hours is now computed using the job table -column `num_hwthreads`. In a future release this column will be renamed to -`num_cores`. For correct display of core hours `num_hwthreads` must be correctly -filled on job start. If your existing jobs do not provide the correct value in -this column then you can set this with one SQL INSERT statement. This only applies -if you have exclusive jobs, only. Please be aware that we treat this column as -it is the number of cores. In case you have SMT enabled and `num_hwthreads` -is not the number of cores the core hours will be too high by a factor! - -* The jwts key is now mandatory in config.json. It has to set max-age for - validity. Some key names have changed, please refer to - [config documentation](./configs/README.md) for details. - -* The following API endpoints are only accessible from IPs registered using the apiAllowedIPs configuration option: - - `/users/` [GET, POST, DELETE] - - `/user/{id}` [POST] - -** NOTE ** -If you are using the sqlite3 backend the `PRAGMA` option `foreign_keys` must be -explicitly set to ON. If using the sqlite3 console it is per default set to -OFF! On every console session you must set: -``` -sqlite> PRAGMA foreign_keys = ON; - -``` -Otherwise if you delete jobs the jobtag relation table will not be updated accordingly! +* This release fixes bugs in the MySQL/MariaDB database schema. For this reason + you have to migrate your database using the `-migrate-db` switch. diff --git a/api/schema.graphqls b/api/schema.graphqls index 69e32e2..5c5bc2c 100644 --- a/api/schema.graphqls +++ b/api/schema.graphqls @@ -28,6 +28,11 @@ type Job { resources: [Resource!]! 
concurrentJobs: JobLinkResultList + memUsedMax: Float + flopsAnyAvg: Float + memBwAvg: Float + loadAvg: Float + metaData: Any userData: User } @@ -198,7 +203,7 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! - jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! + jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -273,6 +278,7 @@ type JobResultList { offset: Int limit: Int count: Int + hasNextPage: Boolean } type JobLinkResultList { @@ -286,6 +292,19 @@ type HistoPoint { value: Int! } +type MetricHistoPoints { + metric: String! + unit: String! + data: [MetricHistoPoint!] +} + +type MetricHistoPoint { + bin: Int + count: Int! + min: Int + max: Int +} + type JobsStatistics { id: ID! # If `groupBy` was used, ID of the user/project/cluster name: String! # if User-Statistics: Given Name of Account (ID) Owner @@ -303,6 +322,7 @@ type JobsStatistics { histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs + histMetrics: [MetricHistoPoints!]! # metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average } input PageRequest { diff --git a/api/swagger.json b/api/swagger.json index 6c3bc5c..7f5eaf7 100644 --- a/api/swagger.json +++ b/api/swagger.json @@ -17,6 +17,63 @@ "host": "localhost:8080", "basePath": "/api", "paths": { + "/clusters/": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Get a list of all cluster configs. 
Specific cluster can be requested using query parameter.", + "produces": [ + "application/json" + ], + "tags": [ + "Cluster query" + ], + "summary": "Lists all cluster configs", + "parameters": [ + { + "type": "string", + "description": "Job Cluster", + "name": "cluster", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Array of clusters", + "schema": { + "$ref": "#/definitions/api.GetClustersApiResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + } + }, "/jobs/": { "get": { "security": [ @@ -327,6 +384,76 @@ } } }, + "/jobs/edit_meta/{id}": { + "post": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Edit key value pairs in job metadata json\nIf a key already exists, its content will be overwritten", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Job add and modify" + ], + "summary": "Edit meta-data json", + "parameters": [ + { + "type": "integer", + "description": "Job Database ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Key value pair to add", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.EditMetaRequest" + } + } + ], + "responses": { + "200": { + "description": "Updated job resource", + "schema": { + "$ref": "#/definitions/schema.Job" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "404": { + "description": "Job does not exist", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + } + }, "/jobs/start_job/": { "post": { "security": [ @@ -624,6 +751,80 @@ } }, "/jobs/{id}": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Job to get is specified by database ID\nReturns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.", + "produces": [ + "application/json" + ], + "tags": [ + "Job query" + ], + "summary": "Get job meta and optionally all metric data", + "parameters": [ + { + "type": "integer", + "description": "Database ID of Job", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Include all available metrics", + "name": "all-metrics", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Job resource", + "schema": { + "$ref": "#/definitions/api.GetJobApiResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "404": { + "description": "Resource not found", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "422": { + 
"description": "Unprocessable Entity: finding job failed: sql: no rows in result set", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + }, "post": { "security": [ { @@ -640,7 +841,7 @@ "tags": [ "Job query" ], - "summary": "Get complete job meta and metric data", + "summary": "Get job meta and configurable metric data", "parameters": [ { "type": "integer", @@ -1114,6 +1315,19 @@ } } }, + "api.EditMetaRequest": { + "type": "object", + "properties": { + "key": { + "type": "string", + "example": "jobScript" + }, + "value": { + "type": "string", + "example": "bash script" + } + } + }, "api.ErrorResponse": { "type": "object", "properties": { @@ -1127,6 +1341,18 @@ } } }, + "api.GetClustersApiResponse": { + "type": "object", + "properties": { + "clusters": { + "description": "Array of clusters", + "type": "array", + "items": { + "$ref": "#/definitions/schema.Cluster" + } + } + } + }, "api.GetJobApiResponse": { "type": "object", "properties": { @@ -1222,6 +1448,40 @@ } } }, + "schema.Accelerator": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "model": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "schema.Cluster": { + "type": "object", + "properties": { + "metricConfig": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.MetricConfig" + } + }, + "name": { + "type": "string" + }, + "subClusters": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.SubCluster" + } + } + } + }, "schema.Job": { "description": "Information of a HPC job.", "type": "object", @@ -1252,6 +1512,10 @@ "minimum": 0, "example": 1 }, + "flopsAnyAvg": { + "description": "FlopsAnyAvg as Float64", + "type": "number" + }, "id": { "description": "The unique identifier of a job in the database", "type": "integer" @@ -1278,6 +1542,18 @@ ], "example": "completed" }, + "loadAvg": { + "description": "LoadAvg as Float64", + "type": "number" + }, + "memBwAvg": { + "description": "MemBwAvg as Float64", + "type": "number" + }, + "memUsedMax": { + "description": "MemUsedMax as Float64", + "type": "number" + }, "metaData": { "description": "Additional information about the job", "type": "object", @@ -1604,6 +1880,44 @@ } } }, + "schema.MetricConfig": { + "type": "object", + "properties": { + "aggregation": { + "type": "string" + }, + "alert": { + "type": "number" + }, + "caution": { + "type": "number" + }, + "name": { + "type": "string" + }, + "normal": { + "type": "number" + }, + "peak": { + "type": "number" + }, + "scope": { + "$ref": "#/definitions/schema.MetricScope" + }, + "subClusters": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.SubClusterConfig" + } + }, + "timestep": { + "type": "integer" + }, + "unit": { + "$ref": "#/definitions/schema.Unit" + } + } + }, "schema.MetricScope": { "type": "string", "enum": [ @@ -1639,6 +1953,17 @@ } } }, + "schema.MetricValue": { + "type": "object", + "properties": { + "unit": { + "$ref": "#/definitions/schema.Unit" + }, + "value": { + "type": "number" + } + } + }, "schema.Resource": { "description": "A resource used by a job", "type": "object", @@ -1719,6 +2044,64 @@ } } }, + "schema.SubCluster": { + "type": "object", + "properties": { + "coresPerSocket": { + "type": "integer" + }, + "flopRateScalar": { + "$ref": "#/definitions/schema.MetricValue" + }, + "flopRateSimd": { + "$ref": "#/definitions/schema.MetricValue" + }, + "memoryBandwidth": { + "$ref": 
"#/definitions/schema.MetricValue" + }, + "name": { + "type": "string" + }, + "nodes": { + "type": "string" + }, + "processorType": { + "type": "string" + }, + "socketsPerNode": { + "type": "integer" + }, + "threadsPerCore": { + "type": "integer" + }, + "topology": { + "$ref": "#/definitions/schema.Topology" + } + } + }, + "schema.SubClusterConfig": { + "type": "object", + "properties": { + "alert": { + "type": "number" + }, + "caution": { + "type": "number" + }, + "name": { + "type": "string" + }, + "normal": { + "type": "number" + }, + "peak": { + "type": "number" + }, + "remove": { + "type": "boolean" + } + } + }, "schema.Tag": { "description": "Defines a tag using name and type.", "type": "object", @@ -1739,6 +2122,59 @@ } } }, + "schema.Topology": { + "type": "object", + "properties": { + "accelerators": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.Accelerator" + } + }, + "core": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "die": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "memoryDomain": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "node": { + "type": "array", + "items": { + "type": "integer" + } + }, + "socket": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + } + } + }, "schema.Unit": { "type": "object", "properties": { diff --git a/api/swagger.yaml b/api/swagger.yaml index cf3b3e3..f47ac3f 100644 --- a/api/swagger.yaml +++ b/api/swagger.yaml @@ -50,6 +50,15 @@ definitions: msg: type: string type: object + api.EditMetaRequest: + properties: + key: + example: jobScript + type: string + value: + example: bash script + type: string + type: object api.ErrorResponse: properties: error: @@ -59,6 +68,14 @@ definitions: description: Statustext of Errorcode type: string type: object + api.GetClustersApiResponse: + properties: + clusters: + description: Array of clusters + items: + $ref: '#/definitions/schema.Cluster' + type: array + type: object api.GetJobApiResponse: properties: data: @@ -124,6 +141,28 @@ definitions: - jobState - stopTime type: object + schema.Accelerator: + properties: + id: + type: string + model: + type: string + type: + type: string + type: object + schema.Cluster: + properties: + metricConfig: + items: + $ref: '#/definitions/schema.MetricConfig' + type: array + name: + type: string + subClusters: + items: + $ref: '#/definitions/schema.SubCluster' + type: array + type: object schema.Job: description: Information of a HPC job. 
properties: @@ -150,6 +189,9 @@ definitions: maximum: 2 minimum: 0 type: integer + flopsAnyAvg: + description: FlopsAnyAvg as Float64 + type: number id: description: The unique identifier of a job in the database type: integer @@ -169,6 +211,15 @@ definitions: - timeout - out_of_memory example: completed + loadAvg: + description: LoadAvg as Float64 + type: number + memBwAvg: + description: MemBwAvg as Float64 + type: number + memUsedMax: + description: MemUsedMax as Float64 + type: number metaData: additionalProperties: type: string @@ -427,6 +478,31 @@ definitions: unit: $ref: '#/definitions/schema.Unit' type: object + schema.MetricConfig: + properties: + aggregation: + type: string + alert: + type: number + caution: + type: number + name: + type: string + normal: + type: number + peak: + type: number + scope: + $ref: '#/definitions/schema.MetricScope' + subClusters: + items: + $ref: '#/definitions/schema.SubClusterConfig' + type: array + timestep: + type: integer + unit: + $ref: '#/definitions/schema.Unit' + type: object schema.MetricScope: enum: - invalid_scope @@ -454,6 +530,13 @@ definitions: min: type: number type: object + schema.MetricValue: + properties: + unit: + $ref: '#/definitions/schema.Unit' + value: + type: number + type: object schema.Resource: description: A resource used by a job properties: @@ -508,6 +591,44 @@ definitions: type: array type: object type: object + schema.SubCluster: + properties: + coresPerSocket: + type: integer + flopRateScalar: + $ref: '#/definitions/schema.MetricValue' + flopRateSimd: + $ref: '#/definitions/schema.MetricValue' + memoryBandwidth: + $ref: '#/definitions/schema.MetricValue' + name: + type: string + nodes: + type: string + processorType: + type: string + socketsPerNode: + type: integer + threadsPerCore: + type: integer + topology: + $ref: '#/definitions/schema.Topology' + type: object + schema.SubClusterConfig: + properties: + alert: + type: number + caution: + type: number + name: + type: string + normal: + type: number + peak: + type: number + remove: + type: boolean + type: object schema.Tag: description: Defines a tag using name and type. properties: @@ -523,6 +644,41 @@ definitions: example: Debug type: string type: object + schema.Topology: + properties: + accelerators: + items: + $ref: '#/definitions/schema.Accelerator' + type: array + core: + items: + items: + type: integer + type: array + type: array + die: + items: + items: + type: integer + type: array + type: array + memoryDomain: + items: + items: + type: integer + type: array + type: array + node: + items: + type: integer + type: array + socket: + items: + items: + type: integer + type: array + type: array + type: object schema.Unit: properties: base: @@ -543,6 +699,43 @@ info: title: ClusterCockpit REST API version: 1.0.0 paths: + /clusters/: + get: + description: Get a list of all cluster configs. Specific cluster can be requested + using query parameter. 
+ parameters: + - description: Job Cluster + in: query + name: cluster + type: string + produces: + - application/json + responses: + "200": + description: Array of clusters + schema: + $ref: '#/definitions/api.GetClustersApiResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.ErrorResponse' + "401": + description: Unauthorized + schema: + $ref: '#/definitions/api.ErrorResponse' + "403": + description: Forbidden + schema: + $ref: '#/definitions/api.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.ErrorResponse' + security: + - ApiKeyAuth: [] + summary: Lists all cluster configs + tags: + - Cluster query /jobs/: get: description: |- @@ -609,6 +802,57 @@ tags: - Job query /jobs/{id}: + get: + description: |- + Job to get is specified by database ID + Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'. + parameters: + - description: Database ID of Job + in: path + name: id + required: true + type: integer + - description: Include all available metrics + in: query + name: all-metrics + type: boolean + produces: + - application/json + responses: + "200": + description: Job resource + schema: + $ref: '#/definitions/api.GetJobApiResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.ErrorResponse' + "401": + description: Unauthorized + schema: + $ref: '#/definitions/api.ErrorResponse' + "403": + description: Forbidden + schema: + $ref: '#/definitions/api.ErrorResponse' + "404": + description: Resource not found + schema: + $ref: '#/definitions/api.ErrorResponse' + "422": + description: 'Unprocessable Entity: finding job failed: sql: no rows in + result set' + schema: + $ref: '#/definitions/api.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.ErrorResponse' + security: + - ApiKeyAuth: [] + summary: Get job meta and optionally all metric data + tags: + - Job query post: consumes: - application/json @@ -663,7 +907,7 @@ paths: $ref: '#/definitions/api.ErrorResponse' security: - ApiKeyAuth: [] - summary: Get complete job meta and metric data + summary: Get job meta and configurable metric data tags: - Job query /jobs/delete_job/: @@ -810,6 +1054,53 @@ summary: Remove a job from the sql database tags: - Job remove + /jobs/edit_meta/{id}: + post: + consumes: + - application/json + description: |- + Edit key value pairs in job metadata json + If a key already exists, its content will be overwritten + parameters: + - description: Job Database ID + in: path + name: id + required: true + type: integer + - description: Key value pair to add + in: body + name: request + required: true + schema: + $ref: '#/definitions/api.EditMetaRequest' + produces: + - application/json + responses: + "200": + description: Updated job resource + schema: + $ref: '#/definitions/schema.Job' + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.ErrorResponse' + "401": + description: Unauthorized + schema: + $ref: '#/definitions/api.ErrorResponse' + "404": + description: Job does not exist + schema: + $ref: '#/definitions/api.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/api.ErrorResponse' + security: + - ApiKeyAuth: [] + summary: Edit meta-data json + tags: + - Job add and modify /jobs/start_job/: post: consumes: diff --git a/build/package/cc-backend.config b/build/package/cc-backend.config deleted file mode 100644 index e80285b..0000000 --- 
a/build/package/cc-backend.config +++ /dev/null @@ -1,17 +0,0 @@ -CC_USER=clustercockpit - -CC_GROUP=clustercockpit - -CC_HOME=/tmp - -LOG_DIR=/var/log - -DATA_DIR=/var/run/cc-backend - -MAX_OPEN_FILES=10000 - -CONF_DIR=/etc/cc-backend - -CONF_FILE=/etc/cc-backend/cc-backend.json - -RESTART_ON_UPGRADE=true diff --git a/build/package/cc-backend.deb.control b/build/package/cc-backend.deb.control deleted file mode 100644 index 1612c12..0000000 --- a/build/package/cc-backend.deb.control +++ /dev/null @@ -1,12 +0,0 @@ -Package: cc-backend -Version: {VERSION} -Installed-Size: {INSTALLED_SIZE} -Architecture: {ARCH} -Maintainer: thomas.gruber@fau.de -Depends: libc6 (>= 2.2.1) -Build-Depends: debhelper-compat (= 13), git, golang-go, npm, yarn -Description: ClusterCockpit backend and web frontend -Homepage: https://github.com/ClusterCockpit/cc-backend -Source: cc-backend -Rules-Requires-Root: no - diff --git a/build/package/cc-backend.service b/build/package/cc-backend.service deleted file mode 100644 index e29c2e7..0000000 --- a/build/package/cc-backend.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=ClusterCockpit backend and web frontend (cc-backend) -Documentation=https://github.com/ClusterCockpit/cc-backend -Wants=network-online.target -After=network-online.target - -[Service] -EnvironmentFile=/etc/default/cc-backend -Type=simple -User=clustercockpit -Group=clustercockpit -Restart=on-failure -TimeoutStopSec=100 -LimitNOFILE=infinity -ExecStart=/usr/bin/cc-backend --config ${CONF_FILE} - -[Install] -WantedBy=multi-user.target diff --git a/build/package/cc-backend.spec b/build/package/cc-backend.spec deleted file mode 100644 index d96d94e..0000000 --- a/build/package/cc-backend.spec +++ /dev/null @@ -1,70 +0,0 @@ -Name: cc-backend -Version: %{VERS} -Release: 1%{?dist} -Summary: ClusterCockpit backend and web frontend - -License: MIT -Source0: %{name}-%{version}.tar.gz - -#BuildRequires: go-toolset -#BuildRequires: systemd-rpm-macros -#BuildRequires: npm - -Provides: %{name} = %{version} - -%description -ClusterCockpit backend and web frontend - -%global debug_package %{nil} - -%prep -%autosetup - - -%build -#CURRENT_TIME=$(date +%Y-%m-%d:T%H:%M:\%S) -#LD_FLAGS="-s -X main.buildTime=${CURRENT_TIME} -X main.version=%{VERS}" -mkdir ./var -touch ./var/job.db -cd web/frontend && yarn install && yarn build && cd - -go build -ldflags="-s -X main.version=%{VERS}" ./cmd/cc-backend - - -%install -# Install cc-backend -#make PREFIX=%{buildroot} install -install -Dpm 755 cc-backend %{buildroot}/%{_bindir}/%{name} -install -Dpm 0600 configs/config.json %{buildroot}%{_sysconfdir}/%{name}/%{name}.json -# Integrate into system -install -Dpm 0644 build/package/%{name}.service %{buildroot}%{_unitdir}/%{name}.service -install -Dpm 0600 build/package/%{name}.config %{buildroot}%{_sysconfdir}/default/%{name} -install -Dpm 0644 build/package/%{name}.sysusers %{buildroot}%{_sysusersdir}/%{name}.conf - - -%check -# go test should be here... 
:) - -%pre -%sysusers_create_package scripts/%{name}.sysusers - -%post -%systemd_post %{name}.service - -%preun -%systemd_preun %{name}.service - -%files -# Binary -%attr(-,clustercockpit,clustercockpit) %{_bindir}/%{name} -# Config -%dir %{_sysconfdir}/%{name} -%attr(0600,clustercockpit,clustercockpit) %config(noreplace) %{_sysconfdir}/%{name}/%{name}.json -# Systemd -%{_unitdir}/%{name}.service -%{_sysconfdir}/default/%{name} -%{_sysusersdir}/%{name}.conf - -%changelog -* Mon Mar 07 2022 Thomas Gruber - 0.1 -- Initial metric store implementation - diff --git a/build/package/cc-backend.sysusers b/build/package/cc-backend.sysusers deleted file mode 100644 index 5d4abc5..0000000 --- a/build/package/cc-backend.sysusers +++ /dev/null @@ -1,2 +0,0 @@ -#Type Name ID GECOS Home directory Shell -u clustercockpit - "User for ClusterCockpit" /run/cc-backend /sbin/nologin diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index e956503..9d084f2 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -134,7 +134,7 @@ func initEnv() { } func main() { - var flagReinitDB, flagInit, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagDev, flagVersion, flagLogDateTime bool + var flagReinitDB, flagInit, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagRevertDB, flagForceDB, flagDev, flagVersion, flagLogDateTime bool var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize sqlite database file, config.json and .env") flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)") @@ -144,6 +144,8 @@ func main() { flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI") flag.BoolVar(&flagVersion, "version", false, "Show version information and exit") flag.BoolVar(&flagMigrateDB, "migrate-db", false, "Migrate database to supported version and exit") + flag.BoolVar(&flagRevertDB, "revert-db", false, "Migrate database to previous version and exit") + flag.BoolVar(&flagForceDB, "force-db", false, "Force database version, clear dirty flag and exit") flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages") flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`") flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. 
Argument format: `:[admin,support,manager,api,user]:`") @@ -205,6 +207,22 @@ func main() { os.Exit(0) } + if flagRevertDB { + err := repository.RevertDB(config.Keys.DBDriver, config.Keys.DB) + if err != nil { + log.Fatal(err) + } + os.Exit(0) + } + + if flagForceDB { + err := repository.ForceDB(config.Keys.DBDriver, config.Keys.DB) + if err != nil { + log.Fatal(err) + } + os.Exit(0) + } + repository.Connect(config.Keys.DBDriver, config.Keys.DB) db := repository.GetConnection() @@ -268,6 +286,7 @@ func main() { fmt.Printf("MAIN > JWT for '%s': %s\n", user.Username, jwt) } + } else if flagNewUser != "" || flagDelUser != "" { log.Fatal("arguments --add-user and --del-user can only be used if authentication is enabled") } @@ -325,9 +344,19 @@ func main() { r := mux.NewRouter() buildInfo := web.Build{Version: version, Hash: commit, Buildtime: date} + info := map[string]interface{}{} + info["hasOpenIDConnect"] = false + + if config.Keys.OpenIDConfig != nil { + openIDConnect := auth.NewOIDC(authentication) + openIDConnect.RegisterEndpoints(r) + info["hasOpenIDConnect"] = true + } + r.HandleFunc("/login", func(rw http.ResponseWriter, r *http.Request) { rw.Header().Add("Content-Type", "text/html; charset=utf-8") - web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo}) + log.Debugf("##%v##", info) + web.RenderTemplate(rw, "login.tmpl", &web.Page{Title: "Login", Build: buildInfo, Infos: info}) }).Methods(http.MethodGet) r.HandleFunc("/imprint", func(rw http.ResponseWriter, r *http.Request) { rw.Header().Add("Content-Type", "text/html; charset=utf-8") @@ -354,6 +383,7 @@ func main() { MsgType: "alert-warning", Message: err.Error(), Build: buildInfo, + Infos: info, }) })).Methods(http.MethodPost) @@ -370,6 +400,7 @@ func main() { MsgType: "alert-warning", Message: err.Error(), Build: buildInfo, + Infos: info, }) })) @@ -382,6 +413,7 @@ func main() { MsgType: "alert-info", Message: "Logout successful", Build: buildInfo, + Infos: info, }) }))).Methods(http.MethodPost) @@ -398,6 +430,7 @@ func main() { MsgType: "alert-danger", Message: err.Error(), Build: buildInfo, + Infos: info, }) }) }) @@ -536,8 +569,8 @@ func main() { } var cfg struct { - Compression int `json:"compression"` Retention schema.Retention `json:"retention"` + Compression int `json:"compression"` } cfg.Retention.IncludeDB = true diff --git a/configs/README.md b/configs/README.md deleted file mode 100644 index 1ee8cb8..0000000 --- a/configs/README.md +++ /dev/null @@ -1,93 +0,0 @@ -## Intro - -cc-backend requires a configuration file that specifies the cluster systems to be used. -To override the default, specify the location of a json configuration file with the `-config ` command line option. -All security-related configurations, e.g. keys and passwords, are set using -environment variables. -It is supported to set these by means of a `.env` file in the project root. - -## Configuration Options - -* `addr`: Type string. Address where the http (or https) server will listen on (for example: 'localhost:80'). Default `:8080`. -* `apiAllowedIPs`: Type string array. Addresses from which the secured API endpoints (/users and other auth related endpoints) can be reached -* `user`: Type string. Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port. -* `group`: Type string. Drop root permissions once .env was read and the port was taken. Only applicable if using privileged port. -* `disable-authentication`: Type bool. 
Disable authentication (for everything: API, Web-UI, ...). Default `false`. -* `embed-static-files`: Type bool. If all files in `web/frontend/public` should be served from within the binary itself (they are embedded) or not. Default `true`. -* `static-files`: Type string. Folder where static assets can be found, if `embed-static-files` is `false`. No default. -* `db-driver`: Type string. 'sqlite3' or 'mysql' (mysql will work for mariadb as well). Default `sqlite3`. -* `db`: Type string. For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!). Default: `./var/job.db`. -* `job-archive`: Type object. - - `kind`: Type string. At them moment only file is supported as value. - - `path`: Type string. Path to the job-archive. Default: `./var/job-archive`. - - `compression`: Type integer. Setup automatic compression for jobs older than number of days. - - `retention`: Type object. - - `policy`: Type string (required). Retention policy. Possible values none, delete, - move. - - `includeDB`: Type boolean. Also remove jobs from database. - - `age`: Type integer. Act on jobs with startTime older than age (in days). - - `location`: Type string. The target directory for retention. Only applicable for retention policy move. -* `disable-archive`: Type bool. Keep all metric data in the metric data repositories, do not write to the job-archive. Default `false`. -* `validate`: Type bool. Validate all input json documents against json schema. -* `session-max-age`: Type string. Specifies for how long a session shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire! Default `168h`. -* `https-cert-file` and `https-key-file`: Type string. If both those options are not empty, use HTTPS using those certificates. -* `redirect-http-to`: Type string. If not the empty string and `addr` does not end in ":80", redirect every request incoming at port 80 to that url. -* `machine-state-dir`: Type string. Where to store MachineState files. TODO: Explain in more detail! -* `stop-jobs-exceeding-walltime`: Type int. If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job. Default `0`. -* `short-running-jobs-duration`: Type int. Do not show running jobs shorter than X seconds. Default `300`. -* `jwts`: Type object (required). For JWT Authentication. - - `max-age`: Type string (required). Configure how long a token is valid. As string parsable by time.ParseDuration(). - - `cookieName`: Type string. Cookie that should be checked for a JWT token. - - `vaidateUser`: Type boolean. Deny login for users not in database (but defined in JWT). Overwrite roles in JWT with database roles. - - `trustedIssuer`: Type string. Issuer that should be accepted when validating external JWTs. - - `syncUserOnLogin`: Type boolean. Add non-existent user to DB at login attempt with values provided in JWT. -* `ldap`: Type object. For LDAP Authentication and user synchronisation. Default `nil`. - - `url`: Type string (required). URL of LDAP directory server. - - `user_base`: Type string (required). Base DN of user tree root. - - `search_dn`: Type string (required). DN for authenticating LDAP admin account with general read rights. - - `user_bind`: Type string (required). Expression used to authenticate users via LDAP bind. Must contain `uid={username}`. - - `user_filter`: Type string (required). Filter to extract users for syncing. 
- - `username_attr`: Type string. Attribute with full user name. Defaults to `gecos` if not provided. - - `sync_interval`: Type string. Interval used for syncing local user table with LDAP directory. Parsed using time.ParseDuration. - - `sync_del_old_users`: Type boolean. Delete obsolete users in database. - - `syncUserOnLogin`: Type boolean. Add non-existent user to DB at login attempt if user exists in Ldap directory. -* `clusters`: Type array of objects (required) - - `name`: Type string. The name of the cluster. - - `metricDataRepository`: Type object with properties: `kind` (Type string, can be one of `cc-metric-store`, `influxdb` ), `url` (Type string), `token` (Type string) - - `filterRanges` Type object. This option controls the slider ranges for the UI controls of numNodes, duration, and startTime. Example: - ``` - "filterRanges": { - "numNodes": { "from": 1, "to": 64 }, - "duration": { "from": 0, "to": 86400 }, - "startTime": { "from": "2022-01-01T00:00:00Z", "to": null } - } - ``` -* `ui-defaults`: Type object. Default configuration for ui views. If overwritten, all options must be provided! Most options can be overwritten by the user via the web interface. - - `analysis_view_histogramMetrics`: Type string array. Metrics to show as job count histograms in analysis view. Default `["flops_any", "mem_bw", "mem_used"]`. - - `analysis_view_scatterPlotMetrics`: Type array of string array. Initial - scatter plot configuration in analysis view. Default `[["flops_any", "mem_bw"], ["flops_any", "cpu_load"], ["cpu_load", "mem_bw"]]`. - - `job_view_nodestats_selectedMetrics`: Type string array. Initial metrics shown in node statistics table of single job view. Default `["flops_any", "mem_bw", "mem_used"]`. - - `job_view_polarPlotMetrics`: Type string array. Metrics shown in polar plot of single job view. Default `["flops_any", "mem_bw", "mem_used", "net_bw", "file_bw"]`. - - `job_view_selectedMetrics`: Type string array. Default `["flops_any", "mem_bw", "mem_used"]`. - - `plot_general_colorBackground`: Type bool. Color plot background according to job average threshold limits. Default `true`. - - `plot_general_colorscheme`: Type string array. Initial color scheme. Default `"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"`. - - `plot_general_lineWidth`: Type int. Initial linewidth. Default `3`. - - `plot_list_jobsPerPage`: Type int. Jobs shown per page in job lists. Default `50`. - - `plot_list_selectedMetrics`: Type string array. Initial metric plots shown in jobs lists. Default `"cpu_load", "ipc", "mem_used", "flops_any", "mem_bw"`. - - `plot_view_plotsPerRow`: Type int. Number of plots per row in single job view. Default `3`. - - `plot_view_showPolarplot`: Type bool. Option to toggle polar plot in single job view. Default `true`. - - `plot_view_showRoofline`: Type bool. Option to toggle roofline plot in single job view. Default `true`. - - `plot_view_showStatTable`: Type bool. Option to toggle the node statistic table in single job view. Default `true`. - - `system_view_selectedMetric`: Type string. Initial metric shown in system view. Default `cpu_load`. - -Some of the `ui-defaults` values can be appended by `:` in order to have different settings depending on the current cluster. Those are notably `job_view_nodestats_selectedMetrics`, `job_view_polarPlotMetrics`, `job_view_selectedMetrics` and `plot_list_selectedMetrics`. - -## Environment Variables - -An example env file is found in this directory. 
-## Environment Variables
-
-An example env file is found in this directory. Copy it to `.env` in the project root and adapt it for your needs.
-
-* `JWT_PUBLIC_KEY` and `JWT_PRIVATE_KEY`: Base64 encoded Ed25519 keys used for JSON Web Token (JWT) authentication. You can generate your own keypair using `go run ./cmd/gen-keypair/gen-keypair.go`. More information in [README_TOKENS.md](./README_TOKENS.md).
-* `SESSION_KEY`: Some random bytes used as secret for cookie-based sessions.
-* `LDAP_ADMIN_PASSWORD`: The LDAP admin user password (optional).
-* `CROSS_LOGIN_JWT_HS512_KEY`: Used for token-based logins via another authentication service.
-* `LOGLEVEL`: Can be `err`, `warn`, `info` or `debug` (optional, `warn` by default). Can be used to reduce logging.
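A minimal `.env` sketch along the lines of the template (the key values are placeholders, not working keys):

```
# Base64 encoded Ed25519 keys, e.g. generated with
# `go run ./cmd/gen-keypair/gen-keypair.go`:
JWT_PUBLIC_KEY="<base64 public key>"
JWT_PRIVATE_KEY="<base64 private key>"
# Random bytes used to secure session cookies:
SESSION_KEY="<random bytes>"
# Optional:
# LDAP_ADMIN_PASSWORD="<password>"
# LOGLEVEL=warn
```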
diff --git a/configs/README_TOKENS.md b/configs/README_TOKENS.md
deleted file mode 100644
index be8a912..0000000
--- a/configs/README_TOKENS.md
+++ /dev/null
@@ -1,51 +0,0 @@
-## Introduction
-
-ClusterCockpit uses JSON Web Tokens (JWT) for authorization of its APIs.
-JSON Web Token (JWT) is an open standard (RFC 7519) that defines a compact and self-contained way for securely transmitting information between parties as a JSON object.
-This information can be verified and trusted because it is digitally signed.
-In ClusterCockpit JWTs are signed using a public/private key pair based on Ed25519 (EdDSA).
-Because tokens are signed using public/private key pairs, the signature also certifies that only the party holding the private key is the one that signed it.
-Expiration of the generated tokens as well as the max. length of a browser session can be configured in the `config.json` file described [here](./README.md).
-
-The [Ed25519](https://ed25519.cr.yp.to/) algorithm for signatures was chosen because it is compatible with other tools that require authentication, such as NATS.io, and because these elliptic-curve methods provide similar security with smaller keys compared to something like RSA. They are slightly more expensive to validate, but that effect is negligible.
-
-## JWT Payload
-
-You may view the payload of a JWT token at [https://jwt.io/#debugger-io](https://jwt.io/#debugger-io).
-Currently ClusterCockpit sets the following claims:
-* `iat`: Issued at claim. The “iat” claim is used to identify the time at which the JWT was issued. This claim can be used to determine the age of the JWT.
-* `sub`: Subject claim. Identifies the subject of the JWT, in our case this is the username.
-* `roles`: An array of strings specifying the roles set for the subject.
-* `exp`: Expiration date of the token (only if explicitly configured)
-
-It is important to know that JWTs are not encrypted, only signed. This means that outsiders cannot create new JWTs or modify existing ones, but they are able to read out the username.
-
-## Workflow
-
-1. Create a new Ed25519 public/private keypair:
-```
-$ go build ./cmd/gen-keypair/
-$ ./gen-keypair
-```
-2. Add the keypair to your `.env` file. A template can be found in `./configs`.
-
-When a user logs in via the `/login` page using a browser, a session cookie (secured using the random bytes in the `SESSION_KEY` env. variable, which you should change as well) is used for all requests after the successful login. The JWTs make it easier to use the APIs of ClusterCockpit from scripts or other external programs. The token is specified in the `Authorization` HTTP header using the [Bearer schema](https://datatracker.ietf.org/doc/html/rfc6750) (there is an example below). Tokens can be issued to users from the configuration view in the Web-UI or on the command line.
-In order to use the token for API endpoints such as `/api/jobs/start_job/`, the user that executes it needs to have the `api` role. Regular users can only perform read-only queries and only look at data connected to jobs they started themselves.
-
-## cc-metric-store
-
-The [cc-metric-store](https://github.com/ClusterCockpit/cc-metric-store) also uses JWTs for authentication. As it does not issue new tokens, it does not need to know the private key. The public key of the keypair that is used to generate the JWTs that grant access to the `cc-metric-store` can be specified in its `config.json`. When configuring the `metricDataRepository` object in the `cluster.json` file, you can put in a token issued by ClusterCockpit itself.
-
-## Setup user and JWT token for REST API authorization
-
-1. Create user:
-```
-$ ./cc-backend --add-user <username>:api:<password> --no-server
-```
-2. Issue token for user:
-```
-$ ./cc-backend --jwt <username> --no-server
-```
-3. Use the issued token on the client side:
-```
-$ curl -X GET "<API ENDPOINT>" -H "accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer <JWT TOKEN>"
-```
diff --git a/docs/ConfigurationManagement.md b/docs/ConfigurationManagement.md
deleted file mode 100644
index 4340741..0000000
--- a/docs/ConfigurationManagement.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Release versions
-
-Versions are marked according to [semantic versioning](https://semver.org).
-Each version embeds the following static assets in the binary:
-* Web frontend with javascript files and all static assets.
-* Golang template files for server-side rendering.
-* JSON schema files for validation.
-* Database migration files.
-
-The remaining external assets are:
-* The SQL database used.
-* The job archive.
-* The configuration files `config.json` and `.env`.
-
-The external assets are versioned with integer IDs.
-This means that each release binary is bound to specific versions of the SQL
-database and the job archive.
-The configuration file is checked against the current schema at startup.
-The `-migrate-db` command line switch can be used to migrate the SQL database
-from a previous version to the latest one.
-We offer a separate tool `archive-migration` to migrate an existing job
-archive from the previous to the latest version.
-
-# Versioning of APIs
-
-cc-backend provides two API backends:
-* A REST API for querying jobs.
-* A GraphQL API for data exchange between the web frontend and cc-backend.
-
-The REST API will also be versioned. We still have to decide whether we will
-also support older REST API versions by versioning the endpoint URLs.
-The GraphQL API is for internal use and will not be versioned.
-
-# How to build
-
-In general it is recommended to use the provided release binary.
-In case you want to build `cc-backend`, please always use the provided makefile.
-This will ensure that the frontend is also built correctly and that the version
-is correctly encoded in the binary.
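A short shell sketch of that recommendation, using the commands that also appear in the hands-on guide below:

```
# Build via the provided makefile so the frontend is built
# and the version metadata is linked into the binary:
git clone https://github.com/ClusterCockpit/cc-backend.git
cd cc-backend
make

# After an upgrade, bring the SQL database to the latest schema:
./cc-backend -migrate-db
```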
diff --git a/docs/Hands-on.md b/docs/Hands-on.md
deleted file mode 100644
index 7f9d2f4..0000000
--- a/docs/Hands-on.md
+++ /dev/null
@@ -1,234 +0,0 @@
-# Hands-on setup ClusterCockpit from scratch (w/o docker)
-
-## Prerequisites
-* perl
-* go
-* npm
-* Optional: curl
-* Script migrateTimestamps.pl
-
-## Documentation
-You can find READMEs and API docs in
-* ./cc-backend/configs
-* ./cc-backend/init
-* ./cc-backend/api
-
-## ClusterCockpit configuration files
-### cc-backend
-* `./.env` Passwords and tokens set in the environment
-* `./config.json` Configuration options for cc-backend
-
-### cc-metric-store
-* `./config.json` Optional, to overwrite configuration options
-
-### cc-metric-collector
-Not yet included in the hands-on setup.
-
-## Setup Components
-Start by creating a base folder for all of the following steps.
-* `mkdir clustercockpit`
-* `cd clustercockpit`
-
-### Setup cc-backend
-* Clone Repository
-  - `git clone https://github.com/ClusterCockpit/cc-backend.git`
-  - `cd cc-backend`
-* Build
-  - `make`
-* Activate & configure environment for cc-backend
-  - `cp configs/env-template.txt .env`
-  - Optional: Have a look via `vim .env`
-  - Copy the `config.json` file included in this tarball into the root directory of cc-backend: `cp ../../config.json ./`
-* Prepare data folder and database file
-  - `mkdir var`
-  - `./cc-backend -migrate-db`
-* Back to toplevel `clustercockpit`
-  - `cd ..`
-
-### Setup cc-metric-store
-* Clone Repository
-  - `git clone https://github.com/ClusterCockpit/cc-metric-store.git`
-  - `cd cc-metric-store`
-* Build Go Executable
-  - `go get`
-  - `go build`
-* Prepare data folders
-  - `mkdir -p var/checkpoints`
-  - `mkdir -p var/archive`
-* Update Config
-  - `vim config.json`
-  - Exchange the existing settings in `metrics` with the following:
-```
-"clock": { "frequency": 60, "aggregation": null },
-"cpi": { "frequency": 60, "aggregation": null },
-"cpu_load": { "frequency": 60, "aggregation": null },
-"flops_any": { "frequency": 60, "aggregation": null },
-"flops_dp": { "frequency": 60, "aggregation": null },
-"flops_sp": { "frequency": 60, "aggregation": null },
-"ib_bw": { "frequency": 60, "aggregation": null },
-"lustre_bw": { "frequency": 60, "aggregation": null },
-"mem_bw": { "frequency": 60, "aggregation": null },
-"mem_used": { "frequency": 60, "aggregation": null },
-"rapl_power": { "frequency": 60, "aggregation": null }
-```
-* Back to toplevel `clustercockpit`
-  - `cd ..`
-
-### Setup Demo Data
-* `mkdir source-data`
-* `cd source-data`
-* Download JobArchive-Source:
-  - `wget https://hpc-mover.rrze.uni-erlangen.de/HPC-Data/0x7b58aefb/eig7ahyo6fo2bais0ephuf2aitohv1ai/job-archive-dev.tar.xz`
-  - `tar xJf job-archive-dev.tar.xz`
-  - `mv ./job-archive ./job-archive-source`
-  - `rm ./job-archive-dev.tar.xz`
-* Download CC-Metric-Store Checkpoints:
-  - `mkdir -p cc-metric-store-source/checkpoints`
-  - `cd cc-metric-store-source/checkpoints`
-  - `wget https://hpc-mover.rrze.uni-erlangen.de/HPC-Data/0x7b58aefb/eig7ahyo6fo2bais0ephuf2aitohv1ai/cc-metric-store-checkpoints.tar.xz`
-  - `tar xf cc-metric-store-checkpoints.tar.xz`
-  - `rm cc-metric-store-checkpoints.tar.xz`
-* Back to toplevel `clustercockpit`
-  - `cd ../../..`
-* Run the timestamp migration script. This may take tens of minutes!
-  - `cp ../migrateTimestamps.pl .`
-  - `./migrateTimestamps.pl`
-  - Expected output:
-```
-Starting to update start- and stoptimes in job-archive for emmy
-Starting to update start- and stoptimes in job-archive for woody
-Done for job-archive
-Starting to update checkpoint filenames and data starttimes for emmy
-Starting to update checkpoint filenames and data starttimes for woody
-Done for checkpoints
-```
-* Copy `cluster.json` files from source to migrated folders
-  - `cp source-data/job-archive-source/emmy/cluster.json cc-backend/var/job-archive/emmy/`
-  - `cp source-data/job-archive-source/woody/cluster.json cc-backend/var/job-archive/woody/`
-* Initialize the job-archive in the SQLite3 job.db and add a demo user
-  - `cd cc-backend`
-  - `./cc-backend -init-db -add-user demo:admin:demo`
-  - Expected output:
-```
-<6>[INFO] new user "demo" created (roles: ["admin"], auth-source: 0)
-<6>[INFO] Building job table...
-<6>[INFO] A total of 3936 jobs have been registered in 1.791 seconds.
-```
-* Back to toplevel `clustercockpit`
-  - `cd ..`
-
-### Startup both Apps
-* In cc-backend root: `$ ./cc-backend -server -dev`
-  - Starts ClusterCockpit at `http://localhost:8080`
-  - Log: `<6>[INFO] HTTP server listening at :8080...`
-  - Use a local internet browser to access the interface
-  - You should see and be able to browse finished jobs
-  - Metadata is read from the SQLite3 database
-  - Metric data is read from the job-archive/JSON files
-  - Create a user in settings (top-right corner)
-    - Name `apiuser`
-    - Username `apiuser`
-    - Role `API`
-    - Submit & refresh page
-  - Create a JWT for `apiuser`
-    - In the user list, press `Gen. JWT` for `apiuser`
-    - Save the JWT for later use
-* In cc-metric-store root: `$ ./cc-metric-store`
-  - Starts the cc-metric-store on `http://localhost:8081`, Log:
-```
-2022/07/15 17:17:42 Loading checkpoints newer than 2022-07-13T17:17:42+02:00
-2022/07/15 17:17:45 Checkpoints loaded (5621 files, 319 MB, that took 3.034652s)
-2022/07/15 17:17:45 API http endpoint listening on '0.0.0.0:8081'
-```
-  - Does *not* have a graphical interface
-  - Optional: Test the function by executing:
-```
-$ curl -H "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFZERTQSJ9.eyJ1c2VyIjoiYWRtaW4iLCJyb2xlcyI6WyJST0xFX0FETUlOIiwiUk9MRV9BTkFMWVNUIiwiUk9MRV9VU0VSIl19.d-3_3FZTsadPjDEdsWrrQ7nS0edMAR4zjl-eK7rJU3HziNBfI9PDHDIpJVHTNN5E5SlLGLFXctWyKAkwhXL-Dw" -D - "http://localhost:8081/api/query" -d "{ \"cluster\": \"emmy\", \"from\": $(expr $(date +%s) - 60), \"to\": $(date +%s), \"queries\": [{
-  \"metric\": \"flops_any\",
-  \"host\": \"e1111\"
-}] }"
-
-HTTP/1.1 200 OK
-Content-Type: application/json
-Date: Fri, 15 Jul 2022 13:57:22 GMT
-Content-Length: 119
-{"results":[[JSON-DATA-ARRAY]]}
-```
-
-### Development API web interfaces
-The `-dev` flag enables web interfaces to document and test the APIs:
-* http://localhost:8080/playground - A GraphQL playground. To use it you must have an authenticated session in the same browser.
-* http://localhost:8080/swagger - A Swagger UI. To use it you have to be logged out, so no user session in the same browser. Use the JWT token with role `api` generated previously to authenticate via the HTTP header.
-
-### Use cc-backend API to start job
-* Enter the URL `http://localhost:8080/swagger/index.html` in your browser.
-* Enter the JWT token you generated for the API user by clicking the green Authorize button in the upper right part of the window.
-* Click the `/job/start_job` endpoint and click the Try it out button.
-* Enter the following json into the request body text area and fill in a recent start timestamp obtained by executing `date +%s`:
-```
-{
-  "jobId": 100000,
-  "arrayJobId": 0,
-  "user": "ccdemouser",
-  "subCluster": "main",
-  "cluster": "emmy",
-  "startTime": <timestamp>,
-  "project": "ccdemoproject",
-  "resources": [
-    {"hostname": "e0601"},
-    {"hostname": "e0823"},
-    {"hostname": "e0337"},
-    {"hostname": "e1111"}],
-  "numNodes": 4,
-  "numHwthreads": 80,
-  "walltime": 86400
-}
-```
-* The response body should be the database id of the started job, for example:
-```
-{
-  "id": 3937
-}
-```
-* Check in ClusterCockpit
-  - User `ccdemouser` should appear in the Users tab with one running job
-  - It may take up to 5 minutes until the job is displayed with some current data (5 minute short-job filter)
-  - The job is then marked with a green `running` tag
-  - The metric data displayed is read from cc-metric-store!
-
-### Use cc-backend API to stop job
-* Enter the URL `http://localhost:8080/swagger/index.html` in your browser.
-* Enter the JWT token you generated for the API user by clicking the green Authorize button in the upper right part of the window.
-* Click the `/job/stop_job/{id}` endpoint and click the Try it out button.
-* Enter the database id returned by `start_job` in the `id` field and copy the following into the request body. Replace the timestamp with a recent one:
-```
-{
-  "cluster": "emmy",
-  "jobState": "completed",
-  "stopTime": <timestamp>
-}
-```
-* On success a json document with the job meta data is returned.
-
-* Check in ClusterCockpit
-  - User `ccdemouser` should appear in the Users tab with one completed job
-  - The job is no longer marked with a green `running` tag -> Completed!
-  - The metric data displayed is now read from the job-archive!
-* Check in the job-archive
-  - `cd ./cc-backend/var/job-archive/emmy/100/000`
-  - `cd $STARTTIME`
-  - Inspect `meta.json` and `data.json`
-
-## Helper scripts
-* In this tarball you can find the perl script `generate_subcluster.pl` that helps to generate the subcluster section for your system.
-Usage:
-* Log into an exclusive cluster node.
-* The LIKWID tools likwid-topology and likwid-bench must be in the PATH!
-* `$ ./generate_subcluster.pl` outputs the subcluster section on `stdout`
-
-Please be aware that
-* You have to enter the name and node list for the subCluster manually.
-* GPU detection only works if LIKWID was built with Cuda available and you also run likwid-topology with Cuda loaded.
-* You should not blindly trust the measured peakflops values.
-* Because the script relies on the CSV format output by likwid-topology, this is a fragile undertaking!
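As a command-line footnote to the start/stop walkthrough above, the same two calls can be issued with curl. A sketch only: the HTTP method and the `/api/jobs/...` URL prefix are assumptions based on the API paths mentioned in these docs, and the token, id, and timestamps are placeholders:

```
# Start a job (same JSON body as above, saved to start.json):
curl -X POST "http://localhost:8080/api/jobs/start_job/" \
  -H "Authorization: Bearer <JWT TOKEN>" \
  -H "Content-Type: application/json" \
  -d @start.json

# Stop it again via the database id returned by start_job:
curl -X POST "http://localhost:8080/api/jobs/stop_job/<id>" \
  -H "Authorization: Bearer <JWT TOKEN>" \
  -H "Content-Type: application/json" \
  -d '{ "cluster": "emmy", "jobState": "completed", "stopTime": <timestamp> }'
```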
diff --git a/docs/JWT-Handling.md b/docs/JWT-Handling.md
deleted file mode 100644
index 8b8d000..0000000
--- a/docs/JWT-Handling.md
+++ /dev/null
@@ -1,99 +0,0 @@
-## Introduction
-
-ClusterCockpit uses JSON Web Tokens (JWT) for authorization of its APIs. JSON
-Web Token (JWT) is an open standard (RFC 7519) that defines a compact and
-self-contained way for securely transmitting information between parties as a
-JSON object. This information can be verified and trusted because it is
-digitally signed. In ClusterCockpit JWTs are signed using a public/private key
-pair based on Ed25519 (EdDSA). Because tokens are signed using public/private key pairs, the
-signature also certifies that only the party holding the private key is the one
-that signed it. Token expiration is set to the configuration option MaxAge.
-
-## JWT Payload
-
-You may view the payload of a JWT token at [https://jwt.io/#debugger-io](https://jwt.io/#debugger-io).
-Currently ClusterCockpit sets the following claims:
-* `iat`: Issued at claim. The “iat” claim is used to identify the time at which the JWT was issued. This claim can be used to determine the age of the JWT.
-* `sub`: Subject claim. Identifies the subject of the JWT, in our case this is the username.
-* `roles`: An array of strings specifying the roles set for the subject.
-
-## Workflow
-
-1. Create a new Ed25519 public/private keypair:
-```
-$ go build ./tools/gen-keypair.go
-$ ./gen-keypair
-```
-2. Add the keypair to your `.env` file. A template can be found in `./configs`.
-
-There are two usage scenarios:
-* The APIs are used during a browser session. API accesses are authorized with
-  the active session.
-* The REST API is used outside a browser session, e.g. by scripts. In this case
-  you have to issue a token manually. This is possible from within the
-  configuration view or on the command line. It is recommended to issue a JWT
-  token in this case for a special user that only has the `api` role. By using
-  different users for different purposes a fine-grained access control and
-  access revocation management is possible.
-
-The token is commonly specified in the Authorization HTTP header using the Bearer schema.
-
-## Setup user and JWT token for REST API authorization
-
-1. Create user:
-```
-$ ./cc-backend --add-user <username>:api:<password> --no-server
-```
-2. Issue token for user:
-```
-$ ./cc-backend -jwt <username> -no-server
-```
-3. Use the issued token on the client side:
-```
-$ curl -X GET "<API ENDPOINT>" -H "accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer <JWT TOKEN>"
-```
-
-## Accept externally generated JWTs provided via cookie
-
-If there is an external service like an AuthAPI that can generate JWTs and hand
-them over to ClusterCockpit via cookies, CC can be configured to accept them:
-
-1. `.env`: CC needs a public ed25519 key to verify foreign JWT signatures.
-   Public keys in PEM format can be converted with the instructions in
-   [/tools/convert-pem-pubkey-for-cc](../tools/convert-pem-pubkey-for-cc/Readme.md).
-
-```
-CROSS_LOGIN_JWT_PUBLIC_KEY="+51iXX8BdLFocrppRxIw52xCOf8xFSH/eNilN5IHVGc="
-```
-
-2. `config.json`: Insert a name for the cookie (set by the external service)
-   containing the JWT, so that CC knows where to look. Define a trusted issuer
-   (JWT claim 'iss'), otherwise the token will be rejected. If you want usernames and
-   user roles from JWTs ('sub' and 'roles' claim) to be validated against CC's
-   internal database, you need to enable it here. Unknown users will then be
-   rejected and roles set via JWT will be ignored.
-
-```json
-"jwts": {
-    "cookieName": "access_cc",
-    "forceJWTValidationViaDatabase": true,
-    "trustedExternalIssuer": "auth.example.com"
-}
-```
-
-3. Make sure your external service includes the same issuer (`iss`) in its JWTs.
-   Example JWT payload:
-
-```json
-{
-  "iat": 1668161471,
-  "nbf": 1668161471,
-  "exp": 1668161531,
-  "sub": "alice",
-  "roles": [
-    "user"
-  ],
-  "jti": "a1b2c3d4-1234-5678-abcd-a1b2c3d4e5f6",
-  "iss": "auth.example.com"
-}
-```
diff --git a/docs/Job-Archive.md b/docs/Job-Archive.md
deleted file mode 100644
index 601f32d..0000000
--- a/docs/Job-Archive.md
+++ /dev/null
@@ -1,78 +0,0 @@
-The job archive specifies an exchange format for job meta and performance metric data.
It consists of two parts: -* a [SQLite database schema](https://github.com/ClusterCockpit/cc-backend/wiki/Job-Archive#sqlite-database-schema) for job meta data and performance statistics -* a [Json file format](https://github.com/ClusterCockpit/cc-backend/wiki/Job-Archive#json-file-format) together with a [Directory hierarchy specification](https://github.com/ClusterCockpit/cc-backend/wiki/Job-Archive#directory-hierarchy-specification) - -By using an open, portable and simple specification based on files it is -possible to exchange job performance data for research and analysis purposes as -well as use it as a robust way for archiving job performance data to disk. - -# SQLite database schema -## Introduction - -A SQLite 3 database schema is provided to standardize the job meta data -information in a portable way. The schema also includes optional columns for job -performance statistics (called a job performance footprint). The database acts -as a front end to filter and select subsets of job IDs, that are the keys to get -the full job performance data in the job performance tree hierarchy. - -## Database schema - -The schema includes 3 tables: the job table, a tag table and a jobtag table -representing the MANY-TO-MANY relation between jobs and tags. The SQL schema is -specified -[here](https://github.com/ClusterCockpit/cc-specifications/blob/master/schemas/jobs-sqlite.sql). -Explanation of the various columns including the JSON datatypes is documented -[here](https://github.com/ClusterCockpit/cc-specifications/blob/master/datastructures/job-meta.schema.json). - -# Directory hierarchy specification - -## Specification - -To manage the number of directories within a single directory a tree approach is -used splitting the integer job ID. The job id is split in junks of 1000 each. -Usually 2 layers of directories is sufficient but the concept can be used for an -arbitrary number of layers. - -For a 2 layer schema this can be achieved with (code example in Perl): -``` perl -$level1 = $jobID/1000; -$level2 = $jobID%1000; -$dstPath = sprintf("%s/%s/%d/%03d", $trunk, $destdir, $level1, $level2); -``` - -## Example - -For the job ID 1034871 the directory path is `./1034/871/`. - -# Json file format -## Overview - -Every cluster must be configured in a `cluster.json` file. - -The job data consists of two files: -* `meta.json`: Contains job meta information and job statistics. -* `data.json`: Contains complete job data with time series - -The description of the json format specification is available as [[json -schema|https://json-schema.org/]] format file. The latest version of the json -schema is part of the `cc-backend` source tree. For external reference it is -also available in a separate repository. - -## Specification `cluster.json` - -The json schema specification is available -[here](https://github.com/ClusterCockpit/cc-specifications/blob/master/datastructures/cluster.schema.json). - -## Specification `meta.json` - -The json schema specification is available -[here](https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-meta.schema.json). - -## Specification `data.json` - -The json schema specification is available -[here](https://github.com/RRZE-HPC/HPCJobDatabase/blob/master/json-schema/job-data.schema.json). -Metric time series data is stored for a fixed time step. The time step is set -per metric. If no value is available for a metric time series data timestamp -`null` is entered. 
diff --git a/docs/adm-customization.md b/docs/adm-customization.md
deleted file mode 100644
index da2c7eb..0000000
--- a/docs/adm-customization.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Overview
-
-Customizing `cc-backend` means replacing the placeholders for the logo, legal texts,
-and the login template with your own versions. You can also place a text file in `./var`
-to add dynamic status or notification messages to the ClusterCockpit homepage.
-
-# Replace legal texts
-
-To replace the `imprint.tmpl` and `privacy.tmpl` legal texts, you can place your
-version in `./var/`. At startup `cc-backend` will check if `./var/imprint.tmpl` and/or
-`./var/privacy.tmpl` exist and use them instead of the built-in placeholders.
-You can use the placeholders in `web/templates` as a blueprint.
-
-# Replace login template
-To replace the default login layout and styling, you can place your version in
-`./var/`. At startup `cc-backend` will check if `./var/login.tmpl` exists and use
-it instead of the built-in placeholder. You can use the default template
-`web/templates/login.tmpl` as a blueprint.
-
-# Replace logo
-To change the logo displayed in the navigation bar, you can provide the file
-`logo.png` in the folder `./var/img/`. On startup `cc-backend` will check if the
-folder exists and use the images provided there instead of the built-in images.
-You may also place additional images there for use in a custom login template.
-
-# Add notification banner on homepage
-To add a notification banner you can add a file `notice.txt` to `./var`. As long
-as this file is present, its text is shown in an info banner on the
-homepage.
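A quick shell sketch of the steps above (the paths are as documented; your own template, logo, and message files are placeholders):

```
# Legal texts and login template, picked up at startup if present:
cp my-imprint.tmpl ./var/imprint.tmpl
cp my-privacy.tmpl ./var/privacy.tmpl
cp my-login.tmpl ./var/login.tmpl

# Custom logo for the navigation bar:
mkdir -p ./var/img
cp my-logo.png ./var/img/logo.png

# Notification banner on the homepage:
echo "Maintenance on Friday 06:00-08:00" > ./var/notice.txt
```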
diff --git a/docs/adm-upgrade.md b/docs/adm-upgrade.md
deleted file mode 100644
index bfe2933..0000000
--- a/docs/adm-upgrade.md
+++ /dev/null
@@ -1,78 +0,0 @@
-In general, an upgrade is nothing more than a replacement of the binary file.
-All the necessary files, except the database file, the configuration file and
-the job archive, are embedded in the binary file. It is recommended to use a
-directory where the file names of the binary files include a version
-indicator. This can be, for example, the date or the Unix epoch time. A symbolic
-link points to the version to be used. This makes it easier to switch to earlier
-versions.
-
-The database and the job archive are versioned. Each release binary supports
-specific versions of the database and job archive. If a version mismatch is
-detected, the application is terminated and migration is required.
-
-**IMPORTANT NOTE**
-
-It is recommended to make a backup copy of the database before each update. This
-is mandatory in case the database needs to be migrated. In the case of sqlite,
-this means stopping `cc-backend` and copying the sqlite database file
-somewhere.
-
-# Migrating the database
-
-After you have backed up the database, run the following command to migrate the
-database to the latest version:
-```
-$ ./cc-backend -migrate-db
-```
-
-The migration files are embedded in the binary and can also be viewed in the
-cc-backend [source tree](https://github.com/ClusterCockpit/cc-backend/tree/master/internal/repository/migrations).
-There are separate migration files for both supported
-database backends.
-We use the [migrate library](https://github.com/golang-migrate/migrate).
-
-If something goes wrong, you can check the status and get the current schema
-(here for sqlite):
-```
-$ sqlite3 var/job.db
-```
-In the sqlite console execute:
-```
-.schema
-```
-to get the current database schema.
-You can query the current version and whether the migration failed with:
-```
-SELECT * FROM schema_migrations;
-```
-The first column indicates the current database version and the second column is
-a dirty flag indicating whether the migration was successful.
-
-# Migrating the job archive
-
-Job archive migration requires a separate tool (`archive-migration`), which is
-part of the cc-backend source tree (build it with `go build ./tools/archive-migration`)
-and is also provided as part of the releases.
-
-Migration is supported only between two successive releases. The migration tool
-migrates the existing job archive to a new job archive. This means that there
-must be enough disk space for two complete job archives. If the tool is called
-without options:
-```
-$ ./archive-migration
-```
-
-it is assumed that a job archive exists in `./var/job-archive`. The new job
-archive is written to `./var/job-archive-new`. Since execution is threaded, in case
-of a fatal error it is impossible to determine in which job the error occurred.
-In this case, you can run the tool in debug mode (with the `-debug` flag). In
-debug mode, threading is disabled and the job ID of each migrated job is output.
-Jobs with empty files will be skipped. Between multiple runs of the tool, the
-`job-archive-new` directory must be moved or deleted.
-
-The `cluster.json` files in `job-archive-new` must be checked for errors, especially
-whether the aggregation attribute is set correctly for all metrics.
-
-Migration takes several hours for relatively large job archives (several hundred
-GB). A versioned job archive contains a version.txt file in the root directory
-of the job archive. This file contains the version as an unsigned integer.
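The versioned-binary and backup advice above, condensed into a shell sketch (the directory layout and naming scheme are illustrative, not prescribed):

```
VERSION_DIR=/opt/cc-backend
NEW=cc-backend-$(date +%Y%m%d)

# Keep each release binary under a versioned name:
cp ./cc-backend $VERSION_DIR/$NEW

# Stop the service and back up the sqlite database before migrating:
cp ./var/job.db ./var/job.db.backup

# Point the symlink at the new version; older binaries stay available:
ln -sfn $VERSION_DIR/$NEW $VERSION_DIR/cc-backend

# Migrate the database to the new schema:
$VERSION_DIR/cc-backend -migrate-db
```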
diff --git a/docs/dev-authentication.md b/docs/dev-authentication.md
deleted file mode 100644
index 4346e97..0000000
--- a/docs/dev-authentication.md
+++ /dev/null
@@ -1,180 +0,0 @@
-# Overview
-
-The authentication is implemented in `internal/auth/`. In `auth.go`
-an interface is defined that any authentication provider must fulfill. It also
-acts as a dispatcher to delegate the calls to the available authentication
-providers.
-
-Two authentication types are available:
-* JWT authentication for the REST API that does not create a session cookie
-* Session based authentication using a session cookie
-
-The most important routines in auth are:
-* `Login()` Handles the POST request to login the user and start a new session
-* `Auth()` Authenticates the user and puts the User object in the context of the request
-
-The http router calls auth in the following cases:
-* `r.Handle("/login", authentication.Login( ... )).Methods(http.MethodPost)`:
-  The POST request on the `/login` route will call the Login callback.
-* `r.Handle("/jwt-login", authentication.Login( ... ))`:
-  Any request on the `/jwt-login` route will call the Login callback. Intended
-  for use by the JWT token based authenticators.
-* Any route in the secured subrouter will always call Auth(); on success it will
-  call the next handler in the chain, on failure it will render the login
-  template.
-```
-secured.Use(func(next http.Handler) http.Handler {
-  return authentication.Auth(
-    // On success:
-    next,
-
-    // On failure:
-    func(rw http.ResponseWriter, r *http.Request, err error) {
-      // Render login form
-    })
-})
-```
-
-A JWT token can be used to initiate an authenticated user
-session. This can either happen by calling the login route with a token
-provided in a header or via a special cookie containing the JWT token.
-For API routes the access is authenticated on every request using the JWT token
-and no session is initiated.
-
-# Login
-
-The Login function (located in `auth.go`):
-* Extracts the user name and gets the user from the user database table. In case the
-  user is not found, the user object is set to nil.
-* Iterates over all authenticators and:
-  - Calls its `CanLogin` function, which checks if the authentication method is
-    supported for this user.
-  - Calls its `Login` function to authenticate the user. On success a valid user
-    object is returned.
-  - Creates a new session object, stores the user attributes in the session and
-    saves the session.
-  - Starts the `onSuccess` http handler
-
-## Local authenticator
-
-This authenticator is applied if
-```
-return user != nil && user.AuthSource == AuthViaLocalPassword
-```
-
-It compares the password provided by the login form to the password hash stored in
-the user database table:
-```
-if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(r.FormValue("password"))); e != nil {
-  log.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
-  return nil, fmt.Errorf("Authentication failed")
-}
-```
-
-## LDAP authenticator
-
-This authenticator is applied if the user was found in the database and its
-AuthSource is LDAP:
-```
-if user != nil {
-  if user.AuthSource == schema.AuthViaLDAP {
-    return user, true
-  }
-}
-```
-
-If the option `SyncUserOnLogin` is set, it tries to sync the user from the LDAP
-directory. In case this succeeds, the user is persisted to the database and can
-log in.
-
-It gets the LDAP connection and tries a bind with the provided credentials:
-```
-if err := l.Bind(userDn, r.FormValue("password")); err != nil {
-  log.Errorf("AUTH/LDAP > Authentication for user %s failed: %v", user.Username, err)
-  return nil, fmt.Errorf("Authentication failed")
-}
-```
-
-## JWT Session authenticator
-
-Login via JWT token will create a session without a password.
-For login the `X-Auth-Token` header is not supported. This authenticator is
-applied if the Authorization header or the query parameter login-token is present:
-```
-return user, r.Header.Get("Authorization") != "" ||
-  r.URL.Query().Get("login-token") != ""
-```
-
-The Login function:
-* Parses the token and checks if it is expired
-* Checks if the signing method is EdDSA, HS256 or HS512
-* Checks if the claims are valid and extracts the claims
-* The following claims have to be present:
-  - `sub`: The subject, in this case the username
-  - `exp`: Expiration in Unix epoch time
-  - `roles`: String array with the roles of the user
-* In case the user does not exist in the database and the option `SyncUserOnLogin`
-  is set, the user is added to the user database table with the `AuthViaToken` AuthSource.
-* Returns a valid user object
-
-## JWT Cookie Session authenticator
-
-Login via JWT cookie token will create a session without a password.
-It is first checked if the required configuration options are set:
-* `trustedIssuer`
-* `CookieName`
-
-and optionally whether the environment variable `CROSS_LOGIN_JWT_PUBLIC_KEY` is set.
-
-This authenticator is applied if the configured cookie is present:
-```
-jwtCookie, err := r.Cookie(cookieName)
-
-if err == nil && jwtCookie.Value != "" {
-  return true
-}
-```
-
-The Login function:
-* Extracts and parses the token
-* Checks if the signing method is Ed25519/EdDSA
-* In case publicKeyCrossLogin is configured:
-  - Checks if the `iss` issuer claim matches the trusted issuer from the configuration
-  - Returns the public cross login key
-  - Otherwise returns the standard public key
-* Checks if the claims are valid
-* Depending on the option `validateUser` the roles are
-  extracted from the JWT token or taken from the user object fetched from the database
-* Asks the browser to delete the JWT cookie
-* In case the user does not exist in the database and the option `SyncUserOnLogin`
-  is set, the user is added to the user database table with the `AuthViaToken` AuthSource.
-* Returns a valid user object
-
-# Auth
-
-The Auth function (located in `auth.go`):
-* Returns a new http handler function that is defined right away
-* This handler tries two methods to authenticate a user:
-  - Via a JWT API token in `AuthViaJWT()`
-  - Via a valid session in `AuthViaSession()`
-* If no error is returned and the user object is valid, it puts the user object in the
-  request context and starts the onSuccess http handler
-* Otherwise it calls the onFailure handler
-
-## AuthViaJWT
-
-Implemented in JWTAuthenticator:
-* Extracts the token either from the header `X-Auth-Token` or `Authorization` with Bearer
-  prefix
-* Parses the token and checks if it is valid. The Parse routine will also check if the
-  token is expired.
-* If the option `validateUser` is set, it will ensure that the
-  user object exists in the database and takes the roles from the database user
-* Otherwise the roles are extracted from the roles claim
-* Returns a valid user object with AuthType set to AuthToken
-
-## AuthViaSession
-
-* Extracts the session
-* Gets the values username, projects, and roles from the session
-* Returns a valid user object with AuthType set to AuthSession
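The provider contract described above, as a minimal hand-written Go sketch. The method names `CanLogin` and `Login` and the `(user, bool)` shape follow the snippets in this document; the exact signatures in `internal/auth/` may differ:

```go
package auth

import "net/http"

// User mirrors the user object that authenticators pass around;
// only the fields mentioned in this document are sketched here.
type User struct {
	Username   string
	Roles      []string
	AuthSource int
}

// Authenticator is the contract every authentication provider fulfills.
// The dispatcher in auth.go iterates over all providers on login.
type Authenticator interface {
	// CanLogin reports whether this provider is responsible for the
	// given (possibly nil) user and request.
	CanLogin(user *User, username string, rw http.ResponseWriter, r *http.Request) (*User, bool)

	// Login performs the actual authentication and returns a valid
	// user object on success.
	Login(user *User, rw http.ResponseWriter, r *http.Request) (*User, error)
}
```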
diff --git a/docs/dev-frontend.md b/docs/dev-frontend.md
deleted file mode 100644
index f1bffd4..0000000
--- a/docs/dev-frontend.md
+++ /dev/null
@@ -1,33 +0,0 @@
-## Tips for frontend development
-
-The frontend assets, including the Svelte js files, are by default embedded in
-the Go binary. To enable a quick turnaround cycle for web development of the
-frontend, disable embedding of static assets in `config.json`:
-```
-"embed-static-files": false,
-"static-files": "./web/frontend/public/",
-```
-
-Start the node build process (in directory `./web/frontend`) in development mode:
-```
-$ npm run dev
-```
-
-This will start the build process in listen mode. Whenever you change a source
-file, the dependent javascript targets are automatically rebuilt.
-In case the javascript files are minified, you may need to set the production
-flag by hand to false in `./web/frontend/rollup.config.mjs`:
-```
-const production = false
-```
-
-Usually this should work automatically.
-
-Because the files are still served by ./cc-backend you have to reload the view
-explicitly in your browser.
-
-A common setup is to have three terminals open:
-* One running cc-backend (working directory repository root): `./cc-backend -server -dev`
-* Another running npm in developer mode (working directory `./web/frontend`): `npm run dev`
-* And the last one editing the frontend source files
diff --git a/docs/dev-release.md b/docs/dev-release.md
deleted file mode 100644
index 30c352f..0000000
--- a/docs/dev-release.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Steps to prepare a release
-
-1. On `hotfix` branch:
-   * Update ReleaseNotes.md
-   * Update version in Makefile
-   * Commit, push, and pull request
-   * Merge into master
-
-2. On Linux host:
-   * Pull master
-   * Ensure that the GitHub token environment variable `GITHUB_TOKEN` is set
-   * Create release tag: `git tag v1.1.0 -m release`
-   * Execute `goreleaser release`
diff --git a/docs/dev-testing.md b/docs/dev-testing.md
deleted file mode 100644
index 9ca39c3..0000000
--- a/docs/dev-testing.md
+++ /dev/null
@@ -1,34 +0,0 @@
-## Overview
-
-We use the standard golang testing environment.
-
-The following conventions are used:
-
-* *White box unit tests*: Tests for internal functionality are placed in files
-ending in `_test.go` inside the package they test.
-* *Black box unit tests*: Tests for public interfaces are placed in files
-ending in `_test.go` and belong to a separate package with the `_test` suffix.
-There only exists one package test file per package.
-* *Integration tests*: Tests that use multiple components are placed in a
-package test file. These are also named with the `_test.go` suffix and belong to the
-package with the `_test` suffix.
-* *Test assets*: Any required files are placed in a directory `./testdata`
-within each package directory.
-
-## Executing tests
-
-Visual Studio Code has a very good golang test integration.
-For debugging a test this is the recommended solution.
-
-The Makefile provided by us has a `test` target that executes:
-```
-$ go clean -testcache
-$ go build ./...
-$ go vet ./...
-$ go test ./...
-```
-
-Of course the commands can also be used on the command line.
-For details about golang testing refer to the standard documentation:
-
-* [Testing package](https://pkg.go.dev/testing)
-* [go test command](https://pkg.go.dev/cmd/go#hdr-Test_packages)
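A minimal test sketch following these conventions (the package and function names are illustrative, not taken from the repository; in a real black box test the function under test would be imported from its own package):

```go
package archive_test

import "testing"

// add is a stand-in for a public function of the package under test.
func add(a, b int) int { return a + b }

// TestAdd is a small table-driven test in the standard golang style.
func TestAdd(t *testing.T) {
	cases := []struct{ a, b, want int }{
		{1, 2, 3},
		{-1, 1, 0},
	}
	for _, c := range cases {
		if got := add(c.a, c.b); got != c.want {
			t.Errorf("add(%d, %d) = %d, want %d", c.a, c.b, got, c.want)
		}
	}
}
```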
diff --git a/docs/migrateTimestamps.pl b/docs/migrateTimestamps.pl
deleted file mode 100755
index cc9e2a2..0000000
--- a/docs/migrateTimestamps.pl
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env perl
-use strict;
-use warnings;
-use utf8;
-
-use JSON::PP; # from Perl default install
-use Time::Local qw( timelocal ); # from Perl default install
-use Time::Piece; # from Perl default install
-
-### JSON
-my $json = JSON::PP->new->allow_nonref;
-
-### TIME AND DATE
-# now
-my $localtime = localtime;
-my $epochtime = $localtime->epoch;
-# 5 days ago: Via epoch due to possible reverse month borders
-my $epochlessfive = $epochtime - (86400 * 5);
-my $locallessfive = localtime($epochlessfive);
-# Calc like `date --date 'TZ="Europe/Berlin" 0:00 5 days ago' +%s`
-my ($day, $month, $year) = ($locallessfive->mday, $locallessfive->_mon, $locallessfive->year);
-my $checkpointStart = timelocal(0, 0, 0, $day, $month, $year);
-# for checkpoints
-my $halfday = 43200;
-
-### JOB-ARCHIVE
-my $archiveTarget = './cc-backend/var/job-archive';
-my $archiveSrc = './source-data/job-archive-source';
-my @ArchiveClusters;
-
-# Gen folder
-if ( not -d $archiveTarget ){
-    mkdir( $archiveTarget ) or die "Couldn't create $archiveTarget directory, $!";
-}
-
-# Get clusters by job-archive/$subfolder
-opendir my $dh, $archiveSrc or die "can't open directory: $!";
-while ( readdir $dh ) {
-    chomp; next if $_ eq '.' or $_ eq '..' or $_ eq 'job-archive';
-    my $cluster = $_;
-    push @ArchiveClusters, $cluster;
-}
-
-# start for jobarchive
-foreach my $cluster ( @ArchiveClusters ) {
-    print "Starting to update start- and stoptimes in job-archive for $cluster\n";
-
-    my $clusterTarget = "$archiveTarget/$cluster";
-
-    if ( not -d $clusterTarget ){
-        mkdir( $clusterTarget ) or die "Couldn't create $clusterTarget directory, $!";
-    }
-
-    opendir my $dhLevel1, "$archiveSrc/$cluster" or die "can't open directory: $!";
-    while ( readdir $dhLevel1 ) {
-        chomp; next if $_ eq '.' or $_ eq '..';
-        my $level1 = $_;
-
-        if ( -d "$archiveSrc/$cluster/$level1" ) {
-            opendir my $dhLevel2, "$archiveSrc/$cluster/$level1" or die "can't open directory: $!";
-            while ( readdir $dhLevel2 ) {
-                chomp; next if $_ eq '.' or $_ eq '..';
-                my $level2 = $_;
-                my $jobSource = "$archiveSrc/$cluster/$level1/$level2";
-                my $jobOrigin = "$jobSource";
-                my $jobTargetL1 = "$clusterTarget/$level1";
-                my $jobTargetL2 = "$jobTargetL1/$level2";
-
-                # check if files are directly accessible (old format) else get subfolders as file and update path
-                if ( ! -e "$jobSource/meta.json") {
-                    opendir(D, "$jobSource") || die "Can't open directory $jobSource: $!\n";
-                    my @folders = readdir(D);
-                    closedir(D);
-                    if (!@folders) {
-                        next;
-                    }
-
-                    foreach my $folder ( @folders ) {
-                        next if $folder eq '.' or $folder eq '..';
-                        $jobSource = "$jobSource/".$folder;
-                    }
-                }
-                # check if subfolder contains file, else skip
-                if ( ! -e "$jobSource/meta.json") {
-                    print "$jobSource skipped\n";
-                    next;
-                }
-
-                open my $metafh, '<', "$jobSource/meta.json" or die "Can't open file $!";
-                my $rawstr = do { local $/; <$metafh> };
-                close($metafh);
-                my $metadata = $json->decode($rawstr);
-
-                # NOTE Start meta.json iteration here
-                # my $random_number = int(rand(UPPERLIMIT)) + LOWERLIMIT;
-                # Set new startTime: Between 5 days and 1 day before now
-
-                # Remove id from attributes
-                $metadata->{startTime} = $epochtime - (int(rand(432000)) + 86400);
-                $metadata->{stopTime} = $metadata->{startTime} + $metadata->{duration};
-
-                # Add starttime subfolder to target path
-                my $jobTargetL3 = "$jobTargetL2/".$metadata->{startTime};
-
-                if ( not -d $jobTargetL1 ){
-                    mkdir( $jobTargetL1 ) or die "Couldn't create $jobTargetL1 directory, $!";
-                }
-
-                if ( not -d $jobTargetL2 ){
-                    mkdir( $jobTargetL2 ) or die "Couldn't create $jobTargetL2 directory, $!";
-                }
-
-                # target is not directory
-                if ( not -d $jobTargetL3 ){
-                    mkdir( $jobTargetL3 ) or die "Couldn't create $jobTargetL3 directory, $!";
-
-                    my $outstr = $json->encode($metadata);
-                    open my $metaout, '>', "$jobTargetL3/meta.json" or die "Can't write to file $!";
-                    print $metaout $outstr;
-                    close($metaout);
-
-                    open my $datafh, '<', "$jobSource/data.json" or die "Can't open file $!";
-                    my $datastr = do { local $/; <$datafh> };
-                    close($datafh);
-
-                    open my $dataout, '>', "$jobTargetL3/data.json" or die "Can't write to file $!";
-                    print $dataout $datastr;
-                    close($dataout);
-                }
-            }
-        }
-    }
-}
-print "Done for job-archive\n";
-sleep(1);
-
-## CHECKPOINTS
-my $checkpTarget = './cc-metric-store/var/checkpoints';
-my $checkpSource = './source-data/cc-metric-store-source/checkpoints';
-my @CheckpClusters;
-
-# Gen folder
-if ( not -d $checkpTarget ){
-    mkdir( $checkpTarget ) or die "Couldn't create $checkpTarget directory, $!";
-}
-
-# Get clusters by cc-metric-store/$subfolder
-opendir my $dhc, $checkpSource or die "can't open directory: $!";
-while ( readdir $dhc ) {
-    chomp; next if $_ eq '.' or $_ eq '..' or $_ eq 'job-archive';
-    my $cluster = $_;
-    push @CheckpClusters, $cluster;
-}
-closedir($dhc);
-
-# start for checkpoints
-foreach my $cluster ( @CheckpClusters ) {
-    print "Starting to update checkpoint filenames and data starttimes for $cluster\n";
-
-    my $clusterTarget = "$checkpTarget/$cluster";
-
-    if ( not -d $clusterTarget ){
-        mkdir( $clusterTarget ) or die "Couldn't create $clusterTarget directory, $!";
-    }
-
-    opendir my $dhLevel1, "$checkpSource/$cluster" or die "can't open directory: $!";
-    while ( readdir $dhLevel1 ) {
-        chomp; next if $_ eq '.' or $_ eq '..';
-        # Nodename as level1-folder
-        my $level1 = $_;
-
-        if ( -d "$checkpSource/$cluster/$level1" ) {
-
-            my $nodeSource = "$checkpSource/$cluster/$level1/";
-            my $nodeOrigin = "$nodeSource";
-            my $nodeTarget = "$clusterTarget/$level1";
-            my @files;
-
-            if ( -e "$nodeSource/1609459200.json") { # 1609459200 == First Checkpoint time in latest dump
-                opendir(D, "$nodeSource") || die "Can't open directory $nodeSource: $!\n";
-                while ( readdir D ) {
-                    chomp; next if $_ eq '.' or $_ eq '..';
-                    my $nodeFile = $_;
-                    push @files, $nodeFile;
-                }
-                closedir(D);
-                my $length = @files;
-                if (!@files || $length != 14) { # needs 14 files == 7 days worth of data
-                    next;
-                }
-            } else {
-                next;
-            }
-
-            # sort by integer timestamp-filename-part: Guarantees start with index == 0 == 1609459200.json
-            my @sortedFiles = sort { ($a =~ /^([0-9]{10}).json$/)[0] <=> ($b =~ /^([0-9]{10}).json$/)[0] } @files;
-
-            if ( not -d $nodeTarget ){
-                mkdir( $nodeTarget ) or die "Couldn't create $nodeTarget directory, $!";
-
-                while (my ($index, $file) = each(@sortedFiles)) {
-                    open my $checkfh, '<', "$nodeSource/$file" or die "Can't open file $!";
-                    my $rawstr = do { local $/; <$checkfh> };
-                    close($checkfh);
-                    my $checkpdata = $json->decode($rawstr);
-
-                    my $newTimestamp = $checkpointStart + ($index * $halfday);
-                    # Get diff from old timestamp
-                    my $timeDiff = $newTimestamp - $checkpdata->{from};
-                    # Set new timestamp
-                    $checkpdata->{from} = $newTimestamp;
-
-                    foreach my $metric (keys %{$checkpdata->{metrics}}) {
-                        $checkpdata->{metrics}->{$metric}->{start} += $timeDiff;
-                    }
-
-                    my $outstr = $json->encode($checkpdata);
-
-                    open my $checkout, '>', "$nodeTarget/$newTimestamp.json" or die "Can't write to file $!";
-                    print $checkout $outstr;
-                    close($checkout);
-                }
-            }
-        }
-    }
-    closedir($dhLevel1);
-}
-print "Done for checkpoints\n";
diff --git a/docs/searchbar.md b/docs/searchbar.md
deleted file mode 100644
index 0ecef53..0000000
--- a/docs/searchbar.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Docs for ClusterCockpit Searchbar
-
-## Usage
-
-* Searchtags are implemented as a `type:` prefix followed by the search string
-  * Types `jobId, jobName, projectId, username, name, arrayJobId` for roles `admin` and `support`
-    * `jobName` is the jobName as persisted in the `job.meta_data` table-column
-    * `username` is the actual account identifier as persisted in the `job.user` table-column
-    * `name` is the account owner's name as persisted in the `user.name` table-column
-  * Types `jobId, jobName, projectId, arrayJobId` for role `user`
-  * Examples:
-    * `jobName:myJob12`
-    * `jobId:123456`
-    * `username:abcd100`
-    * `name:Paul`
-* If no searchTag is used: Best guess search with the following hierarchy
-  * `jobId -> username -> name -> projectId -> jobName`
-* Destinations:
-  * JobId: Job-Table (Allows multiple identical matches, e.g. JobIds from different clusters)
-  * JobName: Job-Table (Allows multiple identical matches, e.g. JobNames from different clusters)
-  * ProjectId: Job-Table
-  * Username: Users-Table
-    * **Please Note**: Only users with jobs will be shown in the table! I.e., users without jobs will be missing from the table. Also, a `Last 30 Days` filter is active by default and might filter out expected users.
-  * Name: Users-Table
-    * **Please Note**: Only users with jobs will be shown in the table! I.e., users without jobs will be missing from the table. Also, a `Last 30 Days` filter is active by default and might filter out expected users.
-  * ArrayJobId: Job-Table (Lists all jobs of the queried ArrayJobId)
-  * Best guess search always redirects to the Job-Table or `/monitoring/user/$USER` (first username match)
-  * Unprocessable queries will display messages detailing the cause (Info, Warning, Error)
-* Spaces are trimmed (both for searchTag and queryString)
-  * ` job12` == `job12`
-  * `projectID : abcd ` == `projectId:abcd`
-* `jobName`- and `name-`queries work with a part of the target string
-  * `jobName:myjob` for jobName "myjob_cluster1"
-  * `name:Paul` for name "Paul Atreides"
-
-* The JobName GQL query is resolved by matching the query string as part of the whole metaData JSON in the SQL DB.
diff --git a/go.mod b/go.mod index b0ce960..fddcfbc 100644 --- a/go.mod +++ b/go.mod @@ -3,13 +3,14 @@ module github.com/ClusterCockpit/cc-backend go 1.18 require ( - github.com/99designs/gqlgen v0.17.36 + github.com/99designs/gqlgen v0.17.45 github.com/ClusterCockpit/cc-units v0.4.0 github.com/Masterminds/squirrel v1.5.3 + github.com/coreos/go-oidc/v3 v3.9.0 github.com/go-co-op/gocron v1.25.0 github.com/go-ldap/ldap/v3 v3.4.4 github.com/go-sql-driver/mysql v1.7.0 - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang-jwt/jwt/v5 v5.2.1 github.com/golang-migrate/migrate/v4 v4.15.2 github.com/google/gops v0.3.27 github.com/gorilla/handlers v1.5.1 @@ -23,10 +24,11 @@ require ( github.com/qustavo/sqlhooks/v2 v2.1.0 github.com/santhosh-tekuri/jsonschema/v5 v5.2.0 github.com/swaggo/http-swagger v1.3.3 - github.com/swaggo/swag v1.16.1 - github.com/vektah/gqlparser/v2 v2.5.8 - golang.org/x/crypto v0.12.0 + github.com/swaggo/swag v1.16.3 + github.com/vektah/gqlparser/v2 v2.5.11 + golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea + golang.org/x/oauth2 v0.13.0 ) require ( @@ -36,22 +38,23 @@ require ( github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/containerd v1.6.18 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/containerd/containerd v1.6.26 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/deepmap/oapi-codegen v1.12.4 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/spec v0.20.9 // indirect - github.com/go-openapi/swag v0.22.4 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket
v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.3 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect @@ -64,25 +67,26 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sosodev/duration v1.2.0 // indirect github.com/swaggo/files v1.0.0 // indirect - github.com/urfave/cli/v2 v2.25.7 // indirect - github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect + github.com/urfave/cli/v2 v2.27.1 // indirect + github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.uber.org/atomic v1.10.0 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.14.0 // indirect - golang.org/x/oauth2 v0.5.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/tools v0.12.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + golang.org/x/mod v0.16.0 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.19.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 379bda7..94d59c8 100644 --- a/go.sum +++ b/go.sum @@ -50,8 +50,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -github.com/99designs/gqlgen v0.17.36 h1:u/o/rv2SZ9s5280dyUOOrkpIIkr/7kITMXYD3rkJ9go= -github.com/99designs/gqlgen v0.17.36/go.mod h1:6RdyY8puhCoWAQVr2qzF2OMVfudQzc8ACxzpzluoQm4= +github.com/99designs/gqlgen v0.17.45 h1:bH0AH67vIJo8JKNKPJP+pOPpQhZeuVRQLf53dKIpDik= +github.com/99designs/gqlgen v0.17.45/go.mod h1:Bas0XQ+Jiu/Xm5E33jC8sES3G+iC2esHBMXcq0fUPs0= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -108,11 +108,13 @@ github.com/Microsoft/hcsshim 
v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.10 h1:TxXGNmcbQxBKVWvjvTocNb6jrPyeHlk5EiDhhgHgggs= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/goquery v1.9.1 h1:mTL6XjbJTZdpfL+Gwl5U2h1l9yEkJjhmlTeV9VPW7UI= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -130,7 +132,7 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= @@ -273,8 +275,8 @@ github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTV github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= -github.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns= -github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw= +github.com/containerd/containerd v1.6.26 h1:VVfrE6ZpyisvB1fzoY8Vkiq4sy+i5oF4uk7zu03RaHs= +github.com/containerd/containerd v1.6.26/go.mod h1:I4TRdsdoo5MlKob5khDJS2EPT1l1oMNaE2MBm6FrwxM= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -303,6 +305,7 @@ github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod 
h1:6T github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= @@ -340,6 +343,8 @@ github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= +github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -353,8 +358,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -452,6 +457,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -473,28 +480,24 @@ github.com/go-openapi/jsonpointer 
v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -548,8 +551,8 @@ github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc= github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -587,8 +590,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -610,6 +614,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -642,8 +647,9 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -700,8 +706,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE= -github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -969,8 +975,8 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1064,7 +1070,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -1084,7 +1090,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod 
h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1097,14 +1102,16 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sosodev/duration v1.2.0 h1:pqK/FLSjsAADWY74SyWDCjOcd5l7H8GSnnOGEB9A1Us= +github.com/sosodev/duration v1.2.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1130,8 +1137,6 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1139,18 +1144,15 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/swaggo/files v1.0.0 h1:1gGXVIeUFCS/dta17rnP0iOpr6CXFwKD7EO5ID233e4= github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO13kc= github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc= github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo= -github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg= -github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto= +github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= +github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1165,10 +1167,10 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= -github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= -github.com/vektah/gqlparser/v2 v2.5.8 h1:pm6WOnGdzFOCfcQo9L3+xzW51mKrlwTEg4Wr7AH1JW4= -github.com/vektah/gqlparser/v2 v2.5.8/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME= +github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= +github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8= +github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1188,8 +1190,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 
h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1284,8 +1286,9 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1336,8 +1339,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1407,8 +1411,10 @@ golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1427,8 +1433,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1442,7 +1448,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1566,14 +1573,20 @@ golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys 
v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1583,9 +1596,12 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1674,8 +1690,9 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1728,8 +1745,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1801,7 +1819,8 @@ google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1835,7 +1854,7 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1850,8 +1869,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 
h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1986,5 +2005,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/api/api_test.go b/internal/api/api_test.go index ecffc82..871afc9 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/api/docs.go b/internal/api/docs.go index bf70cdb..e5ec50b 100644 --- a/internal/api/docs.go +++ b/internal/api/docs.go @@ -1,5 +1,4 @@ -// Code generated by swaggo/swag. DO NOT EDIT. - +// Package api Code generated by swaggo/swag. DO NOT EDIT package api import "github.com/swaggo/swag" @@ -24,6 +23,63 @@ const docTemplate = `{ "host": "{{.Host}}", "basePath": "{{.BasePath}}", "paths": { + "/clusters/": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Get a list of all cluster configs. 
A specific cluster can be requested using a query parameter.", + "produces": [ + "application/json" + ], + "tags": [ + "Cluster query" + ], + "summary": "Lists all cluster configs", + "parameters": [ + { + "type": "string", + "description": "Job Cluster", + "name": "cluster", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Array of clusters", + "schema": { + "$ref": "#/definitions/api.GetClustersApiResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + } + }, "/jobs/": { "get": { "security": [ @@ -334,6 +390,76 @@ const docTemplate = `{ } } }, + "/jobs/edit_meta/{id}": { + "post": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Edit key value pairs in job metadata json\nIf a key already exists, its content will be overwritten", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Job add and modify" + ], + "summary": "Edit meta-data json", + "parameters": [ + { + "type": "integer", + "description": "Job Database ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Key value pair to add", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.EditMetaRequest" + } + } + ], + "responses": { + "200": { + "description": "Updated job resource", + "schema": { + "$ref": "#/definitions/schema.Job" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "404": { + "description": "Job does not exist", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + } + }, "/jobs/start_job/": { "post": { "security": [ @@ -631,6 +757,80 @@ const docTemplate = `{ } }, "/jobs/{id}": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Job to get is specified by database ID\nReturns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.", + "produces": [ + "application/json" + ], + "tags": [ + "Job query" + ], + "summary": "Get job meta and optional all metric data", + "parameters": [ + { + "type": "integer", + "description": "Database ID of Job", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Include all available metrics", + "name": "all-metrics", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Job resource", + "schema": { + "$ref": "#/definitions/api.GetJobApiResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "401": { + "description": "Unauthorized", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": 
"#/definitions/api.ErrorResponse" + } + }, + "422": { + "description": "Unprocessable Entity: finding job failed: sql: no rows in result set", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/api.ErrorResponse" + } + } + } + }, "post": { "security": [ { @@ -647,7 +847,7 @@ const docTemplate = `{ "tags": [ "Job query" ], - "summary": "Get complete job meta and metric data", + "summary": "Get job meta and configurable metric data", "parameters": [ { "type": "integer", @@ -1121,6 +1321,19 @@ const docTemplate = `{ } } }, + "api.EditMetaRequest": { + "type": "object", + "properties": { + "key": { + "type": "string", + "example": "jobScript" + }, + "value": { + "type": "string", + "example": "bash script" + } + } + }, "api.ErrorResponse": { "type": "object", "properties": { @@ -1134,6 +1347,18 @@ const docTemplate = `{ } } }, + "api.GetClustersApiResponse": { + "type": "object", + "properties": { + "clusters": { + "description": "Array of clusters", + "type": "array", + "items": { + "$ref": "#/definitions/schema.Cluster" + } + } + } + }, "api.GetJobApiResponse": { "type": "object", "properties": { @@ -1229,6 +1454,40 @@ const docTemplate = `{ } } }, + "schema.Accelerator": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "model": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "schema.Cluster": { + "type": "object", + "properties": { + "metricConfig": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.MetricConfig" + } + }, + "name": { + "type": "string" + }, + "subClusters": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.SubCluster" + } + } + } + }, "schema.Job": { "description": "Information of a HPC job.", "type": "object", @@ -1259,6 +1518,10 @@ const docTemplate = `{ "minimum": 0, "example": 1 }, + "flopsAnyAvg": { + "description": "FlopsAnyAvg as Float64", + "type": "number" + }, "id": { "description": "The unique identifier of a job in the database", "type": "integer" @@ -1285,6 +1548,18 @@ const docTemplate = `{ ], "example": "completed" }, + "loadAvg": { + "description": "LoadAvg as Float64", + "type": "number" + }, + "memBwAvg": { + "description": "MemBwAvg as Float64", + "type": "number" + }, + "memUsedMax": { + "description": "MemUsedMax as Float64", + "type": "number" + }, "metaData": { "description": "Additional information about the job", "type": "object", @@ -1611,6 +1886,44 @@ const docTemplate = `{ } } }, + "schema.MetricConfig": { + "type": "object", + "properties": { + "aggregation": { + "type": "string" + }, + "alert": { + "type": "number" + }, + "caution": { + "type": "number" + }, + "name": { + "type": "string" + }, + "normal": { + "type": "number" + }, + "peak": { + "type": "number" + }, + "scope": { + "$ref": "#/definitions/schema.MetricScope" + }, + "subClusters": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.SubClusterConfig" + } + }, + "timestep": { + "type": "integer" + }, + "unit": { + "$ref": "#/definitions/schema.Unit" + } + } + }, "schema.MetricScope": { "type": "string", "enum": [ @@ -1646,6 +1959,17 @@ const docTemplate = `{ } } }, + "schema.MetricValue": { + "type": "object", + "properties": { + "unit": { + "$ref": "#/definitions/schema.Unit" + }, + "value": { + "type": "number" + } + } + }, "schema.Resource": { "description": "A resource used by a job", "type": "object", @@ -1726,6 +2050,64 @@ const docTemplate = `{ } } }, + "schema.SubCluster": { 
+ "type": "object", + "properties": { + "coresPerSocket": { + "type": "integer" + }, + "flopRateScalar": { + "$ref": "#/definitions/schema.MetricValue" + }, + "flopRateSimd": { + "$ref": "#/definitions/schema.MetricValue" + }, + "memoryBandwidth": { + "$ref": "#/definitions/schema.MetricValue" + }, + "name": { + "type": "string" + }, + "nodes": { + "type": "string" + }, + "processorType": { + "type": "string" + }, + "socketsPerNode": { + "type": "integer" + }, + "threadsPerCore": { + "type": "integer" + }, + "topology": { + "$ref": "#/definitions/schema.Topology" + } + } + }, + "schema.SubClusterConfig": { + "type": "object", + "properties": { + "alert": { + "type": "number" + }, + "caution": { + "type": "number" + }, + "name": { + "type": "string" + }, + "normal": { + "type": "number" + }, + "peak": { + "type": "number" + }, + "remove": { + "type": "boolean" + } + } + }, "schema.Tag": { "description": "Defines a tag using name and type.", "type": "object", @@ -1746,6 +2128,59 @@ const docTemplate = `{ } } }, + "schema.Topology": { + "type": "object", + "properties": { + "accelerators": { + "type": "array", + "items": { + "$ref": "#/definitions/schema.Accelerator" + } + }, + "core": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "die": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "memoryDomain": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + }, + "node": { + "type": "array", + "items": { + "type": "integer" + } + }, + "socket": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + } + } + }, "schema.Unit": { "type": "object", "properties": { diff --git a/internal/api/rest.go b/internal/api/rest.go index 11682ee..e43cf51 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
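The rest.go hunks below wire up two new REST endpoints, GET /clusters/ and POST /jobs/edit_meta/{id}, alongside the existing job routes. A minimal client sketch for exercising both follows; the base URL, the CC_JWT environment variable, and the job database ID 123 are illustrative assumptions, while the X-Auth-Token header and the EditMetaRequest payload shape are taken from the handlers below.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	base := "http://localhost:8080/api" // assumed listen address
	token := os.Getenv("CC_JWT")        // hypothetical env var holding an API JWT

	// List cluster configs, optionally filtered: GET /clusters/?cluster=<name>
	req, _ := http.NewRequest(http.MethodGet, base+"/clusters/?cluster=fritz", nil)
	req.Header.Set("X-Auth-Token", token)
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(res.Body)
	res.Body.Close()
	fmt.Println(res.Status, string(body))

	// Add or overwrite one metadata key: POST /jobs/edit_meta/{id}
	payload := bytes.NewBufferString(`{"key": "jobScript", "value": "bash script"}`)
	req, _ = http.NewRequest(http.MethodPost, base+"/jobs/edit_meta/123", payload)
	req.Header.Set("X-Auth-Token", token)
	req.Header.Set("Content-Type", "application/json")
	res, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	body, _ = io.ReadAll(res.Body)
	res.Body.Close()
	fmt.Println(res.Status, string(body))
}

Both endpoints require a token carrying the api role, as enforced by the role checks at the top of each handler below.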
@@ -70,12 +70,16 @@ func (api *RestApi) MountRoutes(r *mux.Router) { r.HandleFunc("/jobs/", api.getJobs).Methods(http.MethodGet) r.HandleFunc("/jobs/{id}", api.getJobById).Methods(http.MethodPost) + r.HandleFunc("/jobs/{id}", api.getCompleteJobById).Methods(http.MethodGet) r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch) + r.HandleFunc("/jobs/edit_meta/{id}", api.editMeta).Methods(http.MethodPost, http.MethodPatch) r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet) r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete) r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete) + r.HandleFunc("/clusters/", api.getClusters).Methods(http.MethodGet) + if api.MachineStateDir != "" { r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet) r.HandleFunc("/machine_state/{cluster}/{host}", api.putMachineState).Methods(http.MethodPut, http.MethodPost) @@ -110,12 +114,11 @@ type UpdateUserApiResponse struct { // StopJobApiRequest model type StopJobApiRequest struct { - // Stop Time of job as epoch + JobId *int64 `json:"jobId" example:"123000"` + Cluster *string `json:"cluster" example:"fritz"` + StartTime *int64 `json:"startTime" example:"1649723812"` + State schema.JobState `json:"jobState" validate:"required" example:"completed"` StopTime int64 `json:"stopTime" validate:"required" example:"1649763839"` - State schema.JobState `json:"jobState" validate:"required" example:"completed"` // Final job state - JobId *int64 `json:"jobId" example:"123000"` // Cluster Job ID of job - Cluster *string `json:"cluster" example:"fritz"` // Cluster of job - StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch } // DeleteJobApiRequest model @@ -132,6 +135,11 @@ type GetJobsApiResponse struct { Page int `json:"page"` // Page id returned } +// GetClustersApiResponse model +type GetClustersApiResponse struct { + Clusters []*schema.Cluster `json:"clusters"` // Array of clusters +} + // ErrorResponse model type ErrorResponse struct { // Statustext of Errorcode @@ -146,6 +154,12 @@ type ApiTag struct { Name string `json:"name" example:"Testjob"` // Tag Name } +// EditMetaRequest model +type EditMetaRequest struct { + Key string `json:"key" example:"jobScript"` + Value string `json:"value" example:"bash script"` +} + type TagJobApiRequest []*ApiTag type GetJobApiRequest []string @@ -155,10 +169,15 @@ type GetJobApiResponse struct { Data []*JobMetricWithName } +type GetCompleteJobApiResponse struct { + Meta *schema.Job + Data schema.JobData +} + type JobMetricWithName struct { + Metric *schema.JobMetric `json:"metric"` Name string `json:"name"` Scope schema.MetricScope `json:"scope"` - Metric *schema.JobMetric `json:"metric"` } type ApiReturnedUser struct { @@ -223,6 +242,55 @@ func securedCheck(r *http.Request) error { return nil } +// getClusters godoc +// @summary Lists all cluster configs +// @tags Cluster query +// @description Get a list of all cluster configs. A specific cluster can be requested using a query parameter. 
+// @produce json +// @param cluster query string false "Job Cluster" +// @success 200 {object} api.GetClustersApiResponse "Array of clusters" +// @failure 400 {object} api.ErrorResponse "Bad Request" +// @failure 401 {object} api.ErrorResponse "Unauthorized" +// @failure 403 {object} api.ErrorResponse "Forbidden" +// @failure 500 {object} api.ErrorResponse "Internal Server Error" +// @security ApiKeyAuth +// @router /clusters/ [get] +func (api *RestApi) getClusters(rw http.ResponseWriter, r *http.Request) { + if user := repository.GetUserFromContext(r.Context()); user != nil && + !user.HasRole(schema.RoleApi) { + + handleError(fmt.Errorf("missing role: %v", schema.GetRoleString(schema.RoleApi)), http.StatusForbidden, rw) + return + } + + rw.Header().Add("Content-Type", "application/json") + bw := bufio.NewWriter(rw) + defer bw.Flush() + + var clusters []*schema.Cluster + + if r.URL.Query().Has("cluster") { + name := r.URL.Query().Get("cluster") + cluster := archive.GetCluster(name) + if cluster == nil { + handleError(fmt.Errorf("unknown cluster: %s", name), http.StatusBadRequest, rw) + return + } + clusters = append(clusters, cluster) + } else { + clusters = archive.Clusters + } + + payload := GetClustersApiResponse{ + Clusters: clusters, + } + + if err := json.NewEncoder(bw).Encode(payload); err != nil { + handleError(err, http.StatusInternalServerError, rw) + return + } +} + // getJobs godoc // @summary Lists all jobs // @tags Job query @@ -243,7 +311,6 @@ func securedCheck(r *http.Request) error { // @security ApiKeyAuth // @router /jobs/ [get] func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { - if user := repository.GetUserFromContext(r.Context()); user != nil && !user.HasRole(schema.RoleApi) { @@ -342,10 +409,8 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { if res.MonitoringStatus == schema.MonitoringStatusArchivingSuccessful { res.Statistics, err = archive.GetStatistics(job) if err != nil { - if err != nil { - handleError(err, http.StatusInternalServerError, rw) - return - } + handleError(err, http.StatusInternalServerError, rw) + return } } @@ -370,14 +435,107 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { } // getJobById godoc -// @summary Get complete job meta and metric data +// @summary Get job meta and optional all metric data +// @tags Job query +// @description Job to get is specified by database ID +// @description Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'. 
+// @produce json +// @param id path int true "Database ID of Job" +// @param all-metrics query bool false "Include all available metrics" +// @success 200 {object} api.GetJobApiResponse "Job resource" +// @failure 400 {object} api.ErrorResponse "Bad Request" +// @failure 401 {object} api.ErrorResponse "Unauthorized" +// @failure 403 {object} api.ErrorResponse "Forbidden" +// @failure 404 {object} api.ErrorResponse "Resource not found" +// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set" +// @failure 500 {object} api.ErrorResponse "Internal Server Error" +// @security ApiKeyAuth +// @router /jobs/{id} [get] +func (api *RestApi) getCompleteJobById(rw http.ResponseWriter, r *http.Request) { + if user := repository.GetUserFromContext(r.Context()); user != nil && + !user.HasRole(schema.RoleApi) { + + handleError(fmt.Errorf("missing role: %v", + schema.GetRoleString(schema.RoleApi)), http.StatusForbidden, rw) + return + } + + // Fetch job from db + id, ok := mux.Vars(r)["id"] + var job *schema.Job + var err error + if ok { + id, e := strconv.ParseInt(id, 10, 64) + if e != nil { + handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw) + return + } + + job, err = api.JobRepository.FindById(id) + } else { + handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw) + return + } + if err != nil { + handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) + return + } + + job.Tags, err = api.JobRepository.GetTags(&job.ID) + if err != nil { + handleError(err, http.StatusInternalServerError, rw) + return + + } + if _, err = api.JobRepository.FetchMetadata(job); err != nil { + + handleError(err, http.StatusInternalServerError, rw) + return + } + + var scopes []schema.MetricScope + + if job.NumNodes == 1 { + scopes = []schema.MetricScope{"core"} + } else { + scopes = []schema.MetricScope{"node"} + } + + var data schema.JobData + + if r.URL.Query().Get("all-metrics") == "true" { + data, err = metricdata.LoadData(job, nil, scopes, r.Context()) + if err != nil { + handleError(fmt.Errorf("loading job data failed: %w", err), http.StatusInternalServerError, rw) + return + } + } + + log.Debugf("/api/job/%s: get job %d", id, job.JobID) + rw.Header().Add("Content-Type", "application/json") + bw := bufio.NewWriter(rw) + defer bw.Flush() + + payload := GetCompleteJobApiResponse{ + Meta: job, + Data: data, + } + + if err := json.NewEncoder(bw).Encode(payload); err != nil { + handleError(err, http.StatusInternalServerError, rw) + return + } +} + +// getJobById godoc +// @summary Get job meta and configurable metric data // @tags Job query // @description Job to get is specified by database ID // @description Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'. 
// @accept json // @produce json -// @param id path int true "Database ID of Job" -// @param request body api.GetJobApiRequest true "Array of metric names" +// @param id path int true "Database ID of Job" +// @param request body api.GetJobApiRequest true "Array of metric names" // @success 200 {object} api.GetJobApiResponse "Job resource" // @failure 400 {object} api.ErrorResponse "Bad Request" // @failure 401 {object} api.ErrorResponse "Unauthorized" @@ -417,6 +575,18 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { return } + job.Tags, err = api.JobRepository.GetTags(&job.ID) + if err != nil { + handleError(err, http.StatusInternalServerError, rw) + return + + } + if _, err = api.JobRepository.FetchMetadata(job); err != nil { + + handleError(err, http.StatusInternalServerError, rw) + return + } + var metrics GetJobApiRequest if err = decode(r.Body, &metrics); err != nil { http.Error(rw, err.Error(), http.StatusBadRequest) return } @@ -464,6 +634,57 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) { } } +// editMeta godoc +// @summary Edit meta-data json +// @tags Job add and modify +// @description Edit key value pairs in job metadata json +// @description If a key already exists, its content will be overwritten +// @accept json +// @produce json +// @param id path int true "Job Database ID" +// @param request body api.EditMetaRequest true "Key value pair to add" +// @success 200 {object} schema.Job "Updated job resource" +// @failure 400 {object} api.ErrorResponse "Bad Request" +// @failure 401 {object} api.ErrorResponse "Unauthorized" +// @failure 404 {object} api.ErrorResponse "Job does not exist" +// @failure 500 {object} api.ErrorResponse "Internal Server Error" +// @security ApiKeyAuth +// @router /jobs/edit_meta/{id} [post] +func (api *RestApi) editMeta(rw http.ResponseWriter, r *http.Request) { + if user := repository.GetUserFromContext(r.Context()); user != nil && + !user.HasRole(schema.RoleApi) { + handleError(fmt.Errorf("missing role: %v", schema.GetRoleString(schema.RoleApi)), http.StatusForbidden, rw) + return + } + + iid, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + job, err := api.JobRepository.FindById(iid) + if err != nil { + http.Error(rw, err.Error(), http.StatusNotFound) + return + } + + var req EditMetaRequest + if err := decode(r.Body, &req); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + if err := api.JobRepository.UpdateMetadata(job, req.Key, req.Value); err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + rw.Header().Add("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + json.NewEncoder(rw).Encode(job) +} + // tagJob godoc // @summary Adds one or more tags to a job // @tags Job add and modify @@ -873,7 +1094,6 @@ func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { } func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Job, req StopJobApiRequest) { - // Sanity checks if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning { handleError(errors.New("stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw) @@ -1015,12 +1235,13 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { Password: password, Email: email, Projects: []string{project}, - Roles: []string{role}}); err != nil { 
+ Roles: []string{role}, + }); err != nil { http.Error(rw, err.Error(), http.StatusUnprocessableEntity) return } - rw.Write([]byte(fmt.Sprintf("User %v successfully created!\n", username))) + fmt.Fprintf(rw, "User %v successfully created!\n", username) } // deleteUser godoc diff --git a/internal/auth/auth.go b/internal/auth/auth.go index e8f0db4..bedd9c7 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -27,18 +27,18 @@ type Authenticator interface { } type Authentication struct { - sessionStore *sessions.CookieStore - SessionMaxAge time.Duration - - authenticators []Authenticator + sessionStore *sessions.CookieStore LdapAuth *LdapAuthenticator JwtAuth *JWTAuthenticator LocalAuth *LocalAuthenticator + authenticators []Authenticator + SessionMaxAge time.Duration } func (auth *Authentication) AuthViaSession( rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { + r *http.Request, +) (*schema.User, error) { session, err := auth.sessionStore.Get(r, "session") if err != nil { log.Error("Error while getting session store") @@ -129,10 +129,46 @@ func Init() (*Authentication, error) { return auth, nil } +func persistUser(user *schema.User) { + r := repository.GetUserRepository() + _, err := r.GetUser(user.Username) + + if err != nil && err != sql.ErrNoRows { + log.Errorf("Error while loading user '%s': %v", user.Username, err) + } else if err == sql.ErrNoRows { + if err := r.AddUser(user); err != nil { + log.Errorf("Error while adding user '%s' to DB: %v", user.Username, err) + } + } +} + +func (auth *Authentication) SaveSession(rw http.ResponseWriter, r *http.Request, user *schema.User) error { + session, err := auth.sessionStore.New(r, "session") + if err != nil { + log.Errorf("session creation failed: %s", err.Error()) + http.Error(rw, err.Error(), http.StatusInternalServerError) + return err + } + + if auth.SessionMaxAge != 0 { + session.Options.MaxAge = int(auth.SessionMaxAge.Seconds()) + } + session.Values["username"] = user.Username + session.Values["projects"] = user.Projects + session.Values["roles"] = user.Roles + if err := auth.sessionStore.Save(r, rw, session); err != nil { + log.Warnf("session save failed: %s", err.Error()) + http.Error(rw, err.Error(), http.StatusInternalServerError) + return err + } + + return nil +} + func (auth *Authentication) Login( onsuccess http.Handler, - onfailure func(rw http.ResponseWriter, r *http.Request, loginErr error)) http.Handler { - + onfailure func(rw http.ResponseWriter, r *http.Request, loginErr error), +) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { username := r.FormValue("username") var dbUser *schema.User @@ -161,22 +197,7 @@ func (auth *Authentication) Login( return } - session, err := auth.sessionStore.New(r, "session") - if err != nil { - log.Errorf("session creation failed: %s", err.Error()) - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - if auth.SessionMaxAge != 0 { - session.Options.MaxAge = int(auth.SessionMaxAge.Seconds()) - } - session.Values["username"] = user.Username - session.Values["projects"] = user.Projects - session.Values["roles"] = user.Roles - if err := auth.sessionStore.Save(r, rw, session); err != nil { - log.Warnf("session save failed: %s", err.Error()) - 
http.Error(rw, err.Error(), http.StatusInternalServerError) + if err := auth.SaveSession(rw, r, user); err != nil { return } @@ -193,10 +214,9 @@ func (auth *Authentication) Login( func (auth *Authentication) Auth( onsuccess http.Handler, - onfailure func(rw http.ResponseWriter, r *http.Request, authErr error)) http.Handler { - + onfailure func(rw http.ResponseWriter, r *http.Request, authErr error), +) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - user, err := auth.JwtAuth.AuthViaJWT(rw, r) if err != nil { log.Infof("authentication failed: %s", err.Error()) diff --git a/internal/auth/jwt.go b/internal/auth/jwt.go index 83bfee3..7bac278 100644 --- a/internal/auth/jwt.go +++ b/internal/auth/jwt.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -17,7 +17,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" - "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v5" ) type JWTAuthenticator struct { @@ -49,8 +49,8 @@ func (ja *JWTAuthenticator) Init() error { func (ja *JWTAuthenticator) AuthViaJWT( rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { - + r *http.Request, +) (*schema.User, error) { rawtoken := r.Header.Get("X-Auth-Token") if rawtoken == "" { rawtoken = r.Header.Get("Authorization") @@ -73,9 +73,9 @@ func (ja *JWTAuthenticator) AuthViaJWT( log.Warn("Error while parsing JWT token") return nil, err } - if err := token.Claims.Valid(); err != nil { + if !token.Valid { log.Warn("jwt token claims are not valid") - return nil, err + return nil, errors.New("jwt token claims are not valid") } // Token is valid, extract payload @@ -88,7 +88,6 @@ func (ja *JWTAuthenticator) AuthViaJWT( if config.Keys.JwtConfig.ValidateUser { ur := repository.GetUserRepository() user, err := ur.GetUser(sub) - // Deny any logins for unknown usernames if err != nil { log.Warn("Could not find user from JWT in internal database.") @@ -117,7 +116,6 @@ func (ja *JWTAuthenticator) AuthViaJWT( // Generate a new JWT that can be used for authentication func (ja *JWTAuthenticator) ProvideJWT(user *schema.User) (string, error) { - if ja.privateKey == nil { return "", errors.New("environment variable 'JWT_PRIVATE_KEY' not set") } diff --git a/internal/auth/jwtCookieSession.go b/internal/auth/jwtCookieSession.go index 3748836..926f7ba 100644 --- a/internal/auth/jwtCookieSession.go +++ b/internal/auth/jwtCookieSession.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
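The session refactor above centralizes cookie-session creation in SaveSession for every login path (the form login here, the OIDC flow below), and SaveSession writes the HTTP error response itself on failure. A minimal sketch of the resulting caller contract, assuming only the exported surface of this patch; loginSuccess is a hypothetical name, not part of the change:

package auth

import (
	"net/http"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// loginSuccess shows how a login path is expected to use SaveSession:
// on error the response has already been written, so the caller only aborts.
func loginSuccess(auth *Authentication, rw http.ResponseWriter, r *http.Request, user *schema.User) {
	if err := auth.SaveSession(rw, r, user); err != nil {
		return // SaveSession already sent http.StatusInternalServerError
	}
	http.Redirect(rw, r, "/", http.StatusTemporaryRedirect)
}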
@@ -17,7 +17,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" - "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v5" ) type JWTCookieSessionAuthenticator struct { @@ -90,8 +90,8 @@ func (ja *JWTCookieSessionAuthenticator) CanLogin( user *schema.User, username string, rw http.ResponseWriter, - r *http.Request) (*schema.User, bool) { - + r *http.Request, +) (*schema.User, bool) { jc := config.Keys.JwtConfig cookieName := "" if jc.CookieName != "" { @@ -113,8 +113,8 @@ func (ja *JWTCookieSessionAuthenticator) CanLogin( func (ja *JWTCookieSessionAuthenticator) Login( user *schema.User, rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { - + r *http.Request, +) (*schema.User, error) { jc := config.Keys.JwtConfig jwtCookie, err := r.Cookie(jc.CookieName) var rawtoken string @@ -144,10 +144,9 @@ func (ja *JWTCookieSessionAuthenticator) Login( return nil, err } - // Check token validity and extract paypload - if err := token.Claims.Valid(); err != nil { + if !token.Valid { log.Warn("jwt token claims are not valid") - return nil, err + return nil, errors.New("jwt token claims are not valid") } claims := token.Claims.(jwt.MapClaims) @@ -200,9 +199,7 @@ func (ja *JWTCookieSessionAuthenticator) Login( } if jc.SyncUserOnLogin { - if err := repository.GetUserRepository().AddUser(user); err != nil { - log.Errorf("Error while adding user '%s' to DB", user.Username) - } + persistUser(user) } } diff --git a/internal/auth/jwtSession.go b/internal/auth/jwtSession.go index 286bb82..765a9fd 100644 --- a/internal/auth/jwtSession.go +++ b/internal/auth/jwtSession.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
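The jwt/v4 to jwt/v5 migration in these authenticators follows a single pattern: v5 removed the Valid() method from the Claims interface and validates the registered claims inside Parse itself, so callers now check the token.Valid flag instead of calling token.Claims.Valid(). A minimal sketch of that pattern, assuming only the golang-jwt/v5 API; parseAndCheck is a hypothetical helper, not part of the patch:

package auth

import (
	"errors"

	"github.com/golang-jwt/jwt/v5"
)

// parseAndCheck parses a raw token with the given key function. In v5,
// claim validation happens inside Parse, so only token.Valid is left to check.
func parseAndCheck(rawtoken string, keyFunc jwt.Keyfunc) (jwt.MapClaims, error) {
	token, err := jwt.Parse(rawtoken, keyFunc)
	if err != nil {
		return nil, err
	}
	if !token.Valid {
		return nil, errors.New("jwt token claims are not valid")
	}
	return token.Claims.(jwt.MapClaims), nil
}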
@@ -17,7 +17,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/schema" - "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v5" ) type JWTSessionAuthenticator struct { @@ -44,8 +44,8 @@ func (ja *JWTSessionAuthenticator) CanLogin( user *schema.User, username string, rw http.ResponseWriter, - r *http.Request) (*schema.User, bool) { - + r *http.Request, +) (*schema.User, bool) { return user, r.Header.Get("Authorization") != "" || r.URL.Query().Get("login-token") != "" } @@ -53,8 +53,8 @@ func (ja *JWTSessionAuthenticator) CanLogin( func (ja *JWTSessionAuthenticator) Login( user *schema.User, rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { - + r *http.Request, +) (*schema.User, error) { rawtoken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ") if rawtoken == "" { rawtoken = r.URL.Query().Get("login-token") @@ -71,9 +71,9 @@ func (ja *JWTSessionAuthenticator) Login( return nil, err } - if err = token.Claims.Valid(); err != nil { + if !token.Valid { log.Warn("jwt token claims are not valid") - return nil, err + return nil, errors.New("jwt token claims are not valid") } claims := token.Claims.(jwt.MapClaims) @@ -139,9 +139,7 @@ func (ja *JWTSessionAuthenticator) Login( } if config.Keys.JwtConfig.SyncUserOnLogin { - if err := repository.GetUserRepository().AddUser(user); err != nil { - log.Errorf("Error while adding user '%s' to DB", user.Username) - } + persistUser(user) } } diff --git a/internal/auth/ldap.go b/internal/auth/ldap.go index b800ca7..05672c5 100644 --- a/internal/auth/ldap.go +++ b/internal/auth/ldap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -21,7 +21,7 @@ import ( type LdapAuthenticator struct { syncPassword string - UserAttr string + UserAttr string } var _ Authenticator = (*LdapAuthenticator)(nil) @@ -74,8 +74,8 @@ func (la *LdapAuthenticator) CanLogin( user *schema.User, username string, rw http.ResponseWriter, - r *http.Request) (*schema.User, bool) { - + r *http.Request, +) (*schema.User, bool) { lc := config.Keys.LdapConfig if user != nil { @@ -138,8 +138,8 @@ func (la *LdapAuthenticator) CanLogin( func (la *LdapAuthenticator) Login( user *schema.User, rw http.ResponseWriter, - r *http.Request) (*schema.User, error) { - + r *http.Request, +) (*schema.User, error) { l, err := la.getLdapConnection(false) if err != nil { log.Warn("Error while getting ldap connection") @@ -238,7 +238,6 @@ func (la *LdapAuthenticator) Sync() error { } func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) { - lc := config.Keys.LdapConfig conn, err := ldap.DialURL(lc.Url) if err != nil { diff --git a/internal/auth/local.go b/internal/auth/local.go index 1fe8521..8d39793 100644 --- a/internal/auth/local.go +++ b/internal/auth/local.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
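Both JWT authenticators above now call persistUser instead of repository.GetUserRepository().AddUser directly: the helper looks the user up first and only inserts on sql.ErrNoRows, so a repeat login of an already-synced user no longer logs a spurious insert error. A sketch of the intended call site, assuming the flag and helper from this patch; syncUser is a hypothetical wrapper:

package auth

import "github.com/ClusterCockpit/cc-backend/pkg/schema"

// syncUser gates on the authenticator's SyncUserOnLogin setting and lets
// persistUser decide between "already known" (no-op) and "insert new user".
func syncUser(syncOnLogin bool, user *schema.User) {
	if syncOnLogin {
		persistUser(user)
	}
}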
diff --git a/internal/auth/oidc.go b/internal/auth/oidc.go new file mode 100644 index 0000000..5cfb563 --- /dev/null +++ b/internal/auth/oidc.go @@ -0,0 +1,199 @@ +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. +// All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. +package auth + +import ( + "context" + "crypto/rand" + "encoding/base64" + "io" + "net/http" + "os" + "time" + + "github.com/ClusterCockpit/cc-backend/internal/config" + "github.com/ClusterCockpit/cc-backend/internal/repository" + "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/coreos/go-oidc/v3/oidc" + "github.com/gorilla/mux" + "golang.org/x/oauth2" +) + +type OIDC struct { + client *oauth2.Config + provider *oidc.Provider + authentication *Authentication + clientID string +} + +func randString(nByte int) (string, error) { + b := make([]byte, nByte) + if _, err := io.ReadFull(rand.Reader, b); err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +func setCallbackCookie(w http.ResponseWriter, r *http.Request, name, value string) { + c := &http.Cookie{ + Name: name, + Value: value, + MaxAge: int(time.Hour.Seconds()), + Secure: r.TLS != nil, + HttpOnly: true, + } + http.SetCookie(w, c) +} + +func NewOIDC(a *Authentication) *OIDC { + provider, err := oidc.NewProvider(context.Background(), config.Keys.OpenIDConfig.Provider) + if err != nil { + log.Fatal(err) + } + clientID := os.Getenv("OID_CLIENT_ID") + if clientID == "" { + log.Warn("environment variable 'OID_CLIENT_ID' not set (OpenID Connect auth will not work)") + } + clientSecret := os.Getenv("OID_CLIENT_SECRET") + if clientSecret == "" { + log.Warn("environment variable 'OID_CLIENT_SECRET' not set (OpenID Connect auth will not work)") + } + + client := &oauth2.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + Endpoint: provider.Endpoint(), + RedirectURL: "oidc-callback", + Scopes: []string{oidc.ScopeOpenID, "profile", "email"}, + } + + oa := &OIDC{provider: provider, client: client, clientID: clientID, authentication: a} + + return oa +} + +func (oa *OIDC) RegisterEndpoints(r *mux.Router) { + r.HandleFunc("/oidc-login", oa.OAuth2Login) + r.HandleFunc("/oidc-callback", oa.OAuth2Callback) +} + +func (oa *OIDC) OAuth2Callback(rw http.ResponseWriter, r *http.Request) { + c, err := r.Cookie("state") + if err != nil { + http.Error(rw, "state cookie not found", http.StatusBadRequest) + return + } + state := c.Value + + c, err = r.Cookie("verifier") + if err != nil { + http.Error(rw, "verifier cookie not found", http.StatusBadRequest) + return + } + codeVerifier := c.Value + + _ = r.ParseForm() + if r.Form.Get("state") != state { + http.Error(rw, "State invalid", http.StatusBadRequest) + return + } + code := r.Form.Get("code") + if code == "" { + http.Error(rw, "Code not found", http.StatusBadRequest) + return + } + token, err := oa.client.Exchange(context.Background(), code, oauth2.VerifierOption(codeVerifier)) + if err != nil { + http.Error(rw, "Failed to exchange token: "+err.Error(), http.StatusInternalServerError) + return + } + + userInfo, err := oa.provider.UserInfo(context.Background(), oauth2.StaticTokenSource(token)) + if err != nil { + http.Error(rw, "Failed to get userinfo: "+err.Error(), http.StatusInternalServerError) + return + } + + // // Extract the ID Token from OAuth2 token.
+ // rawIDToken, ok := token.Extra("id_token").(string) + // if !ok { + // http.Error(rw, "Cannot access idToken", http.StatusInternalServerError) + // } + // + // verifier := oa.provider.Verifier(&oidc.Config{ClientID: oa.clientID}) + // // Parse and verify ID Token payload. + // idToken, err := verifier.Verify(context.Background(), rawIDToken) + // if err != nil { + // http.Error(rw, "Failed to extract idToken: "+err.Error(), http.StatusInternalServerError) + // } + + projects := make([]string, 0) + + // Extract custom claims + var claims struct { + Username string `json:"preferred_username"` + Name string `json:"name"` + Profile struct { + Client struct { + Roles []string `json:"roles"` + } `json:"clustercockpit"` + } `json:"resource_access"` + } + if err := userInfo.Claims(&claims); err != nil { + http.Error(rw, "Failed to extract Claims: "+err.Error(), http.StatusInternalServerError) + return + } + + var roles []string + for _, r := range claims.Profile.Client.Roles { + switch r { + case "user": + roles = append(roles, schema.GetRoleString(schema.RoleUser)) + case "admin": + roles = append(roles, schema.GetRoleString(schema.RoleAdmin)) + } + } + + if len(roles) == 0 { + roles = append(roles, schema.GetRoleString(schema.RoleUser)) + } + + user := &schema.User{ + Username: claims.Username, + Name: claims.Name, + Roles: roles, + Projects: projects, + AuthSource: schema.AuthViaOIDC, + } + + if config.Keys.OpenIDConfig.SyncUserOnLogin { + persistUser(user) + } + + if err := oa.authentication.SaveSession(rw, r, user); err != nil { + return + } + log.Infof("login successful: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects) + ctx := context.WithValue(r.Context(), repository.ContextUserKey, user) + http.RedirectHandler("/", http.StatusTemporaryRedirect).ServeHTTP(rw, r.WithContext(ctx)) +} + +func (oa *OIDC) OAuth2Login(rw http.ResponseWriter, r *http.Request) { + state, err := randString(16) + if err != nil { + http.Error(rw, "Internal error", http.StatusInternalServerError) + return + } + setCallbackCookie(rw, r, "state", state) + + // use PKCE to protect against CSRF attacks + codeVerifier := oauth2.GenerateVerifier() + setCallbackCookie(rw, r, "verifier", codeVerifier) + + // Redirect user to consent page to ask for permission + url := oa.client.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(codeVerifier)) + http.Redirect(rw, r, url, http.StatusFound) +} diff --git a/internal/config/config.go b/internal/config/config.go index 253951c..0217d85 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file.
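The OIDC flow above exposes only NewOIDC and RegisterEndpoints: /oidc-login redirects to the provider with a PKCE verifier and state cookie, and /oidc-callback exchanges the code, syncs the user, and creates the session. A minimal wiring sketch, assuming an initialized Authentication value and the gorilla/mux router used elsewhere in this codebase; wireOIDC is a hypothetical name, not part of the patch:

package main

import (
	"github.com/ClusterCockpit/cc-backend/internal/auth"
	"github.com/gorilla/mux"
)

// wireOIDC registers the two OIDC endpoints on the router. NewOIDC reads
// OID_CLIENT_ID / OID_CLIENT_SECRET from the environment and discovers the
// provider from config.Keys.OpenIDConfig.Provider.
func wireOIDC(authHandle *auth.Authentication, router *mux.Router) {
	oidcAuth := auth.NewOIDC(authHandle)
	oidcAuth.RegisterEndpoints(router)
}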
@@ -31,6 +31,8 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{ "job_view_nodestats_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"}, "job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used"}, "job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"}, + "job_view_showFootprint": true, + "job_list_usePaging": true, "plot_general_colorBackground": true, "plot_general_colorscheme": []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"}, "plot_general_lineWidth": 3, diff --git a/internal/config/config_test.go b/internal/config/config_test.go index dc9d065..ed282be 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/graph/generated/generated.go b/internal/graph/generated/generated.go index f29e2a0..29a2a24 100644 --- a/internal/graph/generated/generated.go +++ b/internal/graph/generated/generated.go @@ -25,6 +25,7 @@ import ( // NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface. func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { return &executableSchema{ + schema: cfg.Schema, resolvers: cfg.Resolvers, directives: cfg.Directives, complexity: cfg.Complexity, @@ -32,6 +33,7 @@ func NewExecutableSchema(cfg Config) graphql.ExecutableSchema { } type Config struct { + Schema *ast.Schema Resolvers ResolverRoot Directives DirectiveRoot Complexity ComplexityRoot @@ -88,8 +90,12 @@ type ComplexityRoot struct { ConcurrentJobs func(childComplexity int) int Duration func(childComplexity int) int Exclusive func(childComplexity int) int + FlopsAnyAvg func(childComplexity int) int ID func(childComplexity int) int JobID func(childComplexity int) int + LoadAvg func(childComplexity int) int + MemBwAvg func(childComplexity int) int + MemUsedMax func(childComplexity int) int MetaData func(childComplexity int) int MonitoringStatus func(childComplexity int) int NumAcc func(childComplexity int) int @@ -133,14 +139,16 @@ type ComplexityRoot struct { } JobResultList struct { - Count func(childComplexity int) int - Items func(childComplexity int) int - Limit func(childComplexity int) int - Offset func(childComplexity int) int + Count func(childComplexity int) int + HasNextPage func(childComplexity int) int + Items func(childComplexity int) int + Limit func(childComplexity int) int + Offset func(childComplexity int) int } JobsStatistics struct { HistDuration func(childComplexity int) int + HistMetrics func(childComplexity int) int HistNumAccs func(childComplexity int) int HistNumCores func(childComplexity int) int HistNumNodes func(childComplexity int) int @@ -176,6 +184,19 @@ type ComplexityRoot struct { Metric func(childComplexity int) int } + MetricHistoPoint struct { + Bin func(childComplexity int) int + Count func(childComplexity int) int + Max func(childComplexity int) int + Min func(childComplexity int) int + } + + MetricHistoPoints struct { + Data func(childComplexity int) int + Metric func(childComplexity int) int + Unit func(childComplexity int) int + } + MetricStatistics struct { Avg func(childComplexity int) int Max func(childComplexity int) int @@ -208,7 +229,7 @@ type ComplexityRoot struct { JobMetrics func(childComplexity int, id string, metrics []string, scopes 
[]schema.MetricScope) int Jobs func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) int JobsFootprints func(childComplexity int, filter []*model.JobFilter, metrics []string) int - JobsStatistics func(childComplexity int, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int + JobsStatistics func(childComplexity int, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) int NodeMetrics func(childComplexity int, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) int RooflineHeatmap func(childComplexity int, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) int Tags func(childComplexity int) int @@ -303,6 +324,7 @@ type JobResolver interface { Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) + MetaData(ctx context.Context, obj *schema.Job) (interface{}, error) UserData(ctx context.Context, obj *schema.Job) (*model.User, error) } @@ -322,7 +344,7 @@ type QueryResolver interface { JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) JobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) Jobs(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, order *model.OrderByInput) (*model.JobResultList, error) - JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) + JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) NodeMetrics(ctx context.Context, cluster string, nodes []string, scopes []schema.MetricScope, metrics []string, from time.Time, to time.Time) ([]*model.NodeMetrics, error) } @@ -331,12 +353,16 @@ type SubClusterResolver interface { } type executableSchema struct { + schema *ast.Schema resolvers ResolverRoot directives DirectiveRoot complexity ComplexityRoot } func (e *executableSchema) Schema() *ast.Schema { + if e.schema != nil { + return e.schema + } return parsedSchema } @@ -485,6 +511,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Job.Exclusive(childComplexity), true + case "Job.flopsAnyAvg": + if e.complexity.Job.FlopsAnyAvg == nil { + break + } + + return e.complexity.Job.FlopsAnyAvg(childComplexity), true + case "Job.id": if e.complexity.Job.ID == nil { break @@ -499,6 +532,27 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.Job.JobID(childComplexity), true + case "Job.loadAvg": + if e.complexity.Job.LoadAvg == nil { + break + } + + return e.complexity.Job.LoadAvg(childComplexity), true + + case "Job.memBwAvg": + if e.complexity.Job.MemBwAvg == nil { + break + } + + return e.complexity.Job.MemBwAvg(childComplexity), true + + case "Job.memUsedMax": + if e.complexity.Job.MemUsedMax == nil { + break + } + + return 
e.complexity.Job.MemUsedMax(childComplexity), true + case "Job.metaData": if e.complexity.Job.MetaData == nil { break @@ -702,6 +756,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobResultList.Count(childComplexity), true + case "JobResultList.hasNextPage": + if e.complexity.JobResultList.HasNextPage == nil { + break + } + + return e.complexity.JobResultList.HasNextPage(childComplexity), true + case "JobResultList.items": if e.complexity.JobResultList.Items == nil { break @@ -730,6 +791,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.JobsStatistics.HistDuration(childComplexity), true + case "JobsStatistics.histMetrics": + if e.complexity.JobsStatistics.HistMetrics == nil { + break + } + + return e.complexity.JobsStatistics.HistMetrics(childComplexity), true + case "JobsStatistics.histNumAccs": if e.complexity.JobsStatistics.HistNumAccs == nil { break @@ -919,6 +987,55 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.MetricFootprints.Metric(childComplexity), true + case "MetricHistoPoint.bin": + if e.complexity.MetricHistoPoint.Bin == nil { + break + } + + return e.complexity.MetricHistoPoint.Bin(childComplexity), true + + case "MetricHistoPoint.count": + if e.complexity.MetricHistoPoint.Count == nil { + break + } + + return e.complexity.MetricHistoPoint.Count(childComplexity), true + + case "MetricHistoPoint.max": + if e.complexity.MetricHistoPoint.Max == nil { + break + } + + return e.complexity.MetricHistoPoint.Max(childComplexity), true + + case "MetricHistoPoint.min": + if e.complexity.MetricHistoPoint.Min == nil { + break + } + + return e.complexity.MetricHistoPoint.Min(childComplexity), true + + case "MetricHistoPoints.data": + if e.complexity.MetricHistoPoints.Data == nil { + break + } + + return e.complexity.MetricHistoPoints.Data(childComplexity), true + + case "MetricHistoPoints.metric": + if e.complexity.MetricHistoPoints.Metric == nil { + break + } + + return e.complexity.MetricHistoPoints.Metric(childComplexity), true + + case "MetricHistoPoints.unit": + if e.complexity.MetricHistoPoints.Unit == nil { + break + } + + return e.complexity.MetricHistoPoints.Unit(childComplexity), true + case "MetricStatistics.avg": if e.complexity.MetricStatistics.Avg == nil { break @@ -1112,7 +1229,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true + return e.complexity.Query.JobsStatistics(childComplexity, args["filter"].([]*model.JobFilter), args["metrics"].([]string), args["page"].(*model.PageRequest), args["sortBy"].(*model.SortByAggregate), args["groupBy"].(*model.Aggregate)), true case "Query.nodeMetrics": if e.complexity.Query.NodeMetrics == nil { @@ -1587,14 +1704,14 @@ func (ec *executionContext) introspectSchema() (*introspection.Schema, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapSchema(parsedSchema), nil + return introspection.WrapSchema(ec.Schema()), nil } func (ec *executionContext) introspectType(name string) (*introspection.Type, error) { if ec.DisableIntrospection { return nil, errors.New("introspection disabled") } - return introspection.WrapTypeFromDef(parsedSchema, 
parsedSchema.Types[name]), nil + return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil } var sources = []*ast.Source{ @@ -1628,6 +1745,11 @@ type Job { resources: [Resource!]! concurrentJobs: JobLinkResultList + memUsedMax: Float + flopsAnyAvg: Float + memBwAvg: Float + loadAvg: Float + metaData: Any userData: User } @@ -1798,7 +1920,7 @@ type Query { jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList! - jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! + jobsStatistics(filter: [JobFilter!], metrics: [String!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]! rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]! @@ -1873,6 +1995,7 @@ type JobResultList { offset: Int limit: Int count: Int + hasNextPage: Boolean } type JobLinkResultList { @@ -1886,6 +2009,19 @@ type HistoPoint { value: Int! } +type MetricHistoPoints { + metric: String! + unit: String! + data: [MetricHistoPoint!] +} + +type MetricHistoPoint { + bin: Int + count: Int! + min: Int + max: Int +} + type JobsStatistics { id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster name: String! # if User-Statistics: Given Name of Account (ID) Owner @@ -1903,6 +2039,7 @@ type JobsStatistics { histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs + histMetrics: [MetricHistoPoints!]! 
# metric: metricname, data array of histopoints: value: metric average bin, count: number of jobs with that metric average } input PageRequest { @@ -2142,33 +2279,42 @@ func (ec *executionContext) field_Query_jobsStatistics_args(ctx context.Context, } } args["filter"] = arg0 - var arg1 *model.PageRequest + var arg1 []string + if tmp, ok := rawArgs["metrics"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("metrics")) + arg1, err = ec.unmarshalOString2ᚕstringᚄ(ctx, tmp) + if err != nil { + return nil, err + } + } + args["metrics"] = arg1 + var arg2 *model.PageRequest if tmp, ok := rawArgs["page"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) - arg1, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) + arg2, err = ec.unmarshalOPageRequest2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐPageRequest(ctx, tmp) if err != nil { return nil, err } } - args["page"] = arg1 - var arg2 *model.SortByAggregate + args["page"] = arg2 + var arg3 *model.SortByAggregate if tmp, ok := rawArgs["sortBy"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy")) - arg2, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) + arg3, err = ec.unmarshalOSortByAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortByAggregate(ctx, tmp) if err != nil { return nil, err } } - args["sortBy"] = arg2 - var arg3 *model.Aggregate + args["sortBy"] = arg3 + var arg4 *model.Aggregate if tmp, ok := rawArgs["groupBy"]; ok { ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("groupBy")) - arg3, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) + arg4, err = ec.unmarshalOAggregate2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐAggregate(ctx, tmp) if err != nil { return nil, err } } - args["groupBy"] = arg3 + args["groupBy"] = arg4 return args, nil } @@ -4054,6 +4200,170 @@ func (ec *executionContext) fieldContext_Job_concurrentJobs(ctx context.Context, return fc, nil } +func (ec *executionContext) _Job_memUsedMax(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_memUsedMax(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemUsedMax, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_memUsedMax(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret 
graphql.Marshaler) { + fc, err := ec.fieldContext_Job_flopsAnyAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.FlopsAnyAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_flopsAnyAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_memBwAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_memBwAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.MemBwAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_memBwAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Job_loadAvg(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Job_loadAvg(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.LoadAvg, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(float64) + fc.Result = res + return ec.marshalOFloat2float64(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Job_loadAvg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Job", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { 
+ return nil, errors.New("field of type Float does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Job_metaData(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Job_metaData(ctx, field) if err != nil { @@ -4778,6 +5088,14 @@ func (ec *executionContext) fieldContext_JobResultList_items(ctx context.Context return ec.fieldContext_Job_resources(ctx, field) case "concurrentJobs": return ec.fieldContext_Job_concurrentJobs(ctx, field) + case "memUsedMax": + return ec.fieldContext_Job_memUsedMax(ctx, field) + case "flopsAnyAvg": + return ec.fieldContext_Job_flopsAnyAvg(ctx, field) + case "memBwAvg": + return ec.fieldContext_Job_memBwAvg(ctx, field) + case "loadAvg": + return ec.fieldContext_Job_loadAvg(ctx, field) case "metaData": return ec.fieldContext_Job_metaData(ctx, field) case "userData": @@ -4912,6 +5230,47 @@ func (ec *executionContext) fieldContext_JobResultList_count(ctx context.Context return fc, nil } +func (ec *executionContext) _JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField, obj *model.JobResultList) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobResultList_hasNextPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HasNextPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*bool) + fc.Result = res + return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobResultList_hasNextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobResultList", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _JobsStatistics_id(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_JobsStatistics_id(ctx, field) if err != nil { @@ -5640,6 +5999,58 @@ func (ec *executionContext) fieldContext_JobsStatistics_histNumAccs(ctx context. 
return fc, nil } +func (ec *executionContext) _JobsStatistics_histMetrics(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_JobsStatistics_histMetrics(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.HistMetrics, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.MetricHistoPoints) + fc.Result = res + return ec.marshalNMetricHistoPoints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointsᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_JobsStatistics_histMetrics(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "JobsStatistics", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "metric": + return ec.fieldContext_MetricHistoPoints_metric(ctx, field) + case "unit": + return ec.fieldContext_MetricHistoPoints_unit(ctx, field) + case "data": + return ec.fieldContext_MetricHistoPoints_data(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricHistoPoints", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _MetricConfig_name(ctx context.Context, field graphql.CollectedField, obj *schema.MetricConfig) (ret graphql.Marshaler) { fc, err := ec.fieldContext_MetricConfig_name(ctx, field) if err != nil { @@ -6185,6 +6596,312 @@ func (ec *executionContext) fieldContext_MetricFootprints_data(ctx context.Conte return fc, nil } +func (ec *executionContext) _MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_bin(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Bin, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_bin(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_count(ctx context.Context, field graphql.CollectedField, obj 
*model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_count(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Count, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_min(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Min, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_min(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoint) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoint_max(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Max, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoint_max(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = 
&graphql.FieldContext{ + Object: "MetricHistoPoint", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_metric(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_metric(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Metric, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_metric(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_unit(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_unit(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Unit, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_unit(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField, obj *model.MetricHistoPoints) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_MetricHistoPoints_data(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := 
ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Data, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*model.MetricHistoPoint) + fc.Result = res + return ec.marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_MetricHistoPoints_data(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "MetricHistoPoints", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "bin": + return ec.fieldContext_MetricHistoPoint_bin(ctx, field) + case "count": + return ec.fieldContext_MetricHistoPoint_count(ctx, field) + case "min": + return ec.fieldContext_MetricHistoPoint_min(ctx, field) + case "max": + return ec.fieldContext_MetricHistoPoint_max(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type MetricHistoPoint", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _MetricStatistics_avg(ctx context.Context, field graphql.CollectedField, obj *schema.MetricStatistics) (ret graphql.Marshaler) { fc, err := ec.fieldContext_MetricStatistics_avg(ctx, field) if err != nil { @@ -7152,6 +7869,14 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr return ec.fieldContext_Job_resources(ctx, field) case "concurrentJobs": return ec.fieldContext_Job_concurrentJobs(ctx, field) + case "memUsedMax": + return ec.fieldContext_Job_memUsedMax(ctx, field) + case "flopsAnyAvg": + return ec.fieldContext_Job_flopsAnyAvg(ctx, field) + case "memBwAvg": + return ec.fieldContext_Job_memBwAvg(ctx, field) + case "loadAvg": + return ec.fieldContext_Job_loadAvg(ctx, field) case "metaData": return ec.fieldContext_Job_metaData(ctx, field) case "userData": @@ -7342,6 +8067,8 @@ func (ec *executionContext) fieldContext_Query_jobs(ctx context.Context, field g return ec.fieldContext_JobResultList_limit(ctx, field) case "count": return ec.fieldContext_JobResultList_count(ctx, field) + case "hasNextPage": + return ec.fieldContext_JobResultList_hasNextPage(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type JobResultList", field.Name) }, @@ -7374,7 +8101,7 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate)) + return ec.resolvers.Query().JobsStatistics(rctx, fc.Args["filter"].([]*model.JobFilter), fc.Args["metrics"].([]string), fc.Args["page"].(*model.PageRequest), fc.Args["sortBy"].(*model.SortByAggregate), fc.Args["groupBy"].(*model.Aggregate)) }) if err != nil { ec.Error(ctx, err) @@ -7431,6 +8158,8 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex return ec.fieldContext_JobsStatistics_histNumCores(ctx, field) case "histNumAccs": return ec.fieldContext_JobsStatistics_histNumAccs(ctx, field) + case 
"histMetrics": + return ec.fieldContext_JobsStatistics_histMetrics(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type JobsStatistics", field.Name) }, @@ -11549,8 +12278,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in } switch k { case "from": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) data, err := ec.unmarshalNFloat2float64(ctx, v) if err != nil { @@ -11558,8 +12285,6 @@ func (ec *executionContext) unmarshalInputFloatRange(ctx context.Context, obj in } it.From = data case "to": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) data, err := ec.unmarshalNFloat2float64(ctx, v) if err != nil { @@ -11587,8 +12312,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte } switch k { case "from": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -11596,8 +12319,6 @@ func (ec *executionContext) unmarshalInputIntRange(ctx context.Context, obj inte } it.From = data case "to": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -11625,8 +12346,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } switch k { case "tags": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("tags")) data, err := ec.unmarshalOID2ᚕstringᚄ(ctx, v) if err != nil { @@ -11634,8 +12353,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Tags = data case "jobId": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobId")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -11643,8 +12360,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.JobID = data case "arrayJobId": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("arrayJobId")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { @@ -11652,8 +12367,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.ArrayJobID = data case "user": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("user")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -11661,8 +12374,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.User = data case "project": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("project")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -11670,8 +12381,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Project = data case "jobName": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobName")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -11679,8 +12388,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.JobName = data case "cluster": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("cluster")) 
data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -11688,8 +12395,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Cluster = data case "partition": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("partition")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -11697,8 +12402,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Partition = data case "duration": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("duration")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -11706,8 +12409,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Duration = data case "minRunningFor": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("minRunningFor")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { @@ -11715,8 +12416,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.MinRunningFor = data case "numNodes": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numNodes")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -11724,8 +12423,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.NumNodes = data case "numAccelerators": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numAccelerators")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -11733,8 +12430,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.NumAccelerators = data case "numHWThreads": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("numHWThreads")) data, err := ec.unmarshalOIntRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐIntRange(ctx, v) if err != nil { @@ -11742,8 +12437,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.NumHWThreads = data case "startTime": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startTime")) data, err := ec.unmarshalOTimeRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐTimeRange(ctx, v) if err != nil { @@ -11751,8 +12444,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.StartTime = data case "state": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("state")) data, err := ec.unmarshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐJobStateᚄ(ctx, v) if err != nil { @@ -11760,8 +12451,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.State = data case "flopsAnyAvg": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("flopsAnyAvg")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -11769,8 +12458,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.FlopsAnyAvg = data case "memBwAvg": - var err error - ctx := graphql.WithPathContext(ctx, 
graphql.NewPathWithField("memBwAvg")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -11778,8 +12465,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.MemBwAvg = data case "loadAvg": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("loadAvg")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -11787,8 +12472,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.LoadAvg = data case "memUsedMax": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("memUsedMax")) data, err := ec.unmarshalOFloatRange2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐFloatRange(ctx, v) if err != nil { @@ -11796,8 +12479,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.MemUsedMax = data case "exclusive": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("exclusive")) data, err := ec.unmarshalOInt2ᚖint(ctx, v) if err != nil { @@ -11805,8 +12486,6 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int } it.Exclusive = data case "node": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("node")) data, err := ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v) if err != nil { @@ -11838,8 +12517,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj } switch k { case "field": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("field")) data, err := ec.unmarshalNString2string(ctx, v) if err != nil { @@ -11847,8 +12524,6 @@ func (ec *executionContext) unmarshalInputOrderByInput(ctx context.Context, obj } it.Field = data case "order": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("order")) data, err := ec.unmarshalNSortDirectionEnum2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐSortDirectionEnum(ctx, v) if err != nil { @@ -11876,8 +12551,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i } switch k { case "itemsPerPage": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("itemsPerPage")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -11885,8 +12558,6 @@ func (ec *executionContext) unmarshalInputPageRequest(ctx context.Context, obj i } it.ItemsPerPage = data case "page": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("page")) data, err := ec.unmarshalNInt2int(ctx, v) if err != nil { @@ -11914,8 +12585,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } switch k { case "eq": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -11923,8 +12592,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.Eq = data case "neq": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("neq")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -11932,8 +12599,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.Neq = data case "contains": - var err error - ctx := graphql.WithPathContext(ctx, 
graphql.NewPathWithField("contains")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -11941,8 +12606,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.Contains = data case "startsWith": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("startsWith")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -11950,8 +12613,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.StartsWith = data case "endsWith": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("endsWith")) data, err := ec.unmarshalOString2ᚖstring(ctx, v) if err != nil { @@ -11959,8 +12620,6 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i } it.EndsWith = data case "in": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("in")) data, err := ec.unmarshalOString2ᚕstringᚄ(ctx, v) if err != nil { @@ -11988,8 +12647,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int } switch k { case "from": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("from")) data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) if err != nil { @@ -11997,8 +12654,6 @@ func (ec *executionContext) unmarshalInputTimeRange(ctx context.Context, obj int } it.From = data case "to": - var err error - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("to")) data, err := ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v) if err != nil { @@ -12504,6 +13159,14 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "memUsedMax": + out.Values[i] = ec._Job_memUsedMax(ctx, field, obj) + case "flopsAnyAvg": + out.Values[i] = ec._Job_flopsAnyAvg(ctx, field, obj) + case "memBwAvg": + out.Values[i] = ec._Job_memBwAvg(ctx, field, obj) + case "loadAvg": + out.Values[i] = ec._Job_loadAvg(ctx, field, obj) case "metaData": field := field @@ -12796,6 +13459,8 @@ func (ec *executionContext) _JobResultList(ctx context.Context, sel ast.Selectio out.Values[i] = ec._JobResultList_limit(ctx, field, obj) case "count": out.Values[i] = ec._JobResultList_count(ctx, field, obj) + case "hasNextPage": + out.Values[i] = ec._JobResultList_hasNextPage(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -12910,6 +13575,11 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti if out.Values[i] == graphql.Null { out.Invalids++ } + case "histMetrics": + out.Values[i] = ec._JobsStatistics_histMetrics(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -13058,6 +13728,97 @@ func (ec *executionContext) _MetricFootprints(ctx context.Context, sel ast.Selec return out } +var metricHistoPointImplementors = []string{"MetricHistoPoint"} + +func (ec *executionContext) _MetricHistoPoint(ctx context.Context, sel ast.SelectionSet, obj *model.MetricHistoPoint) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, metricHistoPointImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("MetricHistoPoint") + case "bin": + out.Values[i] = ec._MetricHistoPoint_bin(ctx, field, 
obj) + case "count": + out.Values[i] = ec._MetricHistoPoint_count(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "min": + out.Values[i] = ec._MetricHistoPoint_min(ctx, field, obj) + case "max": + out.Values[i] = ec._MetricHistoPoint_max(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + +var metricHistoPointsImplementors = []string{"MetricHistoPoints"} + +func (ec *executionContext) _MetricHistoPoints(ctx context.Context, sel ast.SelectionSet, obj *model.MetricHistoPoints) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, metricHistoPointsImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("MetricHistoPoints") + case "metric": + out.Values[i] = ec._MetricHistoPoints_metric(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "unit": + out.Values[i] = ec._MetricHistoPoints_unit(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "data": + out.Values[i] = ec._MetricHistoPoints_data(ctx, field, obj) + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var metricStatisticsImplementors = []string{"MetricStatistics"} func (ec *executionContext) _MetricStatistics(ctx context.Context, sel ast.SelectionSet, obj *schema.MetricStatistics) graphql.Marshaler { @@ -15310,6 +16071,70 @@ func (ec *executionContext) marshalNMetricFootprints2ᚖgithubᚗcomᚋClusterCo return ec._MetricFootprints(ctx, sel, v) } +func (ec *executionContext) marshalNMetricHistoPoint2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoint(ctx context.Context, sel ast.SelectionSet, v *model.MetricHistoPoint) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._MetricHistoPoint(ctx, sel, v) +} + +func (ec *executionContext) marshalNMetricHistoPoints2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointsᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.MetricHistoPoints) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoints(ctx, sel, v[i]) + } + 
if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + +func (ec *executionContext) marshalNMetricHistoPoints2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoints(ctx context.Context, sel ast.SelectionSet, v *model.MetricHistoPoints) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._MetricHistoPoints(ctx, sel, v) +} + func (ec *executionContext) unmarshalNMetricScope2githubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScope(ctx context.Context, v interface{}) (schema.MetricScope, error) { var res schema.MetricScope err := res.UnmarshalGQL(v) @@ -16365,6 +17190,53 @@ func (ec *executionContext) marshalOJobState2ᚕgithubᚗcomᚋClusterCockpitᚋ return ret } +func (ec *executionContext) marshalOMetricHistoPoint2ᚕᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPointᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.MetricHistoPoint) graphql.Marshaler { + if v == nil { + return graphql.Null + } + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNMetricHistoPoint2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐMetricHistoPoint(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) unmarshalOMetricScope2ᚕgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋpkgᚋschemaᚐMetricScopeᚄ(ctx context.Context, v interface{}) ([]schema.MetricScope, error) { if v == nil { return nil, nil diff --git a/internal/graph/model/models.go b/internal/graph/model/models.go index 58cf9f8..8047957 100644 --- a/internal/graph/model/models.go +++ b/internal/graph/model/models.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
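The generated marshalers above are the glue between the new histogram schema types and the Go model structs that follow in models_gen.go. As a minimal, self-contained sketch of how the pieces fit together (the struct definitions mirror the generated ones; the example values are made up for illustration):

package main

import "fmt"

// Mirrors of the generated model types. Bin, Min and Max are pointers because
// the corresponding GraphQL fields are nullable; Count is non-null.
type MetricHistoPoint struct {
	Bin   *int
	Count int
	Min   *int
	Max   *int
}

type MetricHistoPoints struct {
	Metric string // e.g. "flops_any"
	Unit   string // e.g. "GF/s"
	Data   []*MetricHistoPoint
}

func main() {
	bin, min, max := 1, 0, 10
	histo := MetricHistoPoints{
		Metric: "flops_any",
		Unit:   "GF/s",
		Data:   []*MetricHistoPoint{{Bin: &bin, Count: 42, Min: &min, Max: &max}},
	}
	fmt.Printf("%s [%s]: %d bins\n", histo.Metric, histo.Unit, len(histo.Data))
}

Note the O/N split in the marshaler names above: the data list itself may be null (marshalO...), but every element of the list must be non-null (marshalN...), which is why a single null element collapses the whole array to graphql.Null in the generated loop.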
diff --git a/internal/graph/model/models_gen.go b/internal/graph/model/models_gen.go index 050784b..d575fc3 100644 --- a/internal/graph/model/models_gen.go +++ b/internal/graph/model/models_gen.go @@ -78,29 +78,31 @@ type JobMetricWithName struct { } type JobResultList struct { - Items []*schema.Job `json:"items"` - Offset *int `json:"offset,omitempty"` - Limit *int `json:"limit,omitempty"` - Count *int `json:"count,omitempty"` + Items []*schema.Job `json:"items"` + Offset *int `json:"offset,omitempty"` + Limit *int `json:"limit,omitempty"` + Count *int `json:"count,omitempty"` + HasNextPage *bool `json:"hasNextPage,omitempty"` } type JobsStatistics struct { - ID string `json:"id"` - Name string `json:"name"` - TotalJobs int `json:"totalJobs"` - RunningJobs int `json:"runningJobs"` - ShortJobs int `json:"shortJobs"` - TotalWalltime int `json:"totalWalltime"` - TotalNodes int `json:"totalNodes"` - TotalNodeHours int `json:"totalNodeHours"` - TotalCores int `json:"totalCores"` - TotalCoreHours int `json:"totalCoreHours"` - TotalAccs int `json:"totalAccs"` - TotalAccHours int `json:"totalAccHours"` - HistDuration []*HistoPoint `json:"histDuration"` - HistNumNodes []*HistoPoint `json:"histNumNodes"` - HistNumCores []*HistoPoint `json:"histNumCores"` - HistNumAccs []*HistoPoint `json:"histNumAccs"` + ID string `json:"id"` + Name string `json:"name"` + TotalJobs int `json:"totalJobs"` + RunningJobs int `json:"runningJobs"` + ShortJobs int `json:"shortJobs"` + TotalWalltime int `json:"totalWalltime"` + TotalNodes int `json:"totalNodes"` + TotalNodeHours int `json:"totalNodeHours"` + TotalCores int `json:"totalCores"` + TotalCoreHours int `json:"totalCoreHours"` + TotalAccs int `json:"totalAccs"` + TotalAccHours int `json:"totalAccHours"` + HistDuration []*HistoPoint `json:"histDuration"` + HistNumNodes []*HistoPoint `json:"histNumNodes"` + HistNumCores []*HistoPoint `json:"histNumCores"` + HistNumAccs []*HistoPoint `json:"histNumAccs"` + HistMetrics []*MetricHistoPoints `json:"histMetrics"` } type MetricFootprints struct { @@ -108,6 +110,22 @@ type MetricFootprints struct { Data []schema.Float `json:"data"` } +type MetricHistoPoint struct { + Bin *int `json:"bin,omitempty"` + Count int `json:"count"` + Min *int `json:"min,omitempty"` + Max *int `json:"max,omitempty"` +} + +type MetricHistoPoints struct { + Metric string `json:"metric"` + Unit string `json:"unit"` + Data []*MetricHistoPoint `json:"data,omitempty"` +} + +type Mutation struct { +} + type NodeMetrics struct { Host string `json:"host"` SubCluster string `json:"subCluster"` @@ -124,6 +142,9 @@ type PageRequest struct { Page int `json:"page"` } +type Query struct { +} + type StringInput struct { Eq *string `json:"eq,omitempty"` Neq *string `json:"neq,omitempty"` diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 9e5e111..a33e041 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -2,7 +2,7 @@ package graph // This file will be automatically regenerated based on the schema, any resolver implementations // will be copied through when generating and any unknown code will be moved to the end. 
-// Code generated by github.com/99designs/gqlgen version v0.17.36 +// Code generated by github.com/99designs/gqlgen version v0.17.45 import ( "context" @@ -11,6 +11,7 @@ import ( "strconv" "time" + "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/generated" "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" @@ -240,11 +241,27 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag return nil, err } - return &model.JobResultList{Items: jobs, Count: &count}, nil + if !config.Keys.UiDefaults["job_list_usePaging"].(bool) { + hasNextPage := false + page.Page += 1 + + nextJobs, err := r.Repo.QueryJobs(ctx, filter, page, order) + if err != nil { + log.Warn("Error while querying next jobs") + return nil, err + } + if len(nextJobs) > 0 { + hasNextPage = true + } + + return &model.JobResultList{Items: jobs, Count: &count, HasNextPage: &hasNextPage}, nil + } else { + return &model.JobResultList{Items: jobs, Count: &count}, nil + } } // JobsStatistics is the resolver for the jobsStatistics field. -func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) { +func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, metrics []string, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) { var err error var stats []*model.JobsStatistics @@ -291,6 +308,17 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF } } + if requireField(ctx, "histMetrics") { + if groupBy == nil { + stats[0], err = r.Repo.AddMetricHistograms(ctx, filter, metrics, stats[0]) + if err != nil { + return nil, err + } + } else { + return nil, errors.New("metric histograms only implemented without groupBy argument") + } + } + return stats, nil } diff --git a/internal/graph/util.go b/internal/graph/util.go index 3a2c3b1..3e65b6c 100644 --- a/internal/graph/util.go +++ b/internal/graph/util.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/importer/handleImport.go b/internal/importer/handleImport.go index bc97ec0..2d507a2 100644 --- a/internal/importer/handleImport.go +++ b/internal/importer/handleImport.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go index 01ac2f2..ce0d2e1 100644 --- a/internal/importer/importer_test.go +++ b/internal/importer/importer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
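The Jobs resolver change above determines hasNextPage by look-ahead: when the UI is configured for endless scrolling instead of classic paging, it fetches the page after the requested one and reports whether that peek returned anything. A minimal sketch of the pattern, with queryPage as a hypothetical stand-in for r.Repo.QueryJobs:

package main

import "fmt"

type pageRequest struct {
	Page         int // 1-based page number
	ItemsPerPage int
}

// queryPage is a hypothetical data source standing in for r.Repo.QueryJobs.
func queryPage(p pageRequest, totalItems int) []int {
	start := (p.Page - 1) * p.ItemsPerPage
	items := []int{}
	for i := start; i < totalItems && i < start+p.ItemsPerPage; i++ {
		items = append(items, i)
	}
	return items
}

func main() {
	page := pageRequest{Page: 2, ItemsPerPage: 10}
	jobs := queryPage(page, 25) // items 10..19

	// Peek at the following page; any result means more data can be loaded.
	page.Page += 1
	hasNextPage := len(queryPage(page, 25)) > 0

	fmt.Println(len(jobs), hasNextPage) // prints: 10 true
}

The trade-off is an extra query per request; asking for itemsPerPage+1 rows up front and trimming the surplus would avoid the second round trip, which is the pattern the histogram code later in this diff uses for its 500-job cap.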
diff --git a/internal/importer/initDB.go b/internal/importer/initDB.go index 3118131..0e7a6bb 100644 --- a/internal/importer/initDB.go +++ b/internal/importer/initDB.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/importer/normalize.go b/internal/importer/normalize.go index a2efac3..d9595a2 100644 --- a/internal/importer/normalize.go +++ b/internal/importer/normalize.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/importer/normalize_test.go b/internal/importer/normalize_test.go index 544000a..b441c11 100644 --- a/internal/importer/normalize_test.go +++ b/internal/importer/normalize_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/metricdata/cc-metric-store.go b/internal/metricdata/cc-metric-store.go index 4874975..e564db6 100644 --- a/internal/metricdata/cc-metric-store.go +++ b/internal/metricdata/cc-metric-store.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
@@ -32,32 +32,32 @@ type CCMetricStoreConfig struct { } type CCMetricStore struct { + here2there map[string]string + there2here map[string]string + client http.Client jwt string url string queryEndpoint string - client http.Client - here2there map[string]string - there2here map[string]string } type ApiQueryRequest struct { Cluster string `json:"cluster"` + Queries []ApiQuery `json:"queries"` + ForAllNodes []string `json:"for-all-nodes"` From int64 `json:"from"` To int64 `json:"to"` WithStats bool `json:"with-stats"` WithData bool `json:"with-data"` - Queries []ApiQuery `json:"queries"` - ForAllNodes []string `json:"for-all-nodes"` } type ApiQuery struct { + Type *string `json:"type,omitempty"` + SubType *string `json:"subtype,omitempty"` Metric string `json:"metric"` Hostname string `json:"host"` - Aggregate bool `json:"aggreg"` - Type *string `json:"type,omitempty"` TypeIds []string `json:"type-ids,omitempty"` - SubType *string `json:"subtype,omitempty"` SubTypeIds []string `json:"subtype-ids,omitempty"` + Aggregate bool `json:"aggreg"` } type ApiQueryResponse struct { @@ -67,16 +67,15 @@ type ApiQueryResponse struct { type ApiMetricData struct { Error *string `json:"error"` + Data []schema.Float `json:"data"` From int64 `json:"from"` To int64 `json:"to"` - Data []schema.Float `json:"data"` Avg schema.Float `json:"avg"` Min schema.Float `json:"min"` Max schema.Float `json:"max"` } func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error { - var config CCMetricStoreConfig if err := json.Unmarshal(rawConfig, &config); err != nil { log.Warn("Error while unmarshaling raw json config") @@ -122,8 +121,8 @@ func (ccms *CCMetricStore) toLocalName(metric string) string { func (ccms *CCMetricStore) doRequest( ctx context.Context, - body *ApiQueryRequest) (*ApiQueryResponse, error) { - + body *ApiQueryRequest, +) (*ApiQueryResponse, error) { buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(body); err != nil { log.Warn("Error while encoding request body") @@ -162,8 +161,8 @@ func (ccms *CCMetricStore) LoadData( job *schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.JobData, error) { - + ctx context.Context, +) (schema.JobData, error) { queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes) if err != nil { log.Warn("Error while building queries") @@ -186,7 +185,7 @@ func (ccms *CCMetricStore) LoadData( } var errors []string - var jobData schema.JobData = make(schema.JobData) + jobData := make(schema.JobData) for i, row := range resBody.Results { query := req.Queries[i] metric := ccms.toLocalName(query.Metric) @@ -206,7 +205,7 @@ func (ccms *CCMetricStore) LoadData( jobData[metric][scope] = jobMetric } - for _, res := range row { + for ndx, res := range row { if res.Error != nil { /* Build list for "partial errors", if any */ errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error)) @@ -216,7 +215,7 @@ func (ccms *CCMetricStore) LoadData( id := (*string)(nil) if query.Type != nil { id = new(string) - *id = query.TypeIds[0] + *id = query.TypeIds[ndx] } if res.Avg.IsNaN() || res.Min.IsNaN() || res.Max.IsNaN() { @@ -267,8 +266,8 @@ var ( func (ccms *CCMetricStore) buildQueries( job *schema.Job, metrics []string, - scopes []schema.MetricScope) ([]ApiQuery, []schema.MetricScope, error) { - + scopes []schema.MetricScope, +) ([]ApiQuery, []schema.MetricScope, error) { queries := make([]ApiQuery, 0, len(metrics)*len(scopes)*len(job.Resources)) assignedScope := 
[]schema.MetricScope{} @@ -313,6 +312,11 @@ func (ccms *CCMetricStore) buildQueries( // Accelerator -> Accelerator (Use "accelerator" scope if requested scope is lower than node) if nativeScope == schema.MetricScopeAccelerator && scope.LT(schema.MetricScopeNode) { + if scope != schema.MetricScopeAccelerator { + // Skip all other cases caught here + continue + } + queries = append(queries, ApiQuery{ Metric: remoteName, Hostname: host.Hostname, @@ -504,8 +508,8 @@ func (ccms *CCMetricStore) buildQueries( func (ccms *CCMetricStore) LoadStats( job *schema.Job, metrics []string, - ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) { - + ctx context.Context, +) (map[string]map[string]schema.MetricStatistics, error) { queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) // #166 Add scope here for analysis view accelerator normalization? if err != nil { log.Warn("Error while building query") @@ -566,8 +570,8 @@ func (ccms *CCMetricStore) LoadNodeData( metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, - ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { - + ctx context.Context, +) (map[string]map[string][]*schema.JobMetric, error) { req := ApiQueryRequest{ Cluster: cluster, From: from.Unix(), @@ -652,7 +656,6 @@ func (ccms *CCMetricStore) LoadNodeData( } func intToStringSlice(is []int) []string { - ss := make([]string, len(is)) for i, x := range is { ss[i] = strconv.Itoa(x) diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index 8055baf..b95f07e 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 3117f8c..a93b1ac 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file.
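Two of the cc-metric-store.go changes above are easy to misread. First, the struct field reordering (ApiQueryRequest, ApiQuery, ApiMetricData) is purely cosmetic: the JSON wire format is fixed by the struct tags, not by Go field order. Second, the switch from query.TypeIds[0] to query.TypeIds[ndx] is a real fix: each entry in a response row aligns index-wise with the query's type-ids, so always reading index 0 mislabeled every sub-node result after the first. A minimal sketch of the request encoding, using mirrored types and made-up host and cluster names:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors of the request types above; only the tags matter on the wire.
type apiQuery struct {
	Metric    string   `json:"metric"`
	Hostname  string   `json:"host"`
	TypeIds   []string `json:"type-ids,omitempty"`
	Aggregate bool     `json:"aggreg"`
}

type apiQueryRequest struct {
	Cluster string     `json:"cluster"`
	Queries []apiQuery `json:"queries"`
	From    int64      `json:"from"`
	To      int64      `json:"to"`
}

func main() {
	req := apiQueryRequest{
		Cluster: "testcluster", // hypothetical cluster name
		From:    1700000000,
		To:      1700003600,
		Queries: []apiQuery{
			// One query, two type-ids: the response row carries one result
			// per type-id, in the same order -- hence TypeIds[ndx] above.
			{Metric: "flops_any", Hostname: "node0001", TypeIds: []string{"0", "1"}},
		},
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
}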
@@ -38,7 +38,6 @@ var metricDataRepos map[string]MetricDataRepository = map[string]MetricDataRepos var useArchive bool func Init(disableArchive bool) error { - useArchive = !disableArchive for _, cluster := range config.Keys.Clusters { if cluster.MetricDataRepository != nil { @@ -80,7 +79,8 @@ var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024) func LoadData(job *schema.Job, metrics []string, scopes []schema.MetricScope, - ctx context.Context) (schema.JobData, error) { + ctx context.Context, +) (schema.JobData, error) { data := cache.Get(cacheKey(job, metrics, scopes), func() (_ interface{}, ttl time.Duration, size int) { var jd schema.JobData var err error @@ -109,7 +109,8 @@ func LoadData(job *schema.Job, jd, err = repo.LoadData(job, metrics, scopes, ctx) if err != nil { if len(jd) != 0 { - log.Warnf("partial error: %s", err.Error()) + log.Errorf("partial error: %s", err.Error()) + return err, 0, 0 } else { log.Error("Error while loading job data from metric repository") return err, 0, 0 @@ -179,8 +180,8 @@ func LoadAverages( job *schema.Job, metrics []string, data [][]schema.Float, - ctx context.Context) error { - + ctx context.Context, +) error { if job.State != schema.JobStateRunning && useArchive { return archive.LoadAveragesFromArchive(job, metrics, data) // #166 change also here? } @@ -219,8 +220,8 @@ func LoadNodeData( metrics, nodes []string, scopes []schema.MetricScope, from, to time.Time, - ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { - + ctx context.Context, +) (map[string]map[string][]*schema.JobMetric, error) { repo, ok := metricDataRepos[cluster] if !ok { return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster) @@ -252,8 +253,8 @@ func LoadNodeData( func cacheKey( job *schema.Job, metrics []string, - scopes []schema.MetricScope) string { - + scopes []schema.MetricScope, +) string { // Duration and StartTime do not need to be in the cache key as StartTime is less unique than // job.ID and the TTL of the cache entry makes sure it does not stay there forever. return fmt.Sprintf("%d(%s):[%v],[%v]", @@ -267,8 +268,8 @@ func cacheKey( func prepareJobData( job *schema.Job, jobData schema.JobData, - scopes []schema.MetricScope) { - + scopes []schema.MetricScope, +) { const maxSeriesSize int = 15 for _, scopes := range jobData { for _, jm := range scopes { @@ -295,7 +296,6 @@ func prepareJobData( // Writes a running job to the job-archive func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) { - allMetrics := make([]string, 0) metricConfigs := archive.GetCluster(job.Cluster).MetricConfig for _, mc := range metricConfigs { diff --git a/internal/metricdata/utils.go b/internal/metricdata/utils.go index 4affb2e..6d490fe 100644 --- a/internal/metricdata/utils.go +++ b/internal/metricdata/utils.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/dbConnection.go b/internal/repository/dbConnection.go index 38a258a..418eef9 100644 --- a/internal/repository/dbConnection.go +++ b/internal/repository/dbConnection.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. 
// Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/hooks.go b/internal/repository/hooks.go index 2f40fd5..e4fe8e1 100644 --- a/internal/repository/hooks.go +++ b/internal/repository/hooks.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/job.go b/internal/repository/job.go index 76834d1..ce5e416 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -16,6 +16,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/graph/model" "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" "github.com/ClusterCockpit/cc-backend/pkg/lrucache" "github.com/ClusterCockpit/cc-backend/pkg/schema" @@ -29,13 +30,11 @@ var ( ) type JobRepository struct { - DB *sqlx.DB - driver string - - stmtCache *sq.StmtCache - cache *lrucache.Cache - + DB *sqlx.DB + stmtCache *sq.StmtCache + cache *lrucache.Cache archiveChannel chan *schema.Job + driver string archivePending sync.WaitGroup } @@ -60,7 +59,7 @@ func GetJobRepository() *JobRepository { var jobColumns []string = []string{ "job.id", "job.job_id", "job.user", "job.project", "job.cluster", "job.subcluster", "job.start_time", "job.partition", "job.array_job_id", "job.num_nodes", "job.num_hwthreads", "job.num_acc", "job.exclusive", "job.monitoring_status", "job.smt", "job.job_state", - "job.duration", "job.walltime", "job.resources", // "job.meta_data", + "job.duration", "job.walltime", "job.resources", "job.mem_used_max", "job.flops_any_avg", "job.mem_bw_avg", "job.load_avg", // "job.meta_data", } func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { @@ -68,7 +67,7 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) { if err := row.Scan( &job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId, &job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State, - &job.Duration, &job.Walltime, &job.RawResources /*&job.RawMetaData*/); err != nil { + &job.Duration, &job.Walltime, &job.RawResources, &job.MemUsedMax, &job.FlopsAnyAvg, &job.MemBwAvg, &job.LoadAvg /*&job.RawMetaData*/); err != nil { log.Warnf("Error while scanning rows (Job): %v", err) return nil, err } @@ -212,7 +211,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er } r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour) - return nil + return archive.UpdateMetadata(job, job.MetaData) } // Find executes a SQL query to find a specific batch job. @@ -223,8 +222,8 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er func (r *JobRepository) Find( jobId *int64, cluster *string, - startTime *int64) (*schema.Job, error) { - + startTime *int64, +) (*schema.Job, error) { start := time.Now() q := sq.Select(jobColumns...).From("job"). 
Where("job.job_id = ?", *jobId) @@ -248,8 +247,8 @@ func (r *JobRepository) Find( func (r *JobRepository) FindAll( jobId *int64, cluster *string, - startTime *int64) ([]*schema.Job, error) { - + startTime *int64, +) ([]*schema.Job, error) { start := time.Now() q := sq.Select(jobColumns...).From("job"). Where("job.job_id = ?", *jobId) @@ -292,7 +291,8 @@ func (r *JobRepository) FindById(jobId int64) (*schema.Job, error) { func (r *JobRepository) FindConcurrentJobs( ctx context.Context, - job *schema.Job) (*model.JobLinkResultList, error) { + job *schema.Job, +) (*model.JobLinkResultList, error) { if job == nil { return nil, nil } @@ -420,8 +420,8 @@ func (r *JobRepository) Stop( jobId int64, duration int32, state schema.JobState, - monitoringStatus int32) (err error) { - + monitoringStatus int32, +) (err error) { stmt := sq.Update("job"). Set("job_state", state). Set("duration", duration). @@ -434,11 +434,14 @@ func (r *JobRepository) Stop( func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) { var cnt int - qs := fmt.Sprintf("SELECT count(*) FROM job WHERE job.start_time < %d", startTime) - err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement - _, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime) + q := sq.Select("count(*)").From("job").Where("job.start_time < ?", startTime) + q.RunWith(r.DB).QueryRow().Scan(cnt) + qd := sq.Delete("job").Where("job.start_time < ?", startTime) + _, err := qd.RunWith(r.DB).Exec() + if err != nil { - log.Errorf(" DeleteJobsBefore(%d): error %#v", startTime, err) + s, _, _ := qd.ToSql() + log.Errorf(" DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err) } else { log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt) } @@ -446,9 +449,12 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) { } func (r *JobRepository) DeleteJobById(id int64) error { - _, err := r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id) + qd := sq.Delete("job").Where("job.id = ?", id) + _, err := qd.RunWith(r.DB).Exec() + if err != nil { - log.Errorf("DeleteJobById(%d): error %#v", id, err) + s, _, _ := qd.ToSql() + log.Errorf("DeleteJobById(%d) with %s : error %#v", id, s, err) } else { log.Debugf("DeleteJobById(%d): Success", id) } @@ -468,8 +474,8 @@ func (r *JobRepository) UpdateMonitoringStatus(job int64, monitoringStatus int32 func (r *JobRepository) MarkArchived( jobId int64, monitoringStatus int32, - metricStats map[string]schema.JobStatistics) error { - + metricStats map[string]schema.JobStatistics, +) error { stmt := sq.Update("job"). Set("monitoring_status", monitoringStatus). Where("job.id = ?", jobId) @@ -483,6 +489,7 @@ func (r *JobRepository) MarkArchived( case "mem_bw": stmt = stmt.Set("mem_bw_avg", stats.Avg) case "load": + stmt = stmt.Set("load_avg", stats.Avg) case "cpu_load": stmt = stmt.Set("load_avg", stats.Avg) case "net_bw": @@ -577,8 +584,10 @@ func (r *JobRepository) FindUserOrProjectOrJobname(user *schema.User, searchterm } } -var ErrNotFound = errors.New("no such jobname, project or user") -var ErrForbidden = errors.New("not authorized") +var ( + ErrNotFound = errors.New("no such jobname, project or user") + ErrForbidden = errors.New("not authorized") +) func (r *JobRepository) FindColumnValue(user *schema.User, searchterm string, table string, selectColumn string, whereColumn string, isLike bool) (result string, err error) { compareStr := " = ?" 
@@ -662,7 +671,6 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) { // AllocatedNodes returns a map of all subclusters to a map of hostnames to the amount of jobs running on that host. // Hosts with zero jobs running on them will not show up! func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]int, error) { - start := time.Now() subclusters := make(map[string]map[string]int) rows, err := sq.Select("resources", "subcluster").From("job"). @@ -705,7 +713,6 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in } func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error { - start := time.Now() res, err := sq.Update("job"). Set("monitoring_status", schema.MonitoringStatusArchivingFailed). @@ -734,7 +741,6 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error { } func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64) ([]*schema.Job, error) { - var query sq.SelectBuilder if startTimeBegin == startTimeEnd || startTimeBegin > startTimeEnd { diff --git a/internal/repository/job_test.go b/internal/repository/job_test.go index 986365c..7193ca6 100644 --- a/internal/repository/job_test.go +++ b/internal/repository/job_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/migration.go b/internal/repository/migration.go index 0f37d0a..0259c61 100644 --- a/internal/repository/migration.go +++ b/internal/repository/migration.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
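The job.go rewrites above all follow the same pattern: build the statement with the squirrel builder, and on failure render it with ToSql() so the log shows the exact SQL that failed instead of the generic "Error while running query". A minimal sketch of that pattern (the table and filter are made up; squirrel's ToSql() returning the SQL string, its arguments, and an error is the real API):

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	qd := sq.Delete("job").Where("job.start_time < ?", int64(1700000000))

	// In the repository this is only rendered in the error path:
	//   s, _, _ := qd.ToSql()
	//   log.Errorf("DeleteJobsBefore(%d) with %s: error %#v", startTime, s, err)
	s, args, err := qd.ToSql()
	if err != nil {
		fmt.Println("build error:", err)
		return
	}
	fmt.Println(s, args) // DELETE FROM job WHERE job.start_time < ? [1700000000]
}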
@@ -16,7 +16,7 @@ import ( "github.com/golang-migrate/migrate/v4/source/iofs" ) -const Version uint = 6 +const Version uint = 7 //go:embed migrations/* var migrationFiles embed.FS @@ -57,7 +57,7 @@ func checkDBVersion(backend string, db *sql.DB) error { log.Fatalf("unsupported database backend: %s", backend) } - v, _, err := m.Version() + v, dirty, err := m.Version() if err != nil { if err == migrate.ErrNilVersion { log.Warn("Legacy database without version or missing database file!") @@ -68,18 +68,18 @@ func checkDBVersion(backend string, db *sql.DB) error { if v < Version { return fmt.Errorf("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version) + } else if v > Version { + return fmt.Errorf("unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool", v, Version) } - if v > Version { - return fmt.Errorf("unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool", v, Version) + if dirty { + return fmt.Errorf("last migration to version %d has failed, please fix the db manually and force version with -force-db flag", Version) } return nil } -func MigrateDB(backend string, db string) error { - var m *migrate.Migrate - +func getMigrateInstance(backend string, db string) (m *migrate.Migrate, err error) { switch backend { case "sqlite3": d, err := iofs.New(migrationFiles, "migrations/sqlite3") @@ -89,22 +89,31 @@ func MigrateDB(backend string, db string) error { m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db)) if err != nil { - return err + return m, err } case "mysql": d, err := iofs.New(migrationFiles, "migrations/mysql") if err != nil { - return err + return m, err } m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("mysql://%s?multiStatements=true", db)) if err != nil { - return err + return m, err } default: log.Fatalf("unsupported database backend: %s", backend) } + return m, nil +} + +func MigrateDB(backend string, db string) error { + m, err := getMigrateInstance(backend, db) + if err != nil { + return err + } + if err := m.Up(); err != nil { if err == migrate.ErrNoChange { log.Info("DB already up to date!") @@ -116,3 +125,35 @@ func MigrateDB(backend string, db string) error { m.Close() return nil } + +func RevertDB(backend string, db string) error { + m, err := getMigrateInstance(backend, db) + if err != nil { + return err + } + + if err := m.Migrate(Version - 1); err != nil { + if err == migrate.ErrNoChange { + log.Info("DB already up to date!") + } else { + return err + } + } + + m.Close() + return nil +} + +func ForceDB(backend string, db string) error { + m, err := getMigrateInstance(backend, db) + if err != nil { + return err + } + + if err := m.Force(int(Version)); err != nil { + return err + } + + m.Close() + return nil +} diff --git a/internal/repository/migrations/mysql/07_fix-tag-id.down.sql b/internal/repository/migrations/mysql/07_fix-tag-id.down.sql new file mode 100644 index 0000000..9f9959a --- /dev/null +++ b/internal/repository/migrations/mysql/07_fix-tag-id.down.sql @@ -0,0 +1,3 @@ +SET FOREIGN_KEY_CHECKS = 0; +ALTER TABLE tag MODIFY id INTEGER; +SET FOREIGN_KEY_CHECKS = 1; diff --git a/internal/repository/migrations/mysql/07_fix-tag-id.up.sql b/internal/repository/migrations/mysql/07_fix-tag-id.up.sql new file mode 100644 index 0000000..1abc4b3 --- /dev/null +++ 
b/internal/repository/migrations/mysql/07_fix-tag-id.up.sql @@ -0,0 +1,3 @@ +SET FOREIGN_KEY_CHECKS = 0; +ALTER TABLE tag MODIFY id INTEGER AUTO_INCREMENT; +SET FOREIGN_KEY_CHECKS = 1; diff --git a/internal/repository/migrations/sqlite3/07_fix-tag-id.down.sql b/internal/repository/migrations/sqlite3/07_fix-tag-id.down.sql new file mode 100644 index 0000000..e69de29 diff --git a/internal/repository/migrations/sqlite3/07_fix-tag-id.up.sql b/internal/repository/migrations/sqlite3/07_fix-tag-id.up.sql new file mode 100644 index 0000000..e69de29 diff --git a/internal/repository/query.go b/internal/repository/query.go index 84b8048..eec51a2 100644 --- a/internal/repository/query.go +++ b/internal/repository/query.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -96,7 +96,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde user := GetUserFromContext(ctx) if user == nil { var qnil sq.SelectBuilder - return qnil, fmt.Errorf("user context is nil!") + return qnil, fmt.Errorf("user context is nil") } else if user.HasAnyRole([]schema.Role{schema.RoleAdmin, schema.RoleSupport, schema.RoleApi}) { // Admin & Co. : All jobs return query, nil } else if user.HasRole(schema.RoleManager) { // Manager : Add filter for managed projects' jobs only + personal jobs diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 48b692f..16d94d2 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/stats.go b/internal/repository/stats.go index 8084553..2e226ee 100644 --- a/internal/repository/stats.go +++ b/internal/repository/stats.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -8,11 +8,15 @@ import ( "context" "database/sql" "fmt" + "math" "time" "github.com/ClusterCockpit/cc-backend/internal/config" "github.com/ClusterCockpit/cc-backend/internal/graph/model" + "github.com/ClusterCockpit/cc-backend/internal/metricdata" + "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" sq "github.com/Masterminds/squirrel" ) @@ -450,6 +454,39 @@ func (r *JobRepository) AddHistograms( return stat, nil } +// TODO: Where should a metric's thresholds come from: the matching cluster's config, or the largest peak across all clusters? And should we split into 10 bins plus 1 for artifacts?
+func (r *JobRepository) AddMetricHistograms( + ctx context.Context, + filter []*model.JobFilter, + metrics []string, + stat *model.JobsStatistics) (*model.JobsStatistics, error) { + start := time.Now() + + // Running Jobs Only: First query jobdata from sqlite, then query data and make bins + for _, f := range filter { + if f.State != nil { + if len(f.State) == 1 && f.State[0] == "running" { + stat.HistMetrics = r.runningJobsMetricStatisticsHistogram(ctx, metrics, filter) + log.Debugf("Timer AddMetricHistograms %s", time.Since(start)) + return stat, nil + } + } + } + + // All other cases: Query and make bins in sqlite directly + for _, m := range metrics { + metricHisto, err := r.jobsMetricStatisticsHistogram(ctx, m, filter) + if err != nil { + log.Warnf("Error while loading job metric statistics histogram: %s", m) + continue + } + stat.HistMetrics = append(stat.HistMetrics, metricHisto) + } + + log.Debugf("Timer AddMetricHistograms %s", time.Since(start)) + return stat, nil +} + // `value` must be the column grouped by, but renamed to "value" func (r *JobRepository) jobsStatisticsHistogram( ctx context.Context, @@ -487,3 +524,231 @@ func (r *JobRepository) jobsStatisticsHistogram( log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start)) return points, nil } + +func (r *JobRepository) jobsMetricStatisticsHistogram( + ctx context.Context, + metric string, + filters []*model.JobFilter) (*model.MetricHistoPoints, error) { + + var dbMetric string + switch metric { + case "cpu_load": + dbMetric = "load_avg" + case "flops_any": + dbMetric = "flops_any_avg" + case "mem_bw": + dbMetric = "mem_bw_avg" + case "mem_used": + dbMetric = "mem_used_max" + case "net_bw": + dbMetric = "net_bw_avg" + case "file_bw": + dbMetric = "file_bw_avg" + default: + return nil, fmt.Errorf("%s not implemented", metric) + } + + // Get specific Peak or largest Peak + var metricConfig *schema.MetricConfig + var peak float64 = 0.0 + var unit string = "" + + for _, f := range filters { + if f.Cluster != nil { + metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric) + peak = metricConfig.Peak + unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base + log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric) + } + } + + if peak == 0.0 { + for _, c := range archive.Clusters { + for _, m := range c.MetricConfig { + if m.Name == metric { + if m.Peak > peak { + peak = m.Peak + } + if unit == "" { + unit = m.Unit.Prefix + m.Unit.Base + } + } + } + } + } + + // log.Debugf("Metric %s: DB %s, Peak %f, Unit %s", metric, dbMetric, peak, unit) + // Make bins, see https://jereze.com/code/sql-histogram/ + + start := time.Now() + + crossJoinQuery := sq.Select( + fmt.Sprintf(`max(%s) as max`, dbMetric), + fmt.Sprintf(`min(%s) as min`, dbMetric), + ).From("job").Where( + fmt.Sprintf(`%s is not null`, dbMetric), + ).Where( + fmt.Sprintf(`%s <= %f`, dbMetric, peak), + ) + + crossJoinQuery, cjqerr := SecurityCheck(ctx, crossJoinQuery) + + if cjqerr != nil { + return nil, cjqerr + } + + for _, f := range filters { + crossJoinQuery = BuildWhereClause(f, crossJoinQuery) + } + + crossJoinQuerySql, crossJoinQueryArgs, sqlerr := crossJoinQuery.ToSql() + if sqlerr != nil { + return nil, sqlerr + } + + bins := 10 + binQuery := fmt.Sprintf(`CAST( (case when job.%s = value.max then value.max*0.999999999 else job.%s end - value.min) / (value.max - value.min) * %d as INTEGER )`, dbMetric, dbMetric, bins) + + mainQuery := sq.Select( + fmt.Sprintf(`%s + 1 as bin`, binQuery), + fmt.Sprintf(`count(job.%s) as 
count`, dbMetric), + fmt.Sprintf(`CAST(((value.max / %d) * (%s )) as INTEGER ) as min`, bins, binQuery), + fmt.Sprintf(`CAST(((value.max / %d) * (%s + 1 )) as INTEGER ) as max`, bins, binQuery), + ).From("job").CrossJoin( + fmt.Sprintf(`(%s) as value`, crossJoinQuerySql), crossJoinQueryArgs..., + ).Where(fmt.Sprintf(`job.%s is not null and job.%s <= %f`, dbMetric, dbMetric, peak)) + + mainQuery, qerr := SecurityCheck(ctx, mainQuery) + + if qerr != nil { + return nil, qerr + } + + for _, f := range filters { + mainQuery = BuildWhereClause(f, mainQuery) + } + + // Finalize query with Grouping and Ordering + mainQuery = mainQuery.GroupBy("bin").OrderBy("bin") + + rows, err := mainQuery.RunWith(r.DB).Query() + if err != nil { + log.Errorf("Error while running mainQuery: %s", err) + return nil, err + } + + points := make([]*model.MetricHistoPoint, 0) + for rows.Next() { + point := model.MetricHistoPoint{} + if err := rows.Scan(&point.Bin, &point.Count, &point.Min, &point.Max); err != nil { + log.Warnf("Error while scanning rows for %s", metric) + return nil, err // TODO: Returning the error here aborts the whole statistics query; if every requested metric fails, cc-backend delivers no histogram data at all. + } + + points = append(points, &point) + } + + result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points} + + log.Debugf("Timer jobsMetricStatisticsHistogram %s", time.Since(start)) + return &result, nil +} + +func (r *JobRepository) runningJobsMetricStatisticsHistogram( + ctx context.Context, + metrics []string, + filters []*model.JobFilter) []*model.MetricHistoPoints { + + // Get Jobs + jobs, err := r.QueryJobs(ctx, filters, &model.PageRequest{Page: 1, ItemsPerPage: 500 + 1}, nil) + if err != nil { + log.Errorf("Error while querying jobs for footprint: %s", err) + return nil + } + if len(jobs) > 500 { + log.Errorf("too many jobs matched (max: %d)", 500) + return nil + } + + // Get AVGs from metric repo + avgs := make([][]schema.Float, len(metrics)) + for i := range avgs { + avgs[i] = make([]schema.Float, 0, len(jobs)) + } + + for _, job := range jobs { + if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed { + continue + } + + if err := metricdata.LoadAverages(job, metrics, avgs, ctx); err != nil { + log.Errorf("Error while loading averages for histogram: %s", err) + return nil + } + } + + // Iterate metrics to fill end result + data := make([]*model.MetricHistoPoints, 0) + for idx, metric := range metrics { + // Get specific Peak or largest Peak + var metricConfig *schema.MetricConfig + var peak float64 = 0.0 + var unit string = "" + + for _, f := range filters { + if f.Cluster != nil { + metricConfig = archive.GetMetricConfig(*f.Cluster.Eq, metric) + peak = metricConfig.Peak + unit = metricConfig.Unit.Prefix + metricConfig.Unit.Base + log.Debugf("Cluster %s filter found with peak %f for %s", *f.Cluster.Eq, peak, metric) + } + } + + if peak == 0.0 { + for _, c := range archive.Clusters { + for _, m := range c.MetricConfig { + if m.Name == metric { + if m.Peak > peak { + peak = m.Peak + } + if unit == "" { + unit = m.Unit.Prefix + m.Unit.Base + } + } + } + } + } + + // Make and fill bins + bins := 10.0 + peakBin := peak / bins + + points := make([]*model.MetricHistoPoint, 0) + for b := 0; b < 10; b++ { + count := 0 + bindex := b + 1 + bmin := math.Round(peakBin * float64(b)) + bmax := math.Round(peakBin * (float64(b) + 1.0)) + + // Iterate AVG values for indexed metric and count for bins + for _, val := range avgs[idx] { + if float64(val) >= bmin && float64(val) < bmax { + count += 1
+ } + } + + bminint := int(bmin) + bmaxint := int(bmax) + + // Append Bin to Metric Result Array + point := model.MetricHistoPoint{Bin: &bindex, Count: count, Min: &bminint, Max: &bmaxint} + points = append(points, &point) + } + + // Append Metric Result Array to final results array + result := model.MetricHistoPoints{Metric: metric, Unit: unit, Data: points} + data = append(data, &result) + } + + return data +} diff --git a/internal/repository/stats_test.go b/internal/repository/stats_test.go index 6ed3f72..2cc377c 100644 --- a/internal/repository/stats_test.go +++ b/internal/repository/stats_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/tags.go b/internal/repository/tags.go index a6a41b6..a00cf35 100644 --- a/internal/repository/tags.go +++ b/internal/repository/tags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -15,8 +15,11 @@ import ( // Add the tag with id `tagId` to the job with the database id `jobId`. func (r *JobRepository) AddTag(job int64, tag int64) ([]*schema.Tag, error) { - if _, err := r.stmtCache.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES ($1, $2)`, job, tag); err != nil { - log.Error("Error while running query") + q := sq.Insert("jobtag").Columns("job_id", "tag_id").Values(job, tag) + + if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { + s, _, _ := q.ToSql() + log.Errorf("Error adding tag with %s: %v", s, err) return nil, err } @@ -37,8 +40,11 @@ func (r *JobRepository) AddTag(job int64, tag int64) ([]*schema.Tag, error) { // Removes a tag from a job func (r *JobRepository) RemoveTag(job, tag int64) ([]*schema.Tag, error) { - if _, err := r.stmtCache.Exec("DELETE FROM jobtag WHERE jobtag.job_id = $1 AND jobtag.tag_id = $2", job, tag); err != nil { - log.Error("Error while running query") + q := sq.Delete("jobtag").Where("jobtag.job_id = ?", job).Where("jobtag.tag_id = ?", tag) + + if _, err := q.RunWith(r.stmtCache).Exec(); err != nil { + s, _, _ := q.ToSql() + log.Errorf("Error removing tag with %s: %v", s, err) return nil, err } @@ -59,8 +65,12 @@ func (r *JobRepository) RemoveTag(job, tag int64) ([]*schema.Tag, error) { // CreateTag creates a new tag with the specified type and name and returns its database id.
func (r *JobRepository) CreateTag(tagType string, tagName string) (tagId int64, err error) { - res, err := r.stmtCache.Exec("INSERT INTO tag (tag_type, tag_name) VALUES ($1, $2)", tagType, tagName) + q := sq.Insert("tag").Columns("tag_type", "tag_name").Values(tagType, tagName) + + res, err := q.RunWith(r.stmtCache).Exec() if err != nil { + s, _, _ := q.ToSql() + log.Errorf("Error inserting tag with %s: %v", s, err) return 0, err } @@ -167,7 +177,8 @@ func (r *JobRepository) GetTags(job *int64) ([]*schema.Tag, error) { rows, err := q.RunWith(r.stmtCache).Query() if err != nil { - log.Error("Error while running query") + s, _, _ := q.ToSql() + log.Errorf("Error getting tags with %s: %v", s, err) return nil, err } diff --git a/internal/repository/testdata/job.db b/internal/repository/testdata/job.db index 4685f7f..a70e062 100644 Binary files a/internal/repository/testdata/job.db and b/internal/repository/testdata/job.db differ diff --git a/internal/repository/transaction.go b/internal/repository/transaction.go index 4d003d7..9398354 100644 --- a/internal/repository/transaction.go +++ b/internal/repository/transaction.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/user.go b/internal/repository/user.go index b951740..3b7d945 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/userConfig.go b/internal/repository/userConfig.go index fb8c3f5..e891226 100644 --- a/internal/repository/userConfig.go +++ b/internal/repository/userConfig.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/repository/userConfig_test.go b/internal/repository/userConfig_test.go index 3526919..c01bb5c 100644 --- a/internal/repository/userConfig_test.go +++ b/internal/repository/userConfig_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/routerConfig/routes.go b/internal/routerConfig/routes.go index 322cbf3..fe374ac 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/runtimeEnv/setup.go b/internal/runtimeEnv/setup.go index 5407a0e..4cacb18 100644 --- a/internal/runtimeEnv/setup.go +++ b/internal/runtimeEnv/setup.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved.
// Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/util/array.go b/internal/util/array.go index bc7ed04..19bdb53 100644 --- a/internal/util/array.go +++ b/internal/util/array.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/util/compress.go b/internal/util/compress.go index 0930f7e..4a901ae 100644 --- a/internal/util/compress.go +++ b/internal/util/compress.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/util/copy.go b/internal/util/copy.go index 3527e1e..c6896c4 100644 --- a/internal/util/copy.go +++ b/internal/util/copy.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/util/diskUsage.go b/internal/util/diskUsage.go index 8c70201..53665c5 100644 --- a/internal/util/diskUsage.go +++ b/internal/util/diskUsage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/util/fstat.go b/internal/util/fstat.go index 0c84014..54e1154 100644 --- a/internal/util/fstat.go +++ b/internal/util/fstat.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/util/statistics.go b/internal/util/statistics.go index ca84dac..384de58 100644 --- a/internal/util/statistics.go +++ b/internal/util/statistics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/internal/util/util_test.go b/internal/util/util_test.go index dff0a25..d945c96 100644 --- a/internal/util/util_test.go +++ b/internal/util/util_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index beeb24d..4a05194 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
@@ -52,9 +52,11 @@ type JobContainer struct { Data *schema.JobData } -var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024) -var ar ArchiveBackend -var useArchive bool +var ( + cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024) + ar ArchiveBackend + useArchive bool +) func Init(rawConfig json.RawMessage, disableArchive bool) error { useArchive = !disableArchive @@ -95,8 +97,8 @@ func GetHandle() ArchiveBackend { func LoadAveragesFromArchive( job *schema.Job, metrics []string, - data [][]schema.Float) error { - + data [][]schema.Float, +) error { metaFile, err := ar.LoadJobMeta(job) if err != nil { log.Warn("Error while loading job metadata from archiveBackend") @@ -115,7 +117,6 @@ func LoadAveragesFromArchive( } func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) { - metaFile, err := ar.LoadJobMeta(job) if err != nil { log.Warn("Error while loading job metadata from archiveBackend") @@ -125,10 +126,33 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) { return metaFile.Statistics, nil } +// UpdateMetadata merges the given key/value pairs into the Metadata section of +// an archived job's `meta.json` file. If the job is not archived, nothing is done. +func UpdateMetadata(job *schema.Job, metadata map[string]string) error { + if job.State == schema.JobStateRunning || !useArchive { + return nil + } + + jobMeta, err := ar.LoadJobMeta(job) + if err != nil { + log.Warn("Error while loading job metadata from archiveBackend") + return err + } + + if jobMeta.MetaData == nil { // Guard against jobs archived without any metadata + jobMeta.MetaData = map[string]string{} + } + + for k, v := range metadata { + jobMeta.MetaData[k] = v + } + + return ar.StoreJobMeta(jobMeta) +} + // If the job is archived, find its `meta.json` file and override the tags list // in that JSON file. If the job is not archived, nothing is done. func UpdateTags(job *schema.Job, tags []*schema.Tag) error { - if job.State == schema.JobStateRunning || !useArchive { return nil } diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index 5523a21..ac00ea1 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/archive/clusterConfig.go b/pkg/archive/clusterConfig.go index 0b1c43b..d0bf397 100644 --- a/pkg/archive/clusterConfig.go +++ b/pkg/archive/clusterConfig.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -8,8 +8,8 @@ import ( "errors" "fmt" - "github.com/ClusterCockpit/cc-backend/pkg/schema" "github.com/ClusterCockpit/cc-backend/pkg/log" + "github.com/ClusterCockpit/cc-backend/pkg/schema" ) var Clusters []*schema.Cluster diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go index cb8ed69..8a43748 100644 --- a/pkg/archive/fsBackend.go +++ b/pkg/archive/fsBackend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file.
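For reference, the new archive.UpdateMetadata helper above merges key/value pairs into an archived job's meta.json. A minimal usage sketch, assuming the archive backend was already initialized via archive.Init() during startup as cc-backend does; the function name and the "issues" key are hypothetical:

```go
package main

import (
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

// tagArchivedJob merges a note into an archived job's meta.json.
// UpdateMetadata itself is a no-op for running jobs and when the
// archive is disabled, so no extra guard is needed here.
func tagArchivedJob(job *schema.Job) error {
	return archive.UpdateMetadata(job, map[string]string{
		"issues": "node powercap active during run", // hypothetical key/value
	})
}

func main() {} // placeholder so the sketch compiles stand-alone
```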
diff --git a/pkg/archive/fsBackend_test.go b/pkg/archive/fsBackend_test.go index c5d869d..5e0a06c 100644 --- a/pkg/archive/fsBackend_test.go +++ b/pkg/archive/fsBackend_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/archive/json.go b/pkg/archive/json.go index d4409a6..ff2c6d9 100644 --- a/pkg/archive/json.go +++ b/pkg/archive/json.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/archive/nodelist.go b/pkg/archive/nodelist.go index 35a4265..7700185 100644 --- a/pkg/archive/nodelist.go +++ b/pkg/archive/nodelist.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/archive/nodelist_test.go b/pkg/archive/nodelist_test.go index 792944a..52aa812 100644 --- a/pkg/archive/nodelist_test.go +++ b/pkg/archive/nodelist_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/archive/s3Backend.go b/pkg/archive/s3Backend.go index e204074..d8b06e7 100644 --- a/pkg/archive/s3Backend.go +++ b/pkg/archive/s3Backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/log/log.go b/pkg/log/log.go index 7e89753..a40c656 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/lrucache/cache.go b/pkg/lrucache/cache.go index 874028e..220c53b 100644 --- a/pkg/lrucache/cache.go +++ b/pkg/lrucache/cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/lrucache/cache_test.go b/pkg/lrucache/cache_test.go index 7ba5504..8bff40e 100644 --- a/pkg/lrucache/cache_test.go +++ b/pkg/lrucache/cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
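As a sanity check on the fixed-bin histogram introduced in runningJobsMetricStatisticsHistogram earlier in this diff: each metric's range [0, peak] is split into ten equal bins, and job averages are counted into the half-open intervals [round(peak/10*b), round(peak/10*(b+1))). A self-contained sketch with made-up values:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	peak := 512.0                                       // hypothetical metric peak
	avgs := []float64{12.5, 130.0, 255.9, 256.0, 511.0} // hypothetical job averages

	bins := 10
	binWidth := peak / float64(bins)
	counts := make([]int, bins)

	for _, v := range avgs {
		for b := 0; b < bins; b++ {
			lo := math.Round(binWidth * float64(b))
			hi := math.Round(binWidth * float64(b+1))
			if v >= lo && v < hi {
				counts[b]++
				break
			}
		}
	}

	// A value exactly equal to peak falls outside the last half-open
	// interval, matching the behavior of the production loop.
	fmt.Println(counts) // [1 0 1 0 1 1 0 0 0 1]
}
```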
diff --git a/pkg/lrucache/handler.go b/pkg/lrucache/handler.go index db6687f..90b7527 100644 --- a/pkg/lrucache/handler.go +++ b/pkg/lrucache/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/lrucache/handler_test.go b/pkg/lrucache/handler_test.go index 4013c63..d908339 100644 --- a/pkg/lrucache/handler_test.go +++ b/pkg/lrucache/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/schema/cluster.go b/pkg/schema/cluster.go index bc7a86a..e4ca658 100644 --- a/pkg/schema/cluster.go +++ b/pkg/schema/cluster.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/schema/config.go b/pkg/schema/config.go index 5f43fb7..28fa53a 100644 --- a/pkg/schema/config.go +++ b/pkg/schema/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -23,6 +23,11 @@ type LdapConfig struct { SyncUserOnLogin bool `json:"syncUserOnLogin"` } +type OpenIDConfig struct { + Provider string `json:"provider"` + SyncUserOnLogin bool `json:"syncUserOnLogin"` +} + type JWTAuthConfig struct { // Specifies for how long a JWT token shall be valid // as a string parsable by time.ParseDuration(). @@ -65,10 +70,10 @@ type ClusterConfig struct { } type Retention struct { - Age int `json:"age"` - IncludeDB bool `json:"includeDB"` Policy string `json:"policy"` Location string `json:"location"` + Age int `json:"age"` + IncludeDB bool `json:"includeDB"` } // Format of the configuration (file). See below for the defaults. @@ -109,8 +114,9 @@ type ProgramConfig struct { Validate bool `json:"validate"` // For LDAP Authentication and user synchronisation. - LdapConfig *LdapConfig `json:"ldap"` - JwtConfig *JWTAuthConfig `json:"jwts"` + LdapConfig *LdapConfig `json:"ldap"` + JwtConfig *JWTAuthConfig `json:"jwts"` + OpenIDConfig *OpenIDConfig `json:"oidc"` // If 0 or empty, the session does not expire! SessionMaxAge string `json:"session-max-age"` diff --git a/pkg/schema/float.go b/pkg/schema/float.go index 6733380..e7d9857 100644 --- a/pkg/schema/float.go +++ b/pkg/schema/float.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/schema/job.go b/pkg/schema/job.go index ed3a8b6..ad3e6dc 100644 --- a/pkg/schema/job.go +++ b/pkg/schema/job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. 
// Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -54,10 +54,10 @@ type Job struct { BaseJob StartTimeUnix int64 `json:"-" db:"start_time" example:"1649723812"` // Start epoch time stamp in seconds StartTime time.Time `json:"startTime"` // Start time as 'time.Time' data type - MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64 - FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64 - MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64 - LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64 + MemUsedMax float64 `json:"memUsedMax" db:"mem_used_max"` // MemUsedMax as Float64 + FlopsAnyAvg float64 `json:"flopsAnyAvg" db:"flops_any_avg"` // FlopsAnyAvg as Float64 + MemBwAvg float64 `json:"memBwAvg" db:"mem_bw_avg"` // MemBwAvg as Float64 + LoadAvg float64 `json:"loadAvg" db:"load_avg"` // LoadAvg as Float64 NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64 NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64 FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64 diff --git a/pkg/schema/metrics.go b/pkg/schema/metrics.go index d9a13d3..e340747 100644 --- a/pkg/schema/metrics.go +++ b/pkg/schema/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/schema/user.go b/pkg/schema/user.go index 047f617..7b1ca13 100644 --- a/pkg/schema/user.go +++ b/pkg/schema/user.go @@ -1,4 +1,4 @@ -// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -27,6 +27,7 @@ const ( AuthViaLocalPassword AuthSource = iota AuthViaLDAP AuthViaToken + AuthViaOIDC AuthViaAll ) diff --git a/pkg/schema/user_test.go b/pkg/schema/user_test.go index cd054c3..ce3ab3b 100644 --- a/pkg/schema/user_test.go +++ b/pkg/schema/user_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/schema/validate.go b/pkg/schema/validate.go index 77b6dbb..d14adf5 100644 --- a/pkg/schema/validate.go +++ b/pkg/schema/validate.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/pkg/schema/validate_test.go b/pkg/schema/validate_test.go index c3e918f..2dc97c1 100644 --- a/pkg/schema/validate_test.go +++ b/pkg/schema/validate_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
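The new OpenIDConfig struct and the "oidc" key on ProgramConfig (pkg/schema/config.go above) make an OIDC provider configurable. A minimal decoding sketch; the provider URL is made up, and this only exercises the config schema, not the login flow itself:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	// Hypothetical excerpt of config.json; only the new "oidc" block is shown.
	raw := []byte(`{
		"oidc": {
			"provider": "https://auth.example.org/realms/hpc",
			"syncUserOnLogin": true
		}
	}`)

	var cfg schema.ProgramConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	fmt.Println(cfg.OpenIDConfig.Provider, cfg.OpenIDConfig.SyncUserOnLogin)
}
```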
diff --git a/tools/archive-manager/main.go b/tools/archive-manager/main.go index 988bb78..1a80712 100644 --- a/tools/archive-manager/main.go +++ b/tools/archive-manager/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/cluster.go b/tools/archive-migration/cluster.go index 8944816..f9a45ad 100644 --- a/tools/archive-migration/cluster.go +++ b/tools/archive-migration/cluster.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/clusterConfig.go b/tools/archive-migration/clusterConfig.go index 0b6df02..0f9f426 100644 --- a/tools/archive-migration/clusterConfig.go +++ b/tools/archive-migration/clusterConfig.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/float.go b/tools/archive-migration/float.go index af322db..3fbccf8 100644 --- a/tools/archive-migration/float.go +++ b/tools/archive-migration/float.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/fsBackend.go b/tools/archive-migration/fsBackend.go index a8f21b9..81cf57e 100644 --- a/tools/archive-migration/fsBackend.go +++ b/tools/archive-migration/fsBackend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/job.go b/tools/archive-migration/job.go index cd54d6c..8705ce9 100644 --- a/tools/archive-migration/job.go +++ b/tools/archive-migration/job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/json.go b/tools/archive-migration/json.go index 174e725..b2c281c 100644 --- a/tools/archive-migration/json.go +++ b/tools/archive-migration/json.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/main.go b/tools/archive-migration/main.go index 93cdad5..b78e94e 100644 --- a/tools/archive-migration/main.go +++ b/tools/archive-migration/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. 
// All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/archive-migration/metrics.go b/tools/archive-migration/metrics.go index e87332f..ec5de6f 100644 --- a/tools/archive-migration/metrics.go +++ b/tools/archive-migration/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. diff --git a/tools/gen-keypair/main.go b/tools/gen-keypair/main.go index f8c66fe..ff9c5c3 100644 --- a/tools/gen-keypair/main.go +++ b/tools/gen-keypair/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. @@ -20,7 +20,8 @@ func main() { os.Exit(1) } - fmt.Fprintf(os.Stdout, "JWT_PUBLIC_KEY=%#v\nJWT_PRIVATE_KEY=%#v\n", + fmt.Fprintf(os.Stdout, "ED25519 PUBLIC_KEY=%#v\nED25519 PRIVATE_KEY=%#v\n", base64.StdEncoding.EncodeToString(pub), base64.StdEncoding.EncodeToString(priv)) + fmt.Println("This is NOT a JWT token. JWT tokens can be generated with cc-backend. Use this keypair for signing and validating JWT tokens in ClusterCockpit.") } diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index 1ca3349..7975773 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -9,24 +9,26 @@ "version": "1.0.0", "license": "MIT", "dependencies": { - "@rollup/plugin-replace": "^5.0.2", - "@urql/svelte": "^4.0.1", - "chart.js": "^4.3.3", - "graphql": "^16.6.0", - "svelte-chartjs": "^3.1.2", - "sveltestrap": "^5.11.1", - "uplot": "^1.6.24", - "wonka": "^6.3.2" + "@rollup/plugin-replace": "^5.0.5", + "@sveltestrap/sveltestrap": "^6.2.6", + "@urql/svelte": "^4.1.0", + "chart.js": "^4.4.2", + "date-fns": "^2.30.0", + "graphql": "^16.8.1", + "mathjs": "^12.4.0", + "svelte-chartjs": "^3.1.5", + "uplot": "^1.6.30", + "wonka": "^6.3.4" }, "devDependencies": { - "@rollup/plugin-commonjs": "^24.1.0", - "@rollup/plugin-node-resolve": "^15.0.2", - "@rollup/plugin-terser": "^0.4.1", - "@timohausmann/quadtree-js": "^1.2.5", - "rollup": "^3.21.0", - "rollup-plugin-css-only": "^4.3.0", - "rollup-plugin-svelte": "^7.1.4", - "svelte": "^3.58.0" + "@rollup/plugin-commonjs": "^25.0.7", + "@rollup/plugin-node-resolve": "^15.2.3", + "@rollup/plugin-terser": "^0.4.4", + "@timohausmann/quadtree-js": "^1.2.6", + "rollup": "^4.12.1", + "rollup-plugin-css-only": "^4.5.2", + "rollup-plugin-svelte": "^7.1.6", + "svelte": "^4.2.12" } }, "node_modules/@0no-co/graphql.web": { @@ -42,34 +44,54 @@ } } }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", - "dev": true, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + },
"engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.0.tgz", + "integrity": "sha512-Chk32uHMg6TnQdvw2e9IlqPpFX/6NLuK0Ys2PqLb7/gL5uFn9mXvK715FGLlOLQrcO4qIkNHkvPGktzzXexsFw==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", - "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", - "dev": true, + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", - "dev": true, + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "engines": { "node": ">=6.0.0" } @@ -90,10 +112,9 @@ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.19", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", - "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", - "dev": true, + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -114,9 +135,9 @@ } }, "node_modules/@rollup/plugin-commonjs": { - "version": "24.1.0", - "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-24.1.0.tgz", - "integrity": "sha512-eSL45hjhCWI0jCCXcNtLVqM5N1JlBGvlFfY0m6oOYnLCJ6N0qEXoZql4sY2MOUArzhH4SA/qBpTxvvZp2Sc+DQ==", + "version": "25.0.7", + "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-25.0.7.tgz", + "integrity": "sha512-nEvcR+LRjEjsaSsc4x3XZfCCvZIaSMenZu/OiwOKGN2UhQpAYI7ru7czFvyWbErlpoGjnSX3D5Ch5FcMA3kRWQ==", "dev": true, "dependencies": { "@rollup/pluginutils": "^5.0.1", @@ -124,13 +145,13 @@ "estree-walker": "^2.0.2", "glob": "^8.0.3", "is-reference": "1.2.1", - "magic-string": "^0.27.0" + "magic-string": "^0.30.3" }, "engines": { "node": ">=14.0.0" }, "peerDependencies": { - "rollup": 
"^2.68.0||^3.0.0" + "rollup": "^2.68.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -139,9 +160,9 @@ } }, "node_modules/@rollup/plugin-node-resolve": { - "version": "15.2.1", - "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.1.tgz", - "integrity": "sha512-nsbUg588+GDSu8/NS8T4UAshO6xeaOfINNuXeVHcKV02LJtoRaM1SiOacClw4kws1SFiNhdLGxlbMY9ga/zs/w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.3.tgz", + "integrity": "sha512-j/lym8nf5E21LwBT4Df1VD6hRO2L2iwUeUmP7litikRsVp1H6NWx20NEp0Y7su+7XGc476GnXXc4kFeZNGmaSQ==", "dev": true, "dependencies": { "@rollup/pluginutils": "^5.0.1", @@ -155,7 +176,7 @@ "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^2.78.0||^3.0.0" + "rollup": "^2.78.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -164,18 +185,18 @@ } }, "node_modules/@rollup/plugin-replace": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.2.tgz", - "integrity": "sha512-M9YXNekv/C/iHHK+cvORzfRYfPbq0RDD8r0G+bMiTXjNGKulPnCT9O3Ss46WfhI6ZOCgApOP7xAdmCQJ+U2LAA==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.5.tgz", + "integrity": "sha512-rYO4fOi8lMaTg/z5Jb+hKnrHHVn8j2lwkqwyS4kTRhKyWOLf2wST2sWXr4WzWiTcoHTp2sTjqUbqIj2E39slKQ==", "dependencies": { "@rollup/pluginutils": "^5.0.1", - "magic-string": "^0.27.0" + "magic-string": "^0.30.3" }, "engines": { "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -184,9 +205,9 @@ } }, "node_modules/@rollup/plugin-terser": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.3.tgz", - "integrity": "sha512-EF0oejTMtkyhrkwCdg0HJ0IpkcaVg1MMSf2olHb2Jp+1mnLM04OhjpJWGma4HobiDTF0WCyViWuvadyE9ch2XA==", + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/@rollup/plugin-terser/-/plugin-terser-0.4.4.tgz", + "integrity": "sha512-XHeJC5Bgvs8LfukDwWZp7yeqin6ns8RTl2B9avbejt6tZqsqvVoWI7ZTQrcNsfKEDWBTnTxM8nMDkO2IFFbd0A==", "dev": true, "dependencies": { "serialize-javascript": "^6.0.1", @@ -197,7 +218,7 @@ "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^2.x || ^3.x" + "rollup": "^2.0.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -206,9 +227,9 @@ } }, "node_modules/@rollup/pluginutils": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.0.3.tgz", - "integrity": "sha512-hfllNN4a80rwNQ9QCxhxuHCGHMAvabXqxNdaChUSSadMre7t4iEUI6fFAhBOn/eIYTgYVhBv7vCLsAJ4u3lf3g==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.0.tgz", + "integrity": "sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==", "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", @@ -218,7 +239,7 @@ "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -226,16 +247,196 @@ } } }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.1.tgz", + "integrity": 
"sha512-iU2Sya8hNn1LhsYyf0N+L4Gf9Qc+9eBTJJJsaOGUp+7x4n2M9dxTt8UvhJl3oeftSjblSlpCfvjA/IfP3g5VjQ==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.1.tgz", + "integrity": "sha512-wlzcWiH2Ir7rdMELxFE5vuM7D6TsOcJ2Yw0c3vaBR3VOsJFVTx9xvwnAvhgU5Ii8Gd6+I11qNHwndDscIm0HXg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.1.tgz", + "integrity": "sha512-YRXa1+aZIFN5BaImK+84B3uNK8C6+ynKLPgvn29X9s0LTVCByp54TB7tdSMHDR7GTV39bz1lOmlLDuedgTwwHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.1.tgz", + "integrity": "sha512-opjWJ4MevxeA8FhlngQWPBOvVWYNPFkq6/25rGgG+KOy0r8clYwL1CFd+PGwRqqMFVQ4/Qd3sQu5t7ucP7C/Uw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.1.tgz", + "integrity": "sha512-uBkwaI+gBUlIe+EfbNnY5xNyXuhZbDSx2nzzW8tRMjUmpScd6lCQYKY2V9BATHtv5Ef2OBq6SChEP8h+/cxifQ==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.1.tgz", + "integrity": "sha512-0bK9aG1kIg0Su7OcFTlexkVeNZ5IzEsnz1ept87a0TUgZ6HplSgkJAnFpEVRW7GRcikT4GlPV0pbtVedOaXHQQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.1.tgz", + "integrity": "sha512-qB6AFRXuP8bdkBI4D7UPUbE7OQf7u5OL+R94JE42Z2Qjmyj74FtDdLGeriRyBDhm4rQSvqAGCGC01b8Fu2LthQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.1.tgz", + "integrity": "sha512-sHig3LaGlpNgDj5o8uPEoGs98RII8HpNIqFtAI8/pYABO8i0nb1QzT0JDoXF/pxzqO+FkxvwkHZo9k0NJYDedg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.1.tgz", + "integrity": "sha512-nD3YcUv6jBJbBNFvSbp0IV66+ba/1teuBcu+fBBPZ33sidxitc6ErhON3JNavaH8HlswhWMC3s5rgZpM4MtPqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.1.tgz", + "integrity": 
"sha512-7/XVZqgBby2qp/cO0TQ8uJK+9xnSdJ9ct6gSDdEr4MfABrjTyrW6Bau7HQ73a2a5tPB7hno49A0y1jhWGDN9OQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.1.tgz", + "integrity": "sha512-CYc64bnICG42UPL7TrhIwsJW4QcKkIt9gGlj21gq3VV0LL6XNb1yAdHVp1pIi9gkts9gGcT3OfUYHjGP7ETAiw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.1.tgz", + "integrity": "sha512-LN+vnlZ9g0qlHGlS920GR4zFCqAwbv2lULrR29yGaWP9u7wF5L7GqWu9Ah6/kFZPXPUkpdZwd//TNR+9XC9hvA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.1.tgz", + "integrity": "sha512-n+vkrSyphvmU0qkQ6QBNXCGr2mKjhP08mPRM/Xp5Ck2FV4NrHU+y6axzDeixUrCBHVUS51TZhjqrKBBsHLKb2Q==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@sveltestrap/sveltestrap": { + "version": "6.2.6", + "resolved": "https://registry.npmjs.org/@sveltestrap/sveltestrap/-/sveltestrap-6.2.6.tgz", + "integrity": "sha512-iB50tbVzsFXp0M10pe3XywRkNxjKPIHXJzV44mb1FhajWNWwxme8MkBis9m2QNivM2hyw5zDHjgGuzwTOB76JQ==", + "dependencies": { + "@popperjs/core": "^2.11.8" + }, + "peerDependencies": { + "svelte": "^4.0.0 || ^5.0.0 || ^5.0.0-next.0" + } + }, "node_modules/@timohausmann/quadtree-js": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/@timohausmann/quadtree-js/-/quadtree-js-1.2.5.tgz", - "integrity": "sha512-WcH3pouYtpyLjTCRvNP0WuSV4m7mRyYhLzW44egveFryT7pJhpDsdIJASEe37iCFNA0vmEpqTYGoG0siyXEthA==", + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@timohausmann/quadtree-js/-/quadtree-js-1.2.6.tgz", + "integrity": "sha512-EoAoLMFV2JfSG8+8XD9xWJQdyvfEB5xNpiQWGD7rTDSbDQQV8IVpkm0uOIxwJZ+1uC9hHKri9GmJ5wBSUO4jfg==", "dev": true }, "node_modules/@types/estree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", - "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" }, "node_modules/@types/resolve": { "version": "1.20.2", @@ -244,20 +445,20 @@ "dev": true }, "node_modules/@urql/core": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/@urql/core/-/core-4.1.1.tgz", - "integrity": "sha512-iIoAy6BY+BUZZ7KIpnMT7C9q+ULf5ZCVxGe3/i7WZSJBrQa2h1QkIMhL+8fAKmOn9gt83jSIv5drWWnhZ9izEA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/@urql/core/-/core-4.3.0.tgz", + "integrity": "sha512-wT+FeL8DG4x5o6RfHEnONNFVDM3616ouzATMYUClB6CB+iIu2mwfBKd7xSUxYOZmwtxna5/hDRQdMl3nbQZlnw==", "dependencies": { "@0no-co/graphql.web": "^1.0.1", "wonka": "^6.3.2" } }, "node_modules/@urql/svelte": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.0.4.tgz", - "integrity": 
"sha512-HYz9dHdqEcs9d82WWczQ3XG+zuup3TS01H+txaij/QfQ+KHjrlrn0EkOHQQd1S+H8+nFjFU2x9+HE3+3fuwL1A==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@urql/svelte/-/svelte-4.1.0.tgz", + "integrity": "sha512-Ov3EclCjaXPPTjKNTcIDlAG3qY/jhLjl/J9yyz9FeLUQ9S2jEgsvlzNXibrY27f4ihD4gH36CNGuj1XOi5hEEQ==", "dependencies": { - "@urql/core": "^4.1.0", + "@urql/core": "^4.3.0", "wonka": "^6.3.2" }, "peerDependencies": { @@ -265,10 +466,9 @@ } }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", - "dev": true, + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "bin": { "acorn": "bin/acorn" }, @@ -276,6 +476,22 @@ "node": ">=0.4.0" } }, + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/axobject-query": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.0.0.tgz", + "integrity": "sha512-+60uv1hiVFhHZeO+Lz0RYzsVHy5Wr1ayX0mwda9KPDVLNJgZ1T9Ny7VmFbLDzxsH0D87I86vgj3gFrjTJUYznw==", + "dependencies": { + "dequal": "^2.0.3" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -310,14 +526,34 @@ } }, "node_modules/chart.js": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.3.3.tgz", - "integrity": "sha512-aTk7pBw+x6sQYhon/NR3ikfUJuym/LdgpTlgZRe2PaEhjUMKBKyNaFCMVRAyTEWYFNO7qRu7iQVqOw/OqzxZxQ==", + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.4.2.tgz", + "integrity": "sha512-6GD7iKwFpP5kbSD4MeRRRlTnQvxfQREy36uEtm1hzHzcOqwWx0YEHuspuoNlslu+nciLIB7fjjsHkUv/FzFcOg==", "dependencies": { "@kurkle/color": "^0.3.0" }, "engines": { - "pnpm": ">=7" + "pnpm": ">=8" + } + }, + "node_modules/code-red": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/code-red/-/code-red-1.0.4.tgz", + "integrity": "sha512-7qJWqItLA8/VPVlKJlFXU+NBlo/qyfs39aJcuMT/2ere32ZqvF5OSxgdM5xOfJJ7O429gg2HM47y8v9P+9wrNw==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15", + "@types/estree": "^1.0.1", + "acorn": "^8.10.0", + "estree-walker": "^3.0.3", + "periscopic": "^3.1.0" + } + }, + "node_modules/code-red/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" } }, "node_modules/commander": { @@ -332,6 +568,50 @@ "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", "dev": true }, + "node_modules/complex.js": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/complex.js/-/complex.js-2.1.1.tgz", + "integrity": "sha512-8njCHOTtFFLtegk6zQo0kkVX1rngygb/KQI6z1qZxlFI3scluC+LVTCFbrkWjBv4vvLlbQ9t88IPMC6k95VTTg==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": 
"https://www.patreon.com/infusion" + } + }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/date-fns": { + "version": "2.30.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz", + "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==", + "dependencies": { + "@babel/runtime": "^7.21.0" + }, + "engines": { + "node": ">=0.11" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/date-fns" + } + }, + "node_modules/decimal.js": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz", + "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==" + }, "node_modules/deepmerge": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", @@ -341,11 +621,36 @@ "node": ">=0.10.0" } }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-latex": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/escape-latex/-/escape-latex-1.2.0.tgz", + "integrity": "sha512-nV5aVWW1K0wEiUIEdZ4erkGGH8mDxGyxSeqPzRNtWP7ataw+/olFObw7hujFWlVjNsaDFw5VZ5NzVSIqRgfTiw==" + }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" }, + "node_modules/fraction.js": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.4.tgz", + "integrity": "sha512-pwiTgt0Q7t+GHZA4yaLjObx4vXmmdcS0iSJ19o8d/goUGgItX9UZWKWNnLHehxviD8wU2IWRsnR8cD5+yOJP2Q==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -367,10 +672,13 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/glob": { "version": "8.1.0", @@ -392,23 +700,23 @@ } }, "node_modules/graphql": { - "version": "16.8.0", - "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.0.tgz", - "integrity": "sha512-0oKGaR+y3qcS5mCu1vb7KG+a89vjn06C7Ihq/dDl3jA+A8B3TKomvi3CiEcVLJQGalbu8F52LxkOym7U5sSfbg==", + "version": "16.8.1", + "resolved": 
"https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz", + "integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==", "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "node_modules/hasown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.1.tgz", + "integrity": "sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==", "dev": true, "dependencies": { - "function-bind": "^1.1.1" + "function-bind": "^1.1.2" }, "engines": { - "node": ">= 0.4.0" + "node": ">= 0.4" } }, "node_modules/inflight": { @@ -443,12 +751,12 @@ } }, "node_modules/is-core-module": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", - "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", "dev": true, "dependencies": { - "has": "^1.0.3" + "hasown": "^2.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -469,17 +777,54 @@ "@types/estree": "*" } }, + "node_modules/javascript-natural-sort": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/javascript-natural-sort/-/javascript-natural-sort-0.7.1.tgz", + "integrity": "sha512-nO6jcEfZWQXDhOiBtG2KvKyEptz7RVbpGP4vTD2hLBdmNQSsCiicO2Ioinv6UI4y9ukqnBpy+XZ9H6uLNgJTlw==" + }, + "node_modules/locate-character": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", + "integrity": "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==" + }, "node_modules/magic-string": { - "version": "0.27.0", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz", - "integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==", + "version": "0.30.8", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.8.tgz", + "integrity": "sha512-ISQTe55T2ao7XtlAStud6qwYPZjE4GK1S/BeVPus4jrq6JuOnQ00YKQC581RWhR122W7msZV263KzVeLoqidyQ==", "dependencies": { - "@jridgewell/sourcemap-codec": "^1.4.13" + "@jridgewell/sourcemap-codec": "^1.4.15" }, "engines": { "node": ">=12" } }, + "node_modules/mathjs": { + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/mathjs/-/mathjs-12.4.0.tgz", + "integrity": "sha512-4Moy0RNjwMSajEkGGxNUyMMC/CZAcl87WBopvNsJWB4E4EFebpTedr+0/rhqmnOSTH3Wu/3WfiWiw6mqiaHxVw==", + "dependencies": { + "@babel/runtime": "^7.23.9", + "complex.js": "^2.1.1", + "decimal.js": "^10.4.3", + "escape-latex": "^1.2.0", + "fraction.js": "4.3.4", + "javascript-natural-sort": "^0.7.1", + "seedrandom": "^3.0.5", + "tiny-emitter": "^2.1.0", + "typed-function": "^4.1.1" + }, + "bin": { + "mathjs": "bin/cli.js" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": 
"sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" + }, "node_modules/minimatch": { "version": "5.1.6", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", @@ -507,6 +852,32 @@ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "dev": true }, + "node_modules/periscopic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", + "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^3.0.0", + "is-reference": "^3.0.0" + } + }, + "node_modules/periscopic/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/periscopic/node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" + } + }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", @@ -527,10 +898,15 @@ "safe-buffer": "^5.1.0" } }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, "node_modules/resolve": { - "version": "1.22.4", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.4.tgz", - "integrity": "sha512-PXNdCiPqDqeUou+w1C2eTQbNfxKSuMxqTCuvlmmMsk1NWHL5fRrhY6Pl0qEYYc6+QqGClco1Qj8XnjPego4wfg==", + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", "dev": true, "dependencies": { "is-core-module": "^2.13.0", @@ -554,25 +930,41 @@ } }, "node_modules/rollup": { - "version": "3.28.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.28.1.tgz", - "integrity": "sha512-R9OMQmIHJm9znrU3m3cpE8uhN0fGdXiawME7aZIpQqvpS/85+Vt1Hq1/yVIcYfOmaQiHjvXkQAoJukvLpau6Yw==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.1.tgz", + "integrity": "sha512-ggqQKvx/PsB0FaWXhIvVkSWh7a/PCLQAsMjBc+nA2M8Rv2/HG0X6zvixAB7KyZBRtifBUhy5k8voQX/mRnABPg==", "devOptional": true, + "dependencies": { + "@types/estree": "1.0.5" + }, "bin": { "rollup": "dist/bin/rollup" }, "engines": { - "node": ">=14.18.0", + "node": ">=18.0.0", "npm": ">=8.0.0" }, "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.12.1", + "@rollup/rollup-android-arm64": "4.12.1", + "@rollup/rollup-darwin-arm64": "4.12.1", + "@rollup/rollup-darwin-x64": "4.12.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.12.1", + "@rollup/rollup-linux-arm64-gnu": "4.12.1", + "@rollup/rollup-linux-arm64-musl": "4.12.1", + "@rollup/rollup-linux-riscv64-gnu": "4.12.1", + "@rollup/rollup-linux-x64-gnu": "4.12.1", + "@rollup/rollup-linux-x64-musl": "4.12.1", + "@rollup/rollup-win32-arm64-msvc": "4.12.1", + 
"@rollup/rollup-win32-ia32-msvc": "4.12.1", + "@rollup/rollup-win32-x64-msvc": "4.12.1", "fsevents": "~2.3.2" } }, "node_modules/rollup-plugin-css-only": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/rollup-plugin-css-only/-/rollup-plugin-css-only-4.3.0.tgz", - "integrity": "sha512-BsiCqJJQzZh2lQiHY5irejRoJ3I1EUFHEi5PjVqsr+EmOh54YrWVwd3YZEXnQJ2+fzlhif0YM/Kf0GuH90GAdQ==", + "version": "4.5.2", + "resolved": "https://registry.npmjs.org/rollup-plugin-css-only/-/rollup-plugin-css-only-4.5.2.tgz", + "integrity": "sha512-7rj9+jB17Pz8LNcPgtMUb16JcgD8lxQMK9HcGfAVhMK3na/WXes3oGIo5QsrQQVqtgAU6q6KnQNXJrYunaUIQQ==", "dev": true, "dependencies": { "@rollup/pluginutils": "5" @@ -581,7 +973,7 @@ "node": ">=14" }, "peerDependencies": { - "rollup": "<4" + "rollup": "<5" } }, "node_modules/rollup-plugin-svelte": { @@ -634,19 +1026,24 @@ } ] }, + "node_modules/seedrandom": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-3.0.5.tgz", + "integrity": "sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==" + }, "node_modules/serialize-javascript": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz", - "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", "dev": true, "dependencies": { "randombytes": "^2.1.0" } }, "node_modules/smob": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/smob/-/smob-1.4.0.tgz", - "integrity": "sha512-MqR3fVulhjWuRNSMydnTlweu38UhQ0HXM4buStD/S3mc/BzX3CuM9OmhyQpmtYCvoYdl5ris6TI0ZqH355Ymqg==", + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/smob/-/smob-1.4.1.tgz", + "integrity": "sha512-9LK+E7Hv5R9u4g4C3p+jjLstaLe11MDsL21UpYaCNmapvMkYhqCV4A/f/3gyH8QjMyh6l68q9xC85vihY9ahMQ==", "dev": true }, "node_modules/source-map": { @@ -658,6 +1055,14 @@ "node": ">=0.10.0" } }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/source-map-support": { "version": "0.5.21", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", @@ -681,37 +1086,58 @@ } }, "node_modules/svelte": { - "version": "3.59.2", - "resolved": "https://registry.npmjs.org/svelte/-/svelte-3.59.2.tgz", - "integrity": "sha512-vzSyuGr3eEoAtT/A6bmajosJZIUWySzY2CzB3w2pgPvnkUjGqlDnsNnA0PMO+mMAhuyMul6C2uuZzY6ELSkzyA==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/svelte/-/svelte-4.2.12.tgz", + "integrity": "sha512-d8+wsh5TfPwqVzbm4/HCXC783/KPHV60NvwitJnyTA5lWn1elhXMNWhXGCJ7PwPa8qFUnyJNIyuIRt2mT0WMug==", + "dependencies": { + "@ampproject/remapping": "^2.2.1", + "@jridgewell/sourcemap-codec": "^1.4.15", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/estree": "^1.0.1", + "acorn": "^8.9.0", + "aria-query": "^5.3.0", + "axobject-query": "^4.0.0", + "code-red": "^1.0.3", + "css-tree": "^2.3.1", + "estree-walker": "^3.0.3", + "is-reference": "^3.0.1", + "locate-character": "^3.0.0", + "magic-string": "^0.30.4", + "periscopic": "^3.1.0" + }, 
"engines": { - "node": ">= 8" + "node": ">=16" } }, "node_modules/svelte-chartjs": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/svelte-chartjs/-/svelte-chartjs-3.1.2.tgz", - "integrity": "sha512-3+6gY2IJ9Ua8R9pk3iS1ypa7Z9OoXCJb9oPwIfTp7caJM+X+RrWnH2CTkGAq7FeSxc2nnmW08tYN88Q8Y+5M+w==", + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/svelte-chartjs/-/svelte-chartjs-3.1.5.tgz", + "integrity": "sha512-ka2zh7v5FiwfAX1oMflZ0HkNkgjHjFqANgRyC+vNYXfxtx2ku68Zo+2KgbKeBH2nS1ThDqkIACPzGxy4T0UaoA==", "peerDependencies": { "chart.js": "^3.5.0 || ^4.0.0", - "svelte": "^3.45.0" + "svelte": "^4.0.0" } }, - "node_modules/sveltestrap": { - "version": "5.11.1", - "resolved": "https://registry.npmjs.org/sveltestrap/-/sveltestrap-5.11.1.tgz", - "integrity": "sha512-FIvPIEU1VolqMN1wi2XrC8aehWVbIJEST7zPfPbOUUfPimyx9giN4nA3We5wkXrBUaifXA8CSIwuHFvf3CmYQw==", + "node_modules/svelte/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", "dependencies": { - "@popperjs/core": "^2.11.8" - }, - "peerDependencies": { - "svelte": "^3.53.1" + "@types/estree": "^1.0.0" + } + }, + "node_modules/svelte/node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" } }, "node_modules/terser": { - "version": "5.19.2", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.19.2.tgz", - "integrity": "sha512-qC5+dmecKJA4cpYxRa5aVkKehYsQKc+AHeKl0Oe62aYjBL8ZA33tTljktDHJSaxxMnbI5ZYw+o/S2DxxLu8OfA==", + "version": "5.29.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.29.1.tgz", + "integrity": "sha512-lZQ/fyaIGxsbGxApKmoPTODIzELy3++mXhS5hOqaAWZjQtpq/hFHAc+rm29NND1rYRxRWKcjuARNwULNXa5RtQ==", "dev": true, "dependencies": { "@jridgewell/source-map": "^0.3.3", @@ -726,10 +1152,23 @@ "node": ">=10" } }, + "node_modules/tiny-emitter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", + "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==" + }, + "node_modules/typed-function": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/typed-function/-/typed-function-4.1.1.tgz", + "integrity": "sha512-Pq1DVubcvibmm8bYcMowjVnnMwPVMeh0DIdA8ad8NZY2sJgapANJmiigSUwlt+EgXxpfIv8MWrQXTIzkfYZLYQ==", + "engines": { + "node": ">= 14" + } + }, "node_modules/uplot": { - "version": "1.6.25", - "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.25.tgz", - "integrity": "sha512-eWLAhEaGtIcVBiS67mC2UC0yV+G6eYLS2rU67N4F2JVWjt7uBMg4xKXUYGW0dEz9G+m7fNatjCVXHts4gjyuMQ==" + "version": "1.6.30", + "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.30.tgz", + "integrity": "sha512-48oVVRALM/128ttW19F2a2xobc2WfGdJ0VJFX00099CfqbCTuML7L2OrTKxNzeFP34eo1+yJbqFSoFAp2u28/Q==" }, "node_modules/wonka": { "version": "6.3.4", diff --git a/web/frontend/package.json b/web/frontend/package.json index 3e77474..c70e57a 100644 --- a/web/frontend/package.json +++ b/web/frontend/package.json @@ -7,23 +7,25 @@ "dev": "rollup -c -w" }, "devDependencies": { - "@rollup/plugin-commonjs": "^24.1.0", - "@rollup/plugin-node-resolve": "^15.0.2", - "@rollup/plugin-terser": "^0.4.1", - 
"@timohausmann/quadtree-js": "^1.2.5", - "rollup": "^3.21.0", - "rollup-plugin-css-only": "^4.3.0", - "rollup-plugin-svelte": "^7.1.4", - "svelte": "^3.58.0" + "@rollup/plugin-commonjs": "^25.0.7", + "@rollup/plugin-node-resolve": "^15.2.3", + "@rollup/plugin-terser": "^0.4.4", + "@timohausmann/quadtree-js": "^1.2.6", + "rollup": "^4.12.1", + "rollup-plugin-css-only": "^4.5.2", + "rollup-plugin-svelte": "^7.1.6", + "svelte": "^4.2.12" }, "dependencies": { - "@rollup/plugin-replace": "^5.0.2", - "@urql/svelte": "^4.0.1", - "chart.js": "^4.3.3", - "graphql": "^16.6.0", - "svelte-chartjs": "^3.1.2", - "sveltestrap": "^5.11.1", - "uplot": "^1.6.24", - "wonka": "^6.3.2" + "@rollup/plugin-replace": "^5.0.5", + "@sveltestrap/sveltestrap": "^6.2.6", + "@urql/svelte": "^4.1.0", + "chart.js": "^4.4.2", + "date-fns": "^2.30.0", + "graphql": "^16.8.1", + "mathjs": "^12.4.0", + "svelte-chartjs": "^3.1.5", + "uplot": "^1.6.30", + "wonka": "^6.3.4" } } diff --git a/web/frontend/src/Analysis.root.svelte b/web/frontend/src/Analysis.root.svelte index aa4ae37..0592f28 100644 --- a/web/frontend/src/Analysis.root.svelte +++ b/web/frontend/src/Analysis.root.svelte @@ -1,439 +1,610 @@ - {#if $initq.fetching || $statsQuery.fetching || $footprintsQuery.fetching} - - - + {#if $initq.fetching || $statsQuery.fetching || $footprintsQuery.fetching} + + + + {/if} + + {#if $initq.error} + {$initq.error.message} + {:else if cluster} + mc.name)} + bind:metricsInHistograms + bind:metricsInScatterplots + /> {/if} - - {#if $initq.error} - {$initq.error.message} - {:else if cluster} - mc.name)} - bind:metricsInHistograms={metricsInHistograms} - bind:metricsInScatterplots={metricsInScatterplots} /> - {/if} - - - { - jobFilters = detail.filters; - }} /> - + + + { + jobFilters = detail.filters; + }} + /> + -
+
{#if $statsQuery.error} - - - {$statsQuery.error.message} - - + + + {$statsQuery.error.message} + + {:else if $statsQuery.data} - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Total Jobs{$statsQuery.data.stats[0].totalJobs}
Short Jobs{$statsQuery.data.stats[0].shortJobs}
Total Walltime{$statsQuery.data.stats[0].totalWalltime}
Total Node Hours{$statsQuery.data.stats[0].totalNodeHours}
Total Core Hours{$statsQuery.data.stats[0].totalCoreHours}
Total Accelerator Hours{$statsQuery.data.stats[0].totalAccHours}
- - -
-
Top - + {#each groupOptions as option} + + {/each} + +
+ {#key $topQuery.data} + {#if $topQuery.fetching} + + {:else if $topQuery.error} + {$topQuery.error.message} + {:else} + t[sortSelection.key], + )} + entities={$topQuery.data.topList.map((t) => t.id)} + /> + {/if} + {/key} +
+ + + {#key $topQuery.data} + {#if $topQuery.fetching} + + {:else if $topQuery.error} + {$topQuery.error.message} + {:else} + + + + + - {#key $topQuery.data} - {#if $topQuery.fetching} - - {:else if $topQuery.error} - {$topQuery.error.message} + + + {#each $topQuery.data.topList as te, i} + + + {#if groupSelection.key == "user"} + {:else} -
Legend{groupSelection.label} + - - {#key $topQuery.data} - {#if $topQuery.fetching} - - {:else if $topQuery.error} - {$topQuery.error.message} - {:else} - t[sortSelection.key])} - entities={$topQuery.data.topList.map((t) => t.id)} - /> - {/if} - {/key} - - -
{te.id}
- - - - - - {#each $topQuery.data.topList as te, i} - - - {#if groupSelection.key == 'user'} - - {:else} - - {/if} - - - {/each} -
Legend{groupSelection.label} - -
{te.id}{te.id}{te[sortSelection.key]}
+ {te.id} {/if} - {/key} - -
- - - {#if $rooflineQuery.fetching} - - {:else if $rooflineQuery.error} - {$rooflineQuery.error.message} - {:else if $rooflineQuery.data && cluster} -
- {#key $rooflineQuery.data} - - {/key} -
- {/if} - - -
- {#key $statsQuery.data.stats[0].histDuration} - - {/key} -
- - -
- {#key $statsQuery.data.stats[0].histNumCores} - - {/key} -
- -
+ {te[sortSelection.key]} + + {/each} + + {/if} + {/key} + + + + + {#if $rooflineQuery.fetching} + + {:else if $rooflineQuery.error} + {$rooflineQuery.error.message} + {:else if $rooflineQuery.data && cluster} +
+ {#key $rooflineQuery.data} + + {/key} +
+ {/if} + + +
+ {#key $statsQuery.data.stats[0].histDuration} + + {/key} +
+ + +
+ {#key $statsQuery.data.stats[0].histNumCores} + + {/key} +
+ +
{/if} -
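
The reworked top-lists in the section above feed the same topList result into both a pie chart (quantities/entities) and a legend table keyed by sortSelection.key. A minimal sketch of that mapping, with an assumed entry shape (the real fields come from the topQuery GraphQL result, whose script section is not part of this excerpt):

```js
// Assumed shape of a topList entry; only id and the sortable field are
// actually dereferenced by the template above.
const topList = [
  { id: "userA", totalJobs: 120, totalNodeHours: 3400 },
  { id: "userB", totalJobs: 80, totalNodeHours: 5100 },
];
const sortSelection = { key: "totalNodeHours", label: "Node Hours" };

const quantities = topList.map((t) => t[sortSelection.key]); // pie slices
const entities = topList.map((t) => t.id);                   // legend labels

console.log(entities, quantities); // ["userA","userB"] [3400, 5100]
```
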
+
{#if $footprintsQuery.error} - - - {$footprintsQuery.error.message} - - + + + {$footprintsQuery.error.message} + + {:else if $footprintsQuery.data && $initq.data} - - - - These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours by default - (Accelerator hours for native accelerator scope metrics, coreHours for native core scope metrics). - Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values. - -
- -
- - - ({ metric, ...binsFromFootprint( - $footprintsQuery.data.footprints.timeWeights, - metricConfig(cluster.name, metric)?.scope, - $footprintsQuery.data.footprints.metrics.find(f => f.metric == metric).data, numBins) }))} - itemsPerRow={ccconfig.plot_view_plotsPerRow}> - - - - - -
- - - - Each circle represents one job. The size of a circle is proportional to its node hours. Darker circles mean multiple jobs have the same averages for the respective metrics. - Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values. - -
- -
- - - ({ - m1, f1: $footprintsQuery.data.footprints.metrics.find(f => f.metric == m1).data, - m2, f2: $footprintsQuery.data.footprints.metrics.find(f => f.metric == m2).data }))} - itemsPerRow={ccconfig.plot_view_plotsPerRow}> - - - - - + + + + These histograms show the distribution of the averages of all jobs + matching the filters. Each job/average is weighted by its node hours by + default (Accelerator hours for native accelerator scope metrics, + coreHours for native core scope metrics). Note that some metrics could + be disabled for specific subclusters as per metricConfig and thus could + affect shown average values. + +
+ +
+ + + ({ + metric, + ...binsFromFootprint( + $footprintsQuery.data.footprints.timeWeights, + metricConfig(cluster.name, metric)?.scope, + $footprintsQuery.data.footprints.metrics.find( + (f) => f.metric == metric, + ).data, + numBins, + ), + }))} + itemsPerRow={ccconfig.plot_view_plotsPerRow} + > + + + + +
+ + + + Each circle represents one job. The size of a circle is proportional to + its node hours. Darker circles mean multiple jobs have the same averages + for the respective metrics. Note that some metrics could be disabled for + specific subclusters as per metricConfig and thus could affect shown + average values. + +
+ +
+ + + ({ + m1, + f1: $footprintsQuery.data.footprints.metrics.find( + (f) => f.metric == m1, + ).data, + m2, + f2: $footprintsQuery.data.footprints.metrics.find( + (f) => f.metric == m2, + ).data, + }))} + itemsPerRow={ccconfig.plot_view_plotsPerRow} + > + + + + {/if} diff --git a/web/frontend/src/Config.root.svelte b/web/frontend/src/Config.root.svelte index 6df579f..ddd714f 100644 --- a/web/frontend/src/Config.root.svelte +++ b/web/frontend/src/Config.root.svelte @@ -1,31 +1,30 @@ {#if isAdmin == true} - + - Admin Options + Admin Options - - + + {/if} - - Plotting Options - - + + Plotting Options + + diff --git a/web/frontend/src/Header.svelte b/web/frontend/src/Header.svelte index 03c8cd0..cc96dd0 100644 --- a/web/frontend/src/Header.svelte +++ b/web/frontend/src/Header.svelte @@ -1,178 +1,169 @@ - - ClusterCockpit Logo - - (isOpen = !isOpen)} /> - (isOpen = detail.isOpen)} - > - - - + + ClusterCockpit Logo + + (isOpen = !isOpen)} /> + (isOpen = detail.isOpen)} + > + + + diff --git a/web/frontend/src/HistogramSelection.svelte b/web/frontend/src/HistogramSelection.svelte new file mode 100644 index 0000000..39b1872 --- /dev/null +++ b/web/frontend/src/HistogramSelection.svelte @@ -0,0 +1,73 @@ + + + (isOpen = !isOpen)}> + Select metrics presented in histograms + + + {#each availableMetrics as metric (metric)} + + + {metric} + + {/each} + + + + + + + diff --git a/web/frontend/src/Job.root.svelte b/web/frontend/src/Job.root.svelte index 93c5873..758cef9 100644 --- a/web/frontend/src/Job.root.svelte +++ b/web/frontend/src/Job.root.svelte @@ -1,42 +1,50 @@ - - {#if $initq.error} - {$initq.error.message} - {:else if $initq.data} - - {:else} - - {/if} - - {#if $jobMetrics.data && $initq.data} - {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} - {#if authlevel > roles.manager} - -
- Concurrent Jobs -
-
    -
  • - See All -
  • - {#each $initq.data.job.concurrentJobs.items as pjob, index} -
  • - {pjob.jobId} -
  • - {/each} -
- - {:else} - -
- {$initq.data.job.concurrentJobs.items.length} Concurrent - Jobs -
-

- Number of shared jobs on the same node with overlapping - runtimes. -

- - {/if} - {/if} - - - - - c.name == $initq.data.job.cluster) - .subClusters.find( - (sc) => sc.name == $initq.data.job.subCluster - )} - data={ - transformDataForRoofline ( - $jobMetrics.data.jobMetrics.find((m) => m.name == "flops_any" && m.scope == "node").metric, - $jobMetrics.data.jobMetrics.find((m) => m.name == "mem_bw" && m.scope == "node").metric - ) - } - /> - + + {#if $initq.error} + {$initq.error.message} + {:else if $initq.data} + {:else} - - + {/if} + + {#if $jobMetrics.data && showFootprint} + {#key $jobMetrics.data} + + + + {/key} + {/if} + {#if $jobMetrics.data && $initq.data} + {#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0} + {#if authlevel > roles.manager} + +
+ Concurrent Jobs +
+
    +
  • + See All +
  • + {#each $initq.data.job.concurrentJobs.items as pjob, index} +
  • + {pjob.jobId} +
  • + {/each} +
+ + {:else} + +
+ {$initq.data.job.concurrentJobs.items.length} Concurrent Jobs +
+

+ Number of shared jobs on the same node with overlapping runtimes. +

+ + {/if} + {/if} + + + + + c.name == $initq.data.job.cluster) + .subClusters.find((sc) => sc.name == $initq.data.job.subCluster)} + data={transformDataForRoofline( + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "flops_any" && m.scope == "node", + ).metric, + $jobMetrics.data.jobMetrics.find( + (m) => m.name == "mem_bw" && m.scope == "node", + ).metric, + )} + /> + + {:else} + + + {/if}
- - {#if $initq.data} - - {/if} - - - {#if $initq.data} - - {/if} - - - - {#if $jobMetrics.error} - {#if $initq.data.job.monitoringStatus == 0 || $initq.data.job.monitoringStatus == 2} - Not monitored or archiving failed -
- {/if} - {$jobMetrics.error.message} - {:else if $jobMetrics.fetching} - - {:else if $jobMetrics.data && $initq.data} - - {#if item.data} - - statsTable.moreLoaded(detail)} - job={$initq.data.job} - metricName={item.metric} - rawData={item.data.map((x) => x.metric)} - scopes={item.data.map((x) => x.scope)} - {width} - isShared={$initq.data.job.exclusive != 1} - resources={$initq.data.job.resources} - /> - {:else} - No dataset returned for {item.metric} - {/if} - + + {#if $jobMetrics.error} + {#if $initq.data.job.monitoringStatus == 0 || $initq.data.job.monitoringStatus == 2} + Not monitored or archiving failed +
+ {/if} + {$jobMetrics.error.message} + {:else if $jobMetrics.fetching} + + {:else if $jobMetrics.data && $initq.data} + + {#if item.data} + statsTable.moreLoaded(detail)} + job={$initq.data.job} + metricName={item.metric} + rawData={item.data.map((x) => x.metric)} + scopes={item.data.map((x) => x.scope)} + {width} + isShared={$initq.data.job.exclusive != 1} + resources={$initq.data.job.resources} + /> + {:else} + No dataset returned for {item.metric} {/if} - + + {/if} +
- - {#if $initq.data} - - {#if somethingMissing} - -
- - - Missing Metrics/Reseources - - - {#if missingMetrics.length > 0} -

- No data at all is available for the - metrics: {missingMetrics.join(", ")} -

- {/if} - {#if missingHosts.length > 0} -

- Some metrics are missing for the - following hosts: -

-
    - {#each missingHosts as missing} -
  • - {missing.hostname}: {missing.metrics.join( - ", " - )} -
  • - {/each} -
- {/if} -
-
-
-
- {/if} - - {#if $jobMetrics.data} - {#key $jobMetrics.data} - - {/key} - {/if} - - -
- {#if $initq.data.job.metaData?.jobScript} -
{$initq.data.job.metaData?.jobScript}
- {:else} - No job script available - {/if} -
-
- -
- {#if $initq.data.job.metaData?.slurmInfo} -
{$initq.data.job.metaData?.slurmInfo}
- {:else} - No additional slurm information available - {/if} -
-
-
+ + {#if $initq.data} + + {#if somethingMissing} + +
+ + + Missing Metrics/Resources + + + {#if missingMetrics.length > 0} +

+ No data at all is available for the metrics: {missingMetrics.join( + ", ", + )} +

+ {/if} + {#if missingHosts.length > 0} +

Some metrics are missing for the following hosts:

+
    + {#each missingHosts as missing} +
  • + {missing.hostname}: {missing.metrics.join(", ")} +
  • + {/each} +
+ {/if} +
+
+
+
{/if} - + + {#if $jobMetrics.data} + {#key $jobMetrics.data} + + {/key} + {/if} + + +
+ {#if $initq.data.job.metaData?.jobScript} +
{$initq.data.job.metaData?.jobScript}
+ {:else} + No job script available + {/if} +
+
+ +
+ {#if $initq.data.job.metaData?.slurmInfo} +
{$initq.data.job.metaData?.slurmInfo}
+ {:else} + No additional slurm information available + {/if} +
+
+
+ {/if} +
{#if $initq.data} - + {/if} diff --git a/web/frontend/src/JobFootprint.svelte b/web/frontend/src/JobFootprint.svelte new file mode 100644 index 0000000..8ed8089 --- /dev/null +++ b/web/frontend/src/JobFootprint.svelte @@ -0,0 +1,265 @@ + + + + + + {#if view === "job"} + + + Core Metrics Footprint + + + {/if} + + {#each footprintData as fpd, index} +
+
 {fpd.name}
+ +
+
+ + {#if fpd.impact === 3 || fpd.impact === -1} + + {:else if fpd.impact === 2} + + {/if} + + {#if fpd.impact === 3} + + {:else if fpd.impact === 2} + + {:else if fpd.impact === 1} + + {:else if fpd.impact === 0} + + {:else if fpd.impact === -1} + + {/if} +
+
+ + {fpd.avg} / {fpd.max} + {fpd.unit}   +
+
+ {fpd.message} +
+
+ +
+ {/each} + {#if job?.metaData?.message} +
+ {@html job.metaData.message} + {/if} +
+
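
The footprint card above picks an icon and progress-bar color per fpd.impact level (-1 through 3). The numeric levels are taken from the branches in the template; the severity labels in this sketch are an interpretation, not names from the patch:

```js
// Interpretation of the template's impact branches; only the numeric
// levels come from the diff above, the descriptions are assumptions.
function describeImpact(impact) {
  switch (impact) {
    case 3:  return "alert (warning icon shown)";
    case 2:  return "warning (info icon shown)";
    case 1:  return "ok";
    case 0:  return "info";
    case -1: return "alert (warning icon shown)";
    default: return "unknown";
  }
}

console.log(describeImpact(2)); // "warning (info icon shown)"
```
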
+ + diff --git a/web/frontend/src/Jobs.root.svelte b/web/frontend/src/Jobs.root.svelte index 07094b8..204a4e3 100644 --- a/web/frontend/src/Jobs.root.svelte +++ b/web/frontend/src/Jobs.root.svelte @@ -1,96 +1,121 @@ - {#if $initq.fetching} - - - - {:else if $initq.error} - - {$initq.error.message} - - {/if} + {#if $initq.fetching} + + + + {:else if $initq.error} + + {$initq.error.message} + + {/if} - - - - - - - { - selectedCluster = detail.filters[0]?.cluster ? detail.filters[0].cluster.eq : null - jobList.update(detail.filters) - } - } /> - + + + + + + + { + selectedCluster = detail.filters[0]?.cluster + ? detail.filters[0].cluster.eq + : null; + jobList.update(detail.filters); + }} + /> + - - filterComponent.update(detail)}/> - - - jobList.refresh()} /> - + + filterComponent.update(detail)} + /> + + + jobList.refresh()} /> + -
+
- - - + + + - + + bind:cluster={selectedCluster} + configName="plot_list_selectedMetrics" + bind:metrics + bind:isOpen={isMetricsSelectionOpen} + bind:showFootprint + view="list" +/> diff --git a/web/frontend/src/List.root.svelte b/web/frontend/src/List.root.svelte index c004736..bc1ac6f 100644 --- a/web/frontend/src/List.root.svelte +++ b/web/frontend/src/List.root.svelte @@ -2,52 +2,58 @@ @component List of users or projects --> - - - - - - - - { - jobFilters = detail.filters; - }} - /> - + + + + + + + + { + jobFilters = detail.filters; + }} + /> + - + + + + {#if type == "USER"} + + {/if} + + + + + + + + {#if $stats.fetching} + + + + {:else if $stats.error} + + + + {:else if $stats.data} + {#each sort($stats.data.rows, sorting, nameFilter) as row (row.id)} - + - {/if} - - - - - - - - {#if $stats.fetching} - - - - {:else if $stats.error} - - - - {:else if $stats.data} - {#each sort($stats.data.rows, sorting, nameFilter) as row (row.id)} - - - {#if type == "USER"} - - {/if} - - - - - + {scrambleNames ? scramble(row.id) : row.id} + {:else if type == "PROJECT"} + {scrambleNames ? scramble(row.id) : row.id} {:else} - - - - {/each} - {/if} - + {row.id} + {/if} + + {#if type == "USER"} + + {/if} + + + + + + {:else} + + + + {/each} + {/if} +
+ {{ + USER: "Username", + PROJECT: "Project Name", + }[type]} + + + Name + + + Total Jobs + + + Total Walltime + + + Total Core Hours + + + Total Accelerator Hours + +
{$stats.error.message}
- {({ - USER: "Username", - PROJECT: "Project Name", - })[type]} - - {#if type == "USER"} - - Name - - - Total Jobs - - - Total Walltime - - - Total Core Hours - - - Total Accelerator Hours - -
{$stats.error.message}
- {#if type == "USER"} - {scrambleNames ? scramble(row.id) : row.id} - {:else if type == "PROJECT"} - {scrambleNames ? scramble(row.id) : row.id} - {:else} - {row.id} - {/if} - {scrambleNames ? scramble(row?.name?row.name:"-") : row?.name?row.name:"-"}{row.totalJobs}{row.totalWalltime}{row.totalCoreHours}{row.totalAccHours}
No {type.toLowerCase()}s/jobs found
{scrambleNames + ? scramble(row?.name ? row.name : "-") + : row?.name + ? row.name + : "-"}{row.totalJobs}{row.totalWalltime}{row.totalCoreHours}{row.totalAccHours}
No {type.toLowerCase()}s/jobs found
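
List.root and User.root both guard identifiers behind scrambleNames ? scramble(id) : id. scramble() itself lives in the script section (stripped in this excerpt); purely as an illustration of the presentation-mode idea, a stable pseudonymizer could look like:

```js
// Illustrative only, not the project's actual scramble() implementation.
// Hashes a name to a stable pseudonym so repeated rows stay consistent.
function scramble(name) {
  let h = 0;
  for (const ch of String(name)) h = (h * 31 + ch.codePointAt(0)) >>> 0;
  return "anon-" + h.toString(16);
}

console.log(scramble("alice")); // same input always yields the same alias
```
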
diff --git a/web/frontend/src/Metric.svelte b/web/frontend/src/Metric.svelte index 8ff0a58..6022ffb 100644 --- a/web/frontend/src/Metric.svelte +++ b/web/frontend/src/Metric.svelte @@ -1,95 +1,118 @@ + - - {metricName} ({(metricConfig?.unit?.prefix ? metricConfig.unit.prefix : '') + - (metricConfig?.unit?.base ? metricConfig.unit.base : '')}) - - - {#if job.resources.length > 1} - + + {metricName} ({(metricConfig?.unit?.prefix + ? metricConfig.unit.prefix + : "") + (metricConfig?.unit?.base ? metricConfig.unit.base : "")}) + + + {#if job.resources.length > 1} + + {/if} {#key series} - {#if fetching == true} - - {:else if error != null} - {error.message} - {:else if series != null} - - {/if} + {#if fetching == true} + + {:else if error != null} + {error.message} + {:else if series != null} + + {/if} {/key} diff --git a/web/frontend/src/MetricSelection.svelte b/web/frontend/src/MetricSelection.svelte index 59fe263..689abef 100644 --- a/web/frontend/src/MetricSelection.svelte +++ b/web/frontend/src/MetricSelection.svelte @@ -8,160 +8,206 @@ --> - - - (isOpen = !isOpen)}> - - Configure columns (Metric availability shown) - - - - {#each newMetricsOrder as metric, index (metric)} -
  • columnsDragStart(event, index)} - on:drop|preventDefault={event => columnsDrag(event, index)} - on:dragenter={() => columnHovering = index} - class:is-active={columnHovering === index}> - {#if unorderedMetrics.includes(metric)} - - {:else} - - {/if} - {metric} - - {cluster == null ? - clusters // No single cluster specified: List Clusters with Metric - .filter(c => c.metricConfig.find(m => m.name == metric) != null) - .map(c => c.name).join(', ') : - clusters // Single cluster requested: List Subclusters with do not have metric remove flag - .filter(c => c.name == cluster) - .filter(c => c.metricConfig.find(m => m.name == metric) != null) - .map(function(c) { - let scNames = c.subClusters.map(sc => sc.name) - scNames.forEach(function(scName){ - let met = c.metricConfig.find(m => m.name == metric) - let msc = met.subClusters.find(msc => msc.name == scName) - if (msc != null) { - if (msc.remove == true) { - scNames = scNames.filter(scn => scn != msc.name) - } - } - }) - return scNames - }) - .join(', ')} - -
  • - {/each} -
    -
    - - - + (isOpen = !isOpen)}> + Configure columns (Metric availability shown) + + + {#if view === "list"} +
  • + Show Footprint +
  • +
    + {/if} + {#each newMetricsOrder as metric, index (metric)} +
  • columnsDragStart(event, index)} + on:drop|preventDefault={(event) => columnsDrag(event, index)} + on:dragenter={() => (columnHovering = index)} + class:is-active={columnHovering === index} + > + {#if unorderedMetrics.includes(metric)} + + {:else} + + {/if} + {metric} + + {cluster == null + ? clusters // No single cluster specified: List Clusters with Metric + .filter( + (c) => c.metricConfig.find((m) => m.name == metric) != null, + ) + .map((c) => c.name) + .join(", ") + : clusters // Single cluster requested: List Subclusters with do not have metric remove flag + .filter((c) => c.name == cluster) + .filter( + (c) => c.metricConfig.find((m) => m.name == metric) != null, + ) + .map(function (c) { + let scNames = c.subClusters.map((sc) => sc.name); + scNames.forEach(function (scName) { + let met = c.metricConfig.find((m) => m.name == metric); + let msc = met.subClusters.find( + (msc) => msc.name == scName, + ); + if (msc != null) { + if (msc.remove == true) { + scNames = scNames.filter((scn) => scn != msc.name); + } + } + }); + return scNames; + }) + .join(", ")} + +
  • + {/each} +
    +
    + + +
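
The column list above wires on:dragstart / on:drop|preventDefault / on:dragenter to columnsDragStart, columnsDrag and columnHovering. The handlers themselves are in the stripped script block; a sketch of the usual remember-index-then-splice pattern they most likely implement (event plumbing omitted):

```js
// Assumed reorder logic behind the drag handlers referenced above.
let draggedIndex = null;

function columnsDragStart(index) {
  draggedIndex = index; // remember which metric is being dragged
}

function columnsDrag(dropIndex, order) {
  const [moved] = order.splice(draggedIndex, 1); // remove from old slot
  order.splice(dropIndex, 0, moved);             // insert at drop slot
  draggedIndex = null;
  return order;
}

// Drag the third metric onto the first slot:
columnsDragStart(2);
console.log(columnsDrag(0, ["flops_any", "mem_bw", "cpu_load"]));
// -> ["cpu_load", "flops_any", "mem_bw"]
```
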
    + + diff --git a/web/frontend/src/NavbarLinks.svelte b/web/frontend/src/NavbarLinks.svelte index 6861da5..24ecddf 100644 --- a/web/frontend/src/NavbarLinks.svelte +++ b/web/frontend/src/NavbarLinks.svelte @@ -1,39 +1,38 @@ {#each links as item} - {#if !item.perCluster} - {item.title} - {:else} - - - - {item.title} - - - {#each clusters as cluster} - - {cluster.name} - - {/each} - - - {/if} + {#if !item.perCluster} + {item.title} + {:else} + + + + {item.title} + + + {#each clusters as cluster} + + {cluster.name} + + {/each} + + + {/if} {/each} diff --git a/web/frontend/src/NavbarTools.svelte b/web/frontend/src/NavbarTools.svelte index 2bf9aca..f44b4e9 100644 --- a/web/frontend/src/NavbarTools.svelte +++ b/web/frontend/src/NavbarTools.svelte @@ -1,127 +1,153 @@ diff --git a/web/frontend/src/Node.root.svelte b/web/frontend/src/Node.root.svelte index b23c71e..0a5a75e 100644 --- a/web/frontend/src/Node.root.svelte +++ b/web/frontend/src/Node.root.svelte @@ -1,238 +1,230 @@ - {#if $initq.error} - {$initq.error.message} - {:else if $initq.fetching} + {#if $initq.error} + {$initq.error.message} + {:else if $initq.fetching} + + {:else} + + + + {hostname} ({cluster}) + + + + {#if $nodeJobsData.fetching} - {:else} - - - - {hostname} ({cluster}) - - - - {#if $nodeJobsData.fetching} - - {:else if $nodeJobsData.data} - Currently running jobs on this node: {$nodeJobsData.data.jobs - .count} - [ - View in Job List ] - {:else} - No currently running jobs. - {/if} - - - { - const diff = Date.now() - to - from = new Date(from.getTime() + diff) - to = new Date(to.getTime() + diff) - }} /> - - - - - {/if} + {:else if $nodeJobsData.data} + Currently running jobs on this node: {$nodeJobsData.data.jobs.count} + [ + View in Job List ] + {:else} + No currently running jobs. + {/if} + + + { + const diff = Date.now() - to; + from = new Date(from.getTime() + diff); + to = new Date(to.getTime() + diff); + }} + /> + + + + + {/if}
    - - {#if $nodeMetricsData.error} - {$nodeMetricsData.error.message} - {:else if $nodeMetricsData.fetching || $initq.fetching} - + + {#if $nodeMetricsData.error} + {$nodeMetricsData.error.message} + {:else if $nodeMetricsData.fetching || $initq.fetching} + + {:else} + ({ + ...m, + disabled: checkMetricDisabled( + m.name, + cluster, + $nodeMetricsData.data.nodeMetrics[0].subCluster, + ), + })) + .sort((a, b) => a.name.localeCompare(b.name))} + > +

    + {item.name} + {metricUnits[item.name]} +

    + {#if item.disabled === false && item.metric} + c.name == cluster)} + subCluster={$nodeMetricsData.data.nodeMetrics[0].subCluster} + series={item.metric.series} + resources={[{ hostname: hostname }]} + forNode={true} + /> + {:else if item.disabled === true && item.metric} + Metric disabled for subcluster {item.name}:{$nodeMetricsData.data.nodeMetrics[0] + .subCluster} {:else} - ({ - ...m, - disabled: checkMetricDisabled( - m.name, - cluster, - $nodeMetricsData.data.nodeMetrics[0].subCluster - ), - })) - .sort((a, b) => a.name.localeCompare(b.name))} - > -

    - {item.name} - {metricUnits[item.name]} -

    - {#if item.disabled === false && item.metric} - c.name == cluster)} - subCluster={$nodeMetricsData.data.nodeMetrics[0] - .subCluster} - series={item.metric.series} - resources={[{hostname: hostname}]} - forNode={true} - /> - {:else if item.disabled === true && item.metric} - Metric disabled for subcluster {item.name}:{$nodeMetricsData.data.nodeMetrics[0] - .subCluster} - {:else} - No dataset returned for {item.name} - {/if} -
    + No dataset returned for {item.name} {/if} - +
    + {/if} +
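
Node.root and Systems.root both tag plots via checkMetricDisabled(name, cluster, subCluster), and the MetricSelection diff above shows the underlying convention: a metricConfig entry can carry per-subcluster overrides with a remove flag. A hedged reconstruction of that check (cluster data is passed explicitly here; the real helper resolves it from a store):

```js
// Sketch under the "remove flag" convention visible in MetricSelection;
// not the verbatim helper from the codebase.
function checkMetricDisabled(clusters, metricName, clusterName, subClusterName) {
  const cluster = clusters.find((c) => c.name == clusterName);
  const metric = cluster?.metricConfig?.find((m) => m.name == metricName);
  if (!metric) return true; // metric not configured for this cluster at all
  const override = metric.subClusters?.find((sc) => sc.name == subClusterName);
  return override?.remove == true; // disabled only when explicitly removed
}

const clusters = [{
  name: "fritz",
  metricConfig: [{ name: "mem_bw", subClusters: [{ name: "spr", remove: true }] }],
}];
console.log(checkMetricDisabled(clusters, "mem_bw", "fritz", "spr")); // true
```
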
    diff --git a/web/frontend/src/PlotSelection.svelte b/web/frontend/src/PlotSelection.svelte index 449de64..b4cf58b 100644 --- a/web/frontend/src/PlotSelection.svelte +++ b/web/frontend/src/PlotSelection.svelte @@ -1,139 +1,163 @@ - - - (isHistogramConfigOpen = !isHistogramConfigOpen)}> - - Select metrics presented in histograms - - - - {#each availableMetrics as metric (metric)} - - updateConfiguration({ - name: 'analysis_view_histogramMetrics', - value: metricsInHistograms - })} /> + (isHistogramConfigOpen = !isHistogramConfigOpen)} +> + Select metrics presented in histograms + + + {#each availableMetrics as metric (metric)} + + + updateConfiguration({ + name: "analysis_view_histogramMetrics", + value: metricsInHistograms, + })} + /> - {metric} - - {/each} - - - - - + {metric} + + {/each} + + + + + - (isScatterPlotConfigOpen = !isScatterPlotConfigOpen)}> - - Select metric pairs presented in scatter plots - - - - {#each metricsInScatterplots as pair} - - {pair[0]} / {pair[1]} + (isScatterPlotConfigOpen = !isScatterPlotConfigOpen)} +> + Select metric pairs presented in scatter plots + + + {#each metricsInScatterplots as pair} + + {pair[0]} / {pair[1]} - - - {/each} - + + + {/each} + -
    +
    - - - - - - -
    - - - + + + + + + + + +
    diff --git a/web/frontend/src/StatsTable.svelte b/web/frontend/src/StatsTable.svelte index e85e835..3a9d84d 100644 --- a/web/frontend/src/StatsTable.svelte +++ b/web/frontend/src/StatsTable.svelte @@ -1,136 +1,154 @@ - - - - {#each selectedMetrics as metric} - - {/each} - - - - {#each selectedMetrics as metric} - {#if selectedScopes[metric] != 'node'} - - {/if} - {#each ['min', 'avg', 'max'] as stat} - - {/each} - {/each} - - - - {#each hosts as host (host)} - - - {#each selectedMetrics as metric (metric)} - - {/each} - + + + + {#each selectedMetrics as metric} + + {/each} + + + + {#each selectedMetrics as metric} + {#if selectedScopes[metric] != "node"} + + {/if} + {#each ["min", "avg", "max"] as stat} + {/each} - + {/each} + + + + {#each hosts as host (host)} + + + {#each selectedMetrics as metric (metric)} + + {/each} + + {/each} +
    - - - - - {metric} - - - -
    NodeId sortBy(metric, stat)}> - {stat} - {#if selectedScopes[metric] == 'node'} - - {/if} -
    {host}
    + + + + + {metric} + + + +
    NodeId sortBy(metric, stat)}> + {stat} + {#if selectedScopes[metric] == "node"} + + {/if} +
    {host}
    -
    +
    + cluster={job.cluster} + configName="job_view_nodestats_selectedMetrics" + allMetrics={new Set(allMetrics)} + bind:metrics={selectedMetrics} + bind:isOpen={isMetricSelectionOpen} +/> diff --git a/web/frontend/src/StatsTableEntry.svelte b/web/frontend/src/StatsTableEntry.svelte index 5e497d4..99cde21 100644 --- a/web/frontend/src/StatsTableEntry.svelte +++ b/web/frontend/src/StatsTableEntry.svelte @@ -1,82 +1,86 @@ {#if series == null || series.length == 0} - No data -{:else if series.length == 1 && scope == 'node'} - - {series[0].statistics.min} - - - {series[0].statistics.avg} - - - {series[0].statistics.max} - + No data +{:else if series.length == 1 && scope == "node"} + + {series[0].statistics.min} + + + {series[0].statistics.avg} + + + {series[0].statistics.max} + {:else} - - - - {#each ['id', 'min', 'avg', 'max'] as field} - - {/each} - - {#each series as s, i} - - - - - - - {/each} -
    sortByField(field)}> - Sort - -
    {s.id ?? i}{s.statistics.min}{s.statistics.avg}{s.statistics.max}
    - + + + + {#each ["id", "min", "avg", "max"] as field} + + {/each} + + {#each series as s, i} + + + + + + + {/each} +
    sortByField(field)}> + Sort + +
    {s.id ?? i}{s.statistics.min}{s.statistics.avg}{s.statistics.max}
    + {/if} diff --git a/web/frontend/src/Status.root.svelte b/web/frontend/src/Status.root.svelte index fffbfde..4121ead 100644 --- a/web/frontend/src/Status.root.svelte +++ b/web/frontend/src/Status.root.svelte @@ -1,341 +1,358 @@ - - -

    Current utilization of cluster "{cluster}"

    - - - {#if $initq.fetching || $mainQuery.fetching} - - {:else if $initq.error} - {$initq.error.message} - {:else} - - {/if} - - - { - from = new Date(Date.now() - 5 * 60 * 1000); - to = new Date(Date.now()); - }} - /> - + + +

    Current utilization of cluster "{cluster}"

    + + + {#if $initq.fetching || $mainQuery.fetching} + + {:else if $initq.error} + {$initq.error.message} + {:else} + + {/if} + + + + + + { + from = new Date(Date.now() - 5 * 60 * 1000); + to = new Date(Date.now()); + }} + /> +
    {#if $mainQuery.error} - - - {$mainQuery.error.message} - - + + + {$mainQuery.error.message} + + {/if}
    @@ -343,327 +360,318 @@ {#if $initq.data && $mainQuery.data} - {#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i} - - - - - SubCluster "{subCluster.name}" - - - - - - - - - - - - - - - - - - -
    Allocated Nodes
    - -
    {allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes} - Nodes
    Flop Rate (Any)
    - -
    - {scaleNumbers( - flopRate[subCluster.name], - subCluster.flopRateSimd.value * - subCluster.numberOfNodes, - flopRateUnitPrefix[subCluster.name] - )}{flopRateUnitBase[subCluster.name]} [Max] -
    MemBw Rate
    - -
    - {scaleNumbers( - memBwRate[subCluster.name], - subCluster.memoryBandwidth.value * - subCluster.numberOfNodes, - memBwRateUnitPrefix[subCluster.name] - )}{memBwRateUnitBase[subCluster.name]} [Max] -
    -
    -
    - - -
    - {#key $mainQuery.data.nodeMetrics} - data.subCluster == subCluster.name - ) - ) - } - /> - {/key} -
    - -
    - {/each} + {#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i} + + + + + SubCluster "{subCluster.name}" + + + + + + + + + + + + + + + + + + +
    Allocated Nodes
    + +
    {allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes} + Nodes
    Flop Rate (Any)
    + +
    + {scaleNumbers( + flopRate[subCluster.name], + subCluster.flopRateSimd.value * subCluster.numberOfNodes, + flopRateUnitPrefix[subCluster.name], + )}{flopRateUnitBase[subCluster.name]} [Max] +
    MemBw Rate
    + +
    + {scaleNumbers( + memBwRate[subCluster.name], + subCluster.memoryBandwidth.value * subCluster.numberOfNodes, + memBwRateUnitPrefix[subCluster.name], + )}{memBwRateUnitBase[subCluster.name]} [Max] +
    +
    +
    + + +
    + {#key $mainQuery.data.nodeMetrics} + data.subCluster == subCluster.name, + ), + )} + /> + {/key} +
    + +
    + {/each} -
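
The utilization rows above print "{scaleNumbers(current, peak, prefix)}{unitBase} [Max]", i.e. a measured rate against the subcluster's theoretical peak under one shared SI prefix. The real helper comes from the units module (not in this excerpt); a minimal sketch of that formatting idea:

```js
// Illustrative stand-in for the units helper used above.
function scaleNumbers(value, peak, prefix = "G") {
  const factor = { "": 1, K: 1e3, M: 1e6, G: 1e9, T: 1e12, P: 1e15 }[prefix] ?? 1;
  return `${(value / factor).toFixed(2)} / ${(peak / factor).toFixed(2)} ${prefix}`;
}

// e.g. 3.2 TFlop/s measured against a 9.6 TFlop/s subcluster peak:
console.log(scaleNumbers(3.2e12, 9.6e12, "T")); // "3.20 / 9.60 T"
```
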
    +
    - + + + +
    +

    + Top Users on {cluster.charAt(0).toUpperCase() + cluster.slice(1)} +

    + {#key $topUserQuery.data} + {#if $topUserQuery.fetching} + + {:else if $topUserQuery.error} + {$topUserQuery.error.message} + {:else} + tu[topUserSelection.key], + )} + entities={$topUserQuery.data.topUser.map((tu) => tu.id)} + /> + {/if} + {/key} +
    + + + {#key $topUserQuery.data} + {#if $topUserQuery.fetching} + + {:else if $topUserQuery.error} + {$topUserQuery.error.message} + {:else} + + + + + + + {#each $topUserQuery.data.topUser as tu, i} + + + + + + {/each} +
    Legend User Name Number of + +
    {tu.id}{tu[topUserSelection.key]}
    + {/if} + {/key} + + +

    + Top Projects on {cluster.charAt(0).toUpperCase() + cluster.slice(1)} +

    + {#key $topProjectQuery.data} + {#if $topProjectQuery.fetching} + + {:else if $topProjectQuery.error} + {$topProjectQuery.error.message} + {:else} + tp[topProjectSelection.key], + )} + entities={$topProjectQuery.data.topProjects.map((tp) => tp.id)} + /> + {/if} + {/key} + + + {#key $topProjectQuery.data} + {#if $topProjectQuery.fetching} + + {:else if $topProjectQuery.error} + {$topProjectQuery.error.message} + {:else} + + + + + + + {#each $topProjectQuery.data.topProjects as tp, i} + + + + + + {/each} +
    Legend Project Code Number of + +
    {tp.id}{tp[topProjectSelection.key]}
    + {/if} + {/key} + +
    +
    + + +
    + {#key $mainQuery.data.stats} + + {/key} +
    + + + {#key $mainQuery.data.stats} + + {/key} + +
    + + +
    + {#key $mainQuery.data.stats} + + {/key} +
    + + + {#key $mainQuery.data.stats} + + {/key} + +
    +
    + {#if metricsInHistograms} - -
    -

    - Top Users on {cluster.charAt(0).toUpperCase() + - cluster.slice(1)} -

    - {#key $topUserQuery.data} - {#if $topUserQuery.fetching} - - {:else if $topUserQuery.error} - {$topUserQuery.error.message} - {:else} - tu[topUserSelection.key] - )} - entities={$topUserQuery.data.topUser.map( - (tu) => tu.id - )} - /> - {/if} - {/key} -
    - - - {#key $topUserQuery.data} - {#if $topUserQuery.fetching} - - {:else if $topUserQuery.error} - {$topUserQuery.error.message} - {:else} - - - - - - - {#each $topUserQuery.data.topUser as tu, i} - - - - - - {/each} -
    Legend User Name Number of - -
    {tu.id}{tu[topUserSelection.key]}
    - {/if} - {/key} - - -

    - Top Projects on {cluster.charAt(0).toUpperCase() + - cluster.slice(1)} -

    - {#key $topProjectQuery.data} - {#if $topProjectQuery.fetching} - - {:else if $topProjectQuery.error} - {$topProjectQuery.error.message} - {:else} - tp[topProjectSelection.key] - )} - entities={$topProjectQuery.data.topProjects.map( - (tp) => tp.id - )} - /> - {/if} - {/key} - - - {#key $topProjectQuery.data} - {#if $topProjectQuery.fetching} - - {:else if $topProjectQuery.error} - {$topProjectQuery.error.message} - {:else} - - - - - - - {#each $topProjectQuery.data.topProjects as tp, i} - - - - - - {/each} -
    Legend Project Code Number of - -
    {tp.id}{tp[topProjectSelection.key]}
    - {/if} - {/key} - -
    -
    - - -
    - {#key $mainQuery.data.stats} - - {/key} -
    - - - {#key $mainQuery.data.stats} - - {/key} - -
    - - -
    - {#key $mainQuery.data.stats} - - {/key} -
    - - - {#key $mainQuery.data.stats} - - {/key} - + + {#key $mainQuery.data.stats[0].histMetrics} + + + + {/key} +
    + {/if} {/if} + + diff --git a/web/frontend/src/Systems.root.svelte b/web/frontend/src/Systems.root.svelte index d881236..4a7f633 100644 --- a/web/frontend/src/Systems.root.svelte +++ b/web/frontend/src/Systems.root.svelte @@ -1,159 +1,218 @@ - {#if $initq.error} - {$initq.error.message} - {:else if $initq.fetching} - - {:else} - - { - const diff = Date.now() - to - from = new Date(from.getTime() + diff) - to = new Date(to.getTime() + diff) - }} /> - - - - - - - - Metric - - - - - - - Find Node - - - - {/if} - -
    - + {#if $initq.error} + {$initq.error.message} + {:else if $initq.fetching} + + {:else} - {#if $nodesQuery.error} - {$nodesQuery.error.message} - {:else if $nodesQuery.fetching || $initq.fetching} - - {:else} - h.host.includes(hostnameFilter) && h.metrics.some(m => m.name == selectedMetric && m.scope == 'node')) - .map(h => ({ - host: h.host, - subCluster: h.subCluster, - data: h.metrics.find(m => m.name == selectedMetric && m.scope == 'node'), - disabled: checkMetricDisabled(selectedMetric, cluster, h.subCluster) - })) - .sort((a, b) => a.host.localeCompare(b.host)) - }> - -

    {item.host} ({item.subCluster})

    - {#if item.disabled === false && item.data} - c.name == cluster)} - subCluster={item.subCluster} - resources={[{hostname: item.host}]} - forNode={true}/> - {:else if item.disabled === true && item.data} - Metric disabled for subcluster {selectedMetric}:{item.subCluster} - {:else} - No dataset returned for {selectedMetric} - {/if} -
    - {/if} + { + const diff = Date.now() - to; + from = new Date(from.getTime() + diff); + to = new Date(to.getTime() + diff); + }} + /> + + + + + + + Metric + + + + + + + Find Node + + + + {/if} +
    +
    + + + {#if $nodesQuery.error} + {$nodesQuery.error.message} + {:else if $nodesQuery.fetching || $initq.fetching} + + {:else} + + h.host.includes(hostnameFilter) && + h.metrics.some( + (m) => m.name == selectedMetric && m.scope == "node", + ), + ) + .map((h) => ({ + host: h.host, + subCluster: h.subCluster, + data: h.metrics.find( + (m) => m.name == selectedMetric && m.scope == "node", + ), + disabled: checkMetricDisabled( + selectedMetric, + cluster, + h.subCluster, + ), + })) + .sort((a, b) => a.host.localeCompare(b.host))} + > +

    + {item.host} ({item.subCluster}) +

    + {#if item.disabled === false && item.data} + c.name == cluster)} + subCluster={item.subCluster} + resources={[{ hostname: item.host }]} + forNode={true} + /> + {:else if item.disabled === true && item.data} + Metric disabled for subcluster {selectedMetric}:{item.subCluster} + {:else} + No dataset returned for {selectedMetric} + {/if} +
    + {/if} +
    - diff --git a/web/frontend/src/TagManagement.svelte b/web/frontend/src/TagManagement.svelte index 6ab4752..e9fb9e9 100644 --- a/web/frontend/src/TagManagement.svelte +++ b/web/frontend/src/TagManagement.svelte @@ -1,190 +1,234 @@ - - (isOpen = !isOpen)}> - - Manage Tags - {#if pendingChange !== false} - - {:else} - - {/if} - - - + + Manage Tags + {#if pendingChange !== false} + + {:else} + + {/if} + + + -
    +
    - - Search using "type: name". If no tag matches your search, - a button for creating a new one will appear. - + + Search using "type: name". If no tag matches your search, a + button for creating a new one will appear. + -
      - {#each allTagsFiltered as tag} - - +
        + {#each allTagsFiltered as tag} + + - - {#if pendingChange === tag.id} - - {:else if job.tags.find(t => t.id == tag.id)} - - {:else} - - {/if} - - + + {#if pendingChange === tag.id} + + {:else if job.tags.find((t) => t.id == tag.id)} + {:else} - - No tags matching - - {/each} -
      -
      - {#if newTagType && newTagName && isNewTag(newTagType, newTagName)} - - {:else if allTagsFiltered.length == 0} - Search Term is not a valid Tag (type: name) - {/if} - - - - + + {/if} + +
      + {:else} + + No tags matching + + {/each} +
    +
    + {#if newTagType && newTagName && isNewTag(newTagType, newTagName)} + + {:else if allTagsFiltered.length == 0} + Search Term is not a valid Tag (type: name) + {/if} +
    + + +
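
The tag manager above searches with a "type: name" term and offers a create button when isNewTag(newTagType, newTagName) holds. A sketch of that convention; the component's real parsing and isNewTag helpers live in the stripped script section:

```js
// Sketch of the "type: name" search-term convention described above.
function parseTagTerm(term) {
  const idx = term.indexOf(":");
  if (idx < 0) return null; // not a valid "type: name" term
  return { type: term.slice(0, idx).trim(), name: term.slice(idx + 1).trim() };
}

const isNewTag = (type, name, allTags) =>
  !allTags.some((t) => t.type == type && t.name == name);

const tags = [{ type: "issue", name: "memleak" }];
console.log(parseTagTerm("issue: memleak")); // { type: "issue", name: "memleak" }
console.log(isNewTag("issue", "slowio", tags)); // true -> offer "create tag"
```
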
    + + diff --git a/web/frontend/src/User.root.svelte b/web/frontend/src/User.root.svelte index 3871f60..c60ea20 100644 --- a/web/frontend/src/User.root.svelte +++ b/web/frontend/src/User.root.svelte @@ -1,180 +1,276 @@ - {#if $initq.fetching} - - - - {:else if $initq.error} - - {$initq.error.message} - - {/if} - - - - - - - - { - jobFilters = [...detail.filters, { user: { eq: user.username } }] - selectedCluster = jobFilters[0]?.cluster ? jobFilters[0].cluster.eq : null - jobList.update(jobFilters) - }} /> - - - jobList.refresh()} /> - - -
    - - {#if $stats.error} - - {$stats.error.message} - - {:else if !$stats.data} - - - - {:else} - - - - - - - - {#if user.name} - - - - - {/if} - {#if user.email} - - - - - {/if} - - - - - - - - - - - - - - - - - -
    Username{scrambleNames ? scramble(user.username) : user.username}
    Name{scrambleNames ? scramble(user.name) : user.name}
    Email{user.email}
    Total Jobs{$stats.data.jobsStatistics[0].totalJobs}
    Short Jobs{$stats.data.jobsStatistics[0].shortJobs}
    Total Walltime{$stats.data.jobsStatistics[0].totalWalltime}
    Total Core Hours{$stats.data.jobsStatistics[0].totalCoreHours}
    - -
    - {#key $stats.data.jobsStatistics[0].histDuration} - - {/key} -
    -
    - {#key $stats.data.jobsStatistics[0].histNumNodes} - - {/key} -
    - {/if} -
    -
    - + {#if $initq.fetching} - + + {:else if $initq.error} + + {$initq.error.message} + + {/if} + + + + + + + + + + { + jobFilters = [...detail.filters, { user: { eq: user.username } }]; + selectedCluster = jobFilters[0]?.cluster + ? jobFilters[0].cluster.eq + : null; + jobList.update(jobFilters); + }} + /> + + + jobList.refresh()} /> + + +
    + + {#if $stats.error} + + {$stats.error.message} + + {:else if !$stats.data} + + + + {:else} + + + + + + + + {#if user.name} + + + + + {/if} + {#if user.email} + + + + + {/if} + + + + + + + + + + + + + + + + + +
    Username{scrambleNames ? scramble(user.username) : user.username}
    Name{scrambleNames ? scramble(user.name) : user.name}
    Email{user.email}
    Total Jobs{$stats.data.jobsStatistics[0].totalJobs}
    Short Jobs{$stats.data.jobsStatistics[0].shortJobs}
    Total Walltime{$stats.data.jobsStatistics[0].totalWalltime}
    Total Core Hours{$stats.data.jobsStatistics[0].totalCoreHours}
    + +
    + {#key $stats.data.jobsStatistics[0].histDuration} + + {/key} +
    +
    + {#key $stats.data.jobsStatistics[0].histNumNodes} + + {/key} +
    + {/if} +
    +{#if metricsInHistograms} + + {#if $stats.error} + + {$stats.error.message} + + {:else if !$stats.data} + + + + {:else} + + {#key $stats.data.jobsStatistics[0].histMetrics} + + + + {/key} + + {/if} + +{/if} +
    + + + + - + - \ No newline at end of file + + + diff --git a/web/frontend/src/Zoom.svelte b/web/frontend/src/Zoom.svelte index ae842fc..c5f73c1 100644 --- a/web/frontend/src/Zoom.svelte +++ b/web/frontend/src/Zoom.svelte @@ -1,60 +1,65 @@
    - - - - - - Window Size: - - - ({windowSize}%) - - - - Window Position: - - - + + + + + + Window Size: + + + ({windowSize}%) + + + + Window Position: + + +
    diff --git a/web/frontend/src/config/AdminSettings.svelte b/web/frontend/src/config/AdminSettings.svelte index 97c5b17..26e1d0f 100644 --- a/web/frontend/src/config/AdminSettings.svelte +++ b/web/frontend/src/config/AdminSettings.svelte @@ -1,54 +1,53 @@ - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + diff --git a/web/frontend/src/config/PlotSettings.svelte b/web/frontend/src/config/PlotSettings.svelte index 36326bd..20a7f2e 100644 --- a/web/frontend/src/config/PlotSettings.svelte +++ b/web/frontend/src/config/PlotSettings.svelte @@ -1,171 +1,498 @@ - - - -
    handleSettingSubmit('#line-width-form', 'lw')}> - - -
    Line Width
    - - {#if displayMessage && message.target == 'lw'} -
    - - Update: {message.msg} - -
    - {/if} -
    - -
    - - -
    Width of the lines in the timeseries plots.
    + + + + + handleSettingSubmit("#line-width-form", "lw")} + > + + +
    Line Width
    + + {#if displayMessage && message.target == "lw"} +
    + + Update: {message.msg} +
    - - -
    + {/if} + + +
    + + +
    + Width of the lines in the timeseries plots. +
    +
    + + + - - -
    handleSettingSubmit('#plots-per-row-form', 'ppr')}> - - -
    Plots per Row
    - {#if displayMessage && message.target == 'ppr'}
    Update: {message.msg}
    {/if} -
    - -
    - - -
    How many plots to show next to each other on pages such as /monitoring/job/, /monitoring/system/...
    -
    - -
    -
    + + +
    + handleSettingSubmit("#plots-per-row-form", "ppr")} + > + + +
    Plots per Row
    + {#if displayMessage && message.target == "ppr"}
    + Update: {message.msg} +
    {/if} +
    + +
    + + +
    + How many plots to show next to each other on pages such as + /monitoring/job/, /monitoring/system/... +
    +
    + +
    +
    - - -
    handleSettingSubmit('#backgrounds-form', 'bg')}> - - -
    Colored Backgrounds
    - {#if displayMessage && message.target == 'bg'}
    Update: {message.msg}
    {/if} -
    - -
    -
    - {#if config.plot_general_colorBackground} - - {:else} - - {/if} - -
    -
    - {#if config.plot_general_colorBackground} - - {:else} - - {/if} - -
    -
    - -
    -
    + + +
    + handleSettingSubmit("#backgrounds-form", "bg")} + > + + +
    Colored Backgrounds
    + {#if displayMessage && message.target == "bg"}
    + Update: {message.msg} +
    {/if} +
    + +
    +
    + {#if config.plot_general_colorBackground} + + {:else} + + {/if} + +
    +
    + {#if config.plot_general_colorBackground} + + {:else} + + {/if} + +
    +
    + +
    +
    - - -
    - - -
    Color Scheme for Timeseries Plots
    - {#if displayMessage && message.target == 'cs'}
    Update: {message.msg}
    {/if} -
    - - - - {#each Object.entries(colorschemes) as [name, rgbrow]} - - - - - - {/each} - -
    {name} - {#if rgbrow.join(',') == config.plot_general_colorscheme} - handleSettingSubmit("#colorscheme-form", "cs")}/> - {:else} - handleSettingSubmit("#colorscheme-form", "cs")}/> - {/if} - - {#each rgbrow as rgb} - - {/each} -
    -
    -
    + + +
    + + +
    Color Scheme for Timeseries Plots
    + {#if displayMessage && message.target == "cs"}
    + Update: {message.msg} +
    {/if} +
    + + + + {#each Object.entries(colorschemes) as [name, rgbrow]} + + + + + + {/each} + +
    {name} + {#if rgbrow.join(",") == config.plot_general_colorscheme} + + handleSettingSubmit("#colorscheme-form", "cs")} + /> + {:else} + + handleSettingSubmit("#colorscheme-form", "cs")} + /> + {/if} + + {#each rgbrow as rgb} + + {/each} +
    +
    +
    diff --git a/web/frontend/src/config/admin/AddUser.svelte b/web/frontend/src/config/admin/AddUser.svelte index 2712e17..43f08de 100644 --- a/web/frontend/src/config/admin/AddUser.svelte +++ b/web/frontend/src/config/admin/AddUser.svelte @@ -1,103 +1,156 @@ -
    - Create User -
    - - -
    Must be unique.
    -
    -
    - - -
    Only API users are allowed to have a blank password. Users with a blank password can only authenticate via Tokens.
    -
    -
    - - -
    Only Manager users can have a project. Allows to inspect jobs and users of given project.
    -
    -
    - - -
    Optional, can be blank.
    -
    -
    - - -
    Optional, can be blank.
    -
    + + Create User +
    + + +
    Must be unique.
    +
    +
    + + +
    + Only API users are allowed to have a blank password. Users with a blank + password can only authenticate via Tokens. +
    +
    +
    + + +
    + Only Manager users can have a project. Allows to inspect jobs and users + of given project. +
    +
    +
    + + +
    Optional, can be blank.
    +
    +
    + + +
    Optional, can be blank.
    +
    - -
    -

    Role:

    - {#each roles as role, i} - {#if i == 0} -
    - - -
    - {:else if i == 1} -
    - - -
    - {:else} -
    - - -
    - {/if} - {/each} -
    -

    - - {#if displayMessage}

    {message.msg}
    {/if} -

    -
    +
    +

    Role:

    + {#each roles as role, i} + {#if i == 0} +
    + + +
    + {:else if i == 1} +
    + + +
    + {:else} +
    + + +
    + {/if} + {/each} +
    +

    + + {#if displayMessage}

    + {message.msg} +
    {/if} +

    +
    diff --git a/web/frontend/src/config/admin/EditProject.svelte b/web/frontend/src/config/admin/EditProject.svelte index 857f7db..a4a8d75 100644 --- a/web/frontend/src/config/admin/EditProject.svelte +++ b/web/frontend/src/config/admin/EditProject.svelte @@ -1,97 +1,129 @@ - - Edit Project Managed By User (Manager Only) -
    - - - - - - -
    -

    - {#if displayMessage}Update: {message.msg}{/if} -

    -
    + + Edit Project Managed By User (Manager Only) +
    + + + + + + +
    +

    + {#if displayMessage}Update: {message.msg}{/if} +

    +
    diff --git a/web/frontend/src/config/admin/EditRole.svelte b/web/frontend/src/config/admin/EditRole.svelte index ca14699..f201f38 100644 --- a/web/frontend/src/config/admin/EditRole.svelte +++ b/web/frontend/src/config/admin/EditRole.svelte @@ -1,104 +1,131 @@ - - Edit User Roles -
    - - - - - - -
    -

    - {#if displayMessage}Update: {message.msg}{/if} -

    -
    + + Edit User Roles +
    + + + + + + +
    +

    + {#if displayMessage}Update: {message.msg}{/if} +

    +
    diff --git a/web/frontend/src/config/admin/Options.svelte b/web/frontend/src/config/admin/Options.svelte index 44f9650..8ad3c44 100644 --- a/web/frontend/src/config/admin/Options.svelte +++ b/web/frontend/src/config/admin/Options.svelte @@ -1,29 +1,34 @@ - - Scramble Names / Presentation Mode - - Active? - + + Scramble Names / Presentation Mode + + Active? + diff --git a/web/frontend/src/config/admin/ShowUsers.svelte b/web/frontend/src/config/admin/ShowUsers.svelte index 439bebb..be9b146 100644 --- a/web/frontend/src/config/admin/ShowUsers.svelte +++ b/web/frontend/src/config/admin/ShowUsers.svelte @@ -1,68 +1,87 @@ - - Special Users -

    - Not created by an LDAP sync and/or having a role other than user - -

    -
    - - - - - - - - - - - - - - {#each userList as user} - - - - - {:else} - - - - {/each} - -
    Username Name Project(s) Email Roles JWT Delete
    -
    Loading...
    -
    -
    -
    + + Special Users +

    + Not created by an LDAP sync and/or having a role other than user + +

    +
    + + + + + + + + + + + + + + {#each userList as user} + + + + + {:else} + + + + {/each} + +
    Username Name Project(s) Email Roles JWT Delete
    +
    + Loading... +
    +
    +
    +
    diff --git a/web/frontend/src/config/admin/ShowUsersRow.svelte b/web/frontend/src/config/admin/ShowUsersRow.svelte index 34b2240..9845241 100644 --- a/web/frontend/src/config/admin/ShowUsersRow.svelte +++ b/web/frontend/src/config/admin/ShowUsersRow.svelte @@ -1,28 +1,32 @@ {user.username} {user.name} {user.projects} {user.email} -{user.roles.join(', ')} +{user.roles.join(", ")} - {#if ! jwt} - - {:else} - - {/if} + {#if !jwt} + + {:else} + + {/if} diff --git a/web/frontend/src/filters/Cluster.svelte b/web/frontend/src/filters/Cluster.svelte index 2740b74..9c82321 100644 --- a/web/frontend/src/filters/Cluster.svelte +++ b/web/frontend/src/filters/Cluster.svelte @@ -1,77 +1,95 @@ - (isOpen = !isOpen)}> - - Select Cluster & Slurm Partition - - - {#if $initialized} -

    Cluster

    - - (pendingCluster = null, pendingPartition = null)}> - Any Cluster - - {#each clusters as cluster} - (pendingCluster = cluster.name, pendingPartition = null)}> - {cluster.name} - - {/each} - - {/if} - {#if $initialized && pendingCluster != null} -
    -

    Partiton

    - - (pendingPartition = null)}> - Any Partition - - {#each clusters.find(c => c.name == pendingCluster).partitions as partition} - (pendingPartition = partition)}> - {partition} - - {/each} - - {/if} -
    - - - - - + (isOpen = !isOpen)}> + Select Cluster & Slurm Partition + + {#if $initialized} +

    Cluster

    + + ((pendingCluster = null), (pendingPartition = null))} + > + Any Cluster + + {#each clusters as cluster} + ( + (pendingCluster = cluster.name), (pendingPartition = null) + )} + > + {cluster.name} + + {/each} + + {/if} + {#if $initialized && pendingCluster != null} +
    +

    Partition

    + + (pendingPartition = null)} + > + Any Partition + + {#each clusters.find((c) => c.name == pendingCluster).partitions as partition} + (pendingPartition = partition)} + > + {partition} + + {/each} + + {/if} +
    + + + + +
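
Note that the partition dropdown above resolves its entries with `clusters.find((c) => c.name == pendingCluster).partitions`, so the component assumes each entry of `clusters` carries a name and a partition list. A minimal sketch of that structure (names and values are illustrative only):

    // Illustrative `clusters` structure; only `name` and `partitions`
    // are actually required by the lookup in Cluster.svelte.
    const clusters = [
      { name: "clusterA", partitions: ["main", "gpu"] },
      { name: "clusterB", partitions: ["short", "long"] },
    ];

    // What the partition dropdown iterates for a selected cluster:
    const pendingCluster = "clusterA";
    clusters.find((c) => c.name == pendingCluster).partitions; // ["main", "gpu"]
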
    diff --git a/web/frontend/src/filters/Duration.svelte b/web/frontend/src/filters/Duration.svelte index b482b9c..132ce05 100644 --- a/web/frontend/src/filters/Duration.svelte +++ b/web/frontend/src/filters/Duration.svelte @@ -1,95 +1,244 @@ - (isOpen = !isOpen)}> - - Select Start Time - - -

    Between

    - - -
    - -
    -
    h
    -
    -
    - - -
    - -
    -
    m
    -
    -
    - -
    -

    and

    - - -
    - -
    -
    h
    -
    -
    - - -
    - -
    -
    m
    -
    -
    - -
    -
    - - - - - + (isOpen = !isOpen)}> + Select Job Duration + +

    Duration more than

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +
    + +

    Duration less than

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +
    + +

    Duration between

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +

    and

    + + +
    + +
    +
    h
    +
    +
    + + +
    + +
    +
    m
    +
    +
    + +
    +
    + + + + + +
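
The reworked duration filter adds "more than" and "less than" modes next to the existing "between" range, and all of them are stored as plain second counts; the hour/minute fields exist only in the form. A minimal sketch of the conversion in both directions (helper names are illustrative, not taken from the component; the formatting arithmetic mirrors the Math.floor expressions used for the filter pills in Filters.svelte):

    // Form inputs (hours, minutes) -> seconds, as stored in
    // filters.duration.{from,to,lessThan,moreThan}.
    function toSeconds(hours, minutes) {
      return hours * 3600 + minutes * 60;
    }

    // Seconds -> the "Xh:Ym" label shown by the filter pills.
    function formatSeconds(seconds) {
      return `${Math.floor(seconds / 3600)}h:${Math.floor((seconds % 3600) / 60)}m`;
    }

    toSeconds(1, 30);    // 5400
    formatSeconds(5400); // "1h:30m"
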
    diff --git a/web/frontend/src/filters/Filters.svelte b/web/frontend/src/filters/Filters.svelte index 38d7e7a..8e7a8ef 100644 --- a/web/frontend/src/filters/Filters.svelte +++ b/web/frontend/src/filters/Filters.svelte @@ -10,351 +10,418 @@ - void update(additionalFilters: Object?): Triggers an update --> - - - - - Filters - - - - Manage Filters - - {#if menuText} - {menuText} - - {/if} - (isClusterOpen = true)}> - Cluster/Partition - - (isJobStatesOpen = true)}> - Job States - - (isStartTimeOpen = true)}> - Start Time - - (isDurationOpen = true)}> - Duration - - (isTagsOpen = true)}> - Tags - - (isResourcesOpen = true)}> - Resources - - (isStatsOpen = true)}> - (isStatsOpen = true)}/> Statistics - - {#if startTimeQuickSelect} - - Start Time Qick Selection - {#each [ - { text: 'Last 6hrs', url: 'last6h', seconds: 6*60*60 }, - // { text: 'Last 12hrs', seconds: 12*60*60 }, - { text: 'Last 24hrs', url: 'last24h', seconds: 24*60*60 }, - // { text: 'Last 48hrs', seconds: 48*60*60 }, - { text: 'Last 7 days', url: 'last7d', seconds: 7*24*60*60 }, - { text: 'Last 30 days', url: 'last30d', seconds: 30*24*60*60 } - ] as {text, url, seconds}} - { - filters.startTime.from = (new Date(Date.now() - seconds * 1000)).toISOString() - filters.startTime.to = (new Date(Date.now())).toISOString() - filters.startTime.text = text, - filters.startTime.url = url - update() - }}> - {text} - - {/each} - {/if} - - - - - {#if filters.cluster} - (isClusterOpen = true)}> - {filters.cluster} - {#if filters.partition} - ({filters.partition}) - {/if} - + + + + + Filters + + + Manage Filters + {#if menuText} + {menuText} + {/if} + (isClusterOpen = true)}> + Cluster/Partition + + (isJobStatesOpen = true)}> + Job States + + (isStartTimeOpen = true)}> + Start Time + + (isDurationOpen = true)}> + Duration + + (isTagsOpen = true)}> + Tags + + (isResourcesOpen = true)}> + Resources + + (isStatsOpen = true)}> + (isStatsOpen = true)} /> Statistics + + {#if startTimeQuickSelect} + + Start Time Qick Selection + {#each [{ text: "Last 6hrs", url: "last6h", seconds: 6 * 60 * 60 }, { text: "Last 24hrs", url: "last24h", seconds: 24 * 60 * 60 }, { text: "Last 7 days", url: "last7d", seconds: 7 * 24 * 60 * 60 }, { text: "Last 30 days", url: "last30d", seconds: 30 * 24 * 60 * 60 }] as { text, url, seconds }} + { + filters.startTime.from = new Date( + Date.now() - seconds * 1000, + ).toISOString(); + filters.startTime.to = new Date(Date.now()).toISOString(); + (filters.startTime.text = text), (filters.startTime.url = url); + update(); + }} + > + + {text} + + {/each} + {/if} + + + + + {#if filters.cluster} + (isClusterOpen = true)}> + {filters.cluster} + {#if filters.partition} + ({filters.partition}) + {/if} + + {/if} - {#if filters.states.length != allJobStates.length} - (isJobStatesOpen = true)}> - {filters.states.join(', ')} - - {/if} + {#if filters.states.length != allJobStates.length} + (isJobStatesOpen = true)}> + {filters.states.join(", ")} + + {/if} - {#if filters.startTime.from || filters.startTime.to} - (isStartTimeOpen = true)}> - {#if filters.startTime.text} - {filters.startTime.text} - {:else} - {new Date(filters.startTime.from).toLocaleString()} - {new Date(filters.startTime.to).toLocaleString()} - {/if} - + {#if filters.startTime.from || filters.startTime.to} + (isStartTimeOpen = true)}> + {#if filters.startTime.text} + {filters.startTime.text} + {:else} + {new Date(filters.startTime.from).toLocaleString()} - {new Date( + filters.startTime.to, + ).toLocaleString()} {/if} + + {/if} - {#if filters.duration.from || 
filters.duration.to} - (isDurationOpen = true)}> - {Math.floor(filters.duration.from / 3600)}h:{Math.floor(filters.duration.from % 3600 / 60)}m - - - {Math.floor(filters.duration.to / 3600)}h:{Math.floor(filters.duration.to % 3600 / 60)}m - - {/if} + {#if filters.duration.from || filters.duration.to} + (isDurationOpen = true)}> + {Math.floor(filters.duration.from / 3600)}h:{Math.floor( + (filters.duration.from % 3600) / 60, + )}m - + {Math.floor(filters.duration.to / 3600)}h:{Math.floor( + (filters.duration.to % 3600) / 60, + )}m + + {/if} - {#if filters.tags.length != 0} - (isTagsOpen = true)}> - {#each filters.tags as tagId} - - {/each} - - {/if} + {#if filters.duration.lessThan} + (isDurationOpen = true)}> + Duration less than {Math.floor( + filters.duration.lessThan / 3600, + )}h:{Math.floor((filters.duration.lessThan % 3600) / 60)}m + + {/if} - {#if filters.numNodes.from != null || filters.numNodes.to != null || - filters.numHWThreads.from != null || filters.numHWThreads.to != null || - filters.numAccelerators.from != null || filters.numAccelerators.to != null } - (isResourcesOpen = true)}> - {#if isNodesModified } Nodes: {filters.numNodes.from} - {filters.numNodes.to} {/if} - {#if isNodesModified && isHwthreadsModified }, {/if} - {#if isHwthreadsModified } HWThreads: {filters.numHWThreads.from} - {filters.numHWThreads.to} {/if} - {#if (isNodesModified || isHwthreadsModified) && isAccsModified }, {/if} - {#if isAccsModified } Accelerators: {filters.numAccelerators.from} - {filters.numAccelerators.to} {/if} - - {/if} + {#if filters.duration.moreThan} + (isDurationOpen = true)}> + Duration more than {Math.floor( + filters.duration.moreThan / 3600, + )}h:{Math.floor((filters.duration.moreThan % 3600) / 60)}m + + {/if} - {#if filters.node != null } - (isResourcesOpen = true)}> - Node: {filters.node} - - {/if} + {#if filters.tags.length != 0} + (isTagsOpen = true)}> + {#each filters.tags as tagId} + + {/each} + + {/if} - {#if filters.stats.length > 0} - (isStatsOpen = true)}> - {filters.stats.map(stat => `${stat.text}: ${stat.from} - ${stat.to}`).join(', ')} - + {#if filters.numNodes.from != null || filters.numNodes.to != null || filters.numHWThreads.from != null || filters.numHWThreads.to != null || filters.numAccelerators.from != null || filters.numAccelerators.to != null} + (isResourcesOpen = true)}> + {#if isNodesModified} + Nodes: {filters.numNodes.from} - {filters.numNodes.to} {/if} - + {#if isNodesModified && isHwthreadsModified}, + {/if} + {#if isHwthreadsModified} + HWThreads: {filters.numHWThreads.from} - {filters.numHWThreads.to} + {/if} + {#if (isNodesModified || isHwthreadsModified) && isAccsModified}, + {/if} + {#if isAccsModified} + Accelerators: {filters.numAccelerators.from} - {filters + .numAccelerators.to} + {/if} + + {/if} + + {#if filters.node != null} + (isResourcesOpen = true)}> + Node: {filters.node} + + {/if} + + {#if filters.stats.length > 0} + (isStatsOpen = true)}> + {filters.stats + .map((stat) => `${stat.text}: ${stat.from} - ${stat.to}`) + .join(", ")} + + {/if} + update()} /> + {disableClusterSelection} + bind:isOpen={isClusterOpen} + bind:cluster={filters.cluster} + bind:partition={filters.partition} + on:update={() => update()} +/> update()} /> + bind:isOpen={isJobStatesOpen} + bind:states={filters.states} + on:update={() => update()} +/> { - delete filters.startTime['text'] - delete filters.startTime['url'] - update() - }} /> + bind:isOpen={isStartTimeOpen} + bind:from={filters.startTime.from} + bind:to={filters.startTime.to} + on:update={() => { + delete 
filters.startTime["text"]; + delete filters.startTime["url"]; + update(); + }} +/> update()} /> + bind:isOpen={isDurationOpen} + bind:lessThan={filters.duration.lessThan} + bind:moreThan={filters.duration.moreThan} + bind:from={filters.duration.from} + bind:to={filters.duration.to} + on:update={() => update()} +/> update()} /> + bind:isOpen={isTagsOpen} + bind:tags={filters.tags} + on:update={() => update()} +/> - update()} /> + update()} +/> - update()} /> + update()} +/> diff --git a/web/frontend/src/filters/InfoBox.svelte b/web/frontend/src/filters/InfoBox.svelte index 58fc8a5..8fe75ab 100644 --- a/web/frontend/src/filters/InfoBox.svelte +++ b/web/frontend/src/filters/InfoBox.svelte @@ -1,11 +1,11 @@ - diff --git a/web/frontend/src/filters/JobStates.svelte b/web/frontend/src/filters/JobStates.svelte index 4e5db2e..e22144f 100644 --- a/web/frontend/src/filters/JobStates.svelte +++ b/web/frontend/src/filters/JobStates.svelte @@ -1,47 +1,76 @@ + - (isOpen = !isOpen)}> - - Select Job States - - - - {#each allJobStates as state} - - - {state} - - {/each} - - - - - - - + (isOpen = !isOpen)}> + Select Job States + + + {#each allJobStates as state} + + + {state} + + {/each} + + + + + + + diff --git a/web/frontend/src/filters/Resources.svelte b/web/frontend/src/filters/Resources.svelte index be5995a..01f1c57 100644 --- a/web/frontend/src/filters/Resources.svelte +++ b/web/frontend/src/filters/Resources.svelte @@ -1,145 +1,242 @@ - (isOpen = !isOpen)}> - - Select number of utilized Resources - - -
    Named Node
    - -
    Number of Nodes
    - { - pendingNumNodes = { from: detail[0], to: detail[1] } - isNodesModified = true - }} - min={minNumNodes} max={maxNumNodes} - firstSlider={pendingNumNodes.from} secondSlider={pendingNumNodes.to} - inputFieldFrom={pendingNumNodes.from} inputFieldTo={pendingNumNodes.to}/> -
    Number of HWThreads (Use for Single-Node Jobs)
    - { - pendingNumHWThreads = { from: detail[0], to: detail[1] } - isHwthreadsModified = true - }} - min={minNumHWThreads} max={maxNumHWThreads} - firstSlider={pendingNumHWThreads.from} secondSlider={pendingNumHWThreads.to} - inputFieldFrom={pendingNumHWThreads.from} inputFieldTo={pendingNumHWThreads.to}/> - {#if maxNumAccelerators != null && maxNumAccelerators > 1} -
    Number of Accelerators
    - { - pendingNumAccelerators = { from: detail[0], to: detail[1] } - isAccsModified = true - }} - min={minNumAccelerators} max={maxNumAccelerators} - firstSlider={pendingNumAccelerators.from} secondSlider={pendingNumAccelerators.to} - inputFieldFrom={pendingNumAccelerators.from} inputFieldTo={pendingNumAccelerators.to}/> - {/if} -
    - - - - - + (isOpen = !isOpen)}> + Select number of utilized Resources + +
    Named Node
    + +
    Number of Nodes
    + { + pendingNumNodes = { from: detail[0], to: detail[1] }; + isNodesModified = true; + }} + min={minNumNodes} + max={maxNumNodes} + firstSlider={pendingNumNodes.from} + secondSlider={pendingNumNodes.to} + inputFieldFrom={pendingNumNodes.from} + inputFieldTo={pendingNumNodes.to} + /> +
    + Number of HWThreads (Use for Single-Node Jobs) +
    + { + pendingNumHWThreads = { from: detail[0], to: detail[1] }; + isHwthreadsModified = true; + }} + min={minNumHWThreads} + max={maxNumHWThreads} + firstSlider={pendingNumHWThreads.from} + secondSlider={pendingNumHWThreads.to} + inputFieldFrom={pendingNumHWThreads.from} + inputFieldTo={pendingNumHWThreads.to} + /> + {#if maxNumAccelerators != null && maxNumAccelerators > 1} +
    Number of Accelerators
    + { + pendingNumAccelerators = { from: detail[0], to: detail[1] }; + isAccsModified = true; + }} + min={minNumAccelerators} + max={maxNumAccelerators} + firstSlider={pendingNumAccelerators.from} + secondSlider={pendingNumAccelerators.to} + inputFieldFrom={pendingNumAccelerators.from} + inputFieldTo={pendingNumAccelerators.to} + /> + {/if} +
    + + + + +
    diff --git a/web/frontend/src/filters/StartTime.svelte b/web/frontend/src/filters/StartTime.svelte index c89851d..1759b6e 100644 --- a/web/frontend/src/filters/StartTime.svelte +++ b/web/frontend/src/filters/StartTime.svelte @@ -1,90 +1,121 @@ - (isOpen = !isOpen)}> - - Select Start Time - - -

    From

    - - - - - - - - -

    To

    - - - - - - - - -
    - - - - - + (isOpen = !isOpen)}> + Select Start Time + +

    From

    + + + + + + + + +

    To

    + + + + + + + + +
    + + + + +
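
The from/to values bound here are ISO-8601 timestamps; the quick-select entries in Filters.svelte derive them from a relative window in seconds. Roughly what one quick-select click assigns (mirroring the handler shown above, with Date.now() as the reference point):

    // One quick-select entry from Filters.svelte: label, URL token, window.
    const range = { text: "Last 24hrs", url: "last24h", seconds: 24 * 60 * 60 };

    // What the click handler writes into filters.startTime:
    const to = new Date(Date.now()).toISOString();
    const from = new Date(Date.now() - range.seconds * 1000).toISOString();
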
    diff --git a/web/frontend/src/filters/Stats.svelte b/web/frontend/src/filters/Stats.svelte index cf559da..ee80a4b 100644 --- a/web/frontend/src/filters/Stats.svelte +++ b/web/frontend/src/filters/Stats.svelte @@ -1,115 +1,137 @@ - (isOpen = !isOpen)}> - - Filter based on statistics (of non-running jobs) - - - {#each statistics as stat} -

    {stat.text}

    - (stat.from = detail[0], stat.to = detail[1], stat.enabled = true)} - min={0} max={stat.peak} - firstSlider={stat.from} secondSlider={stat.to} - inputFieldFrom={stat.from} inputFieldTo={stat.to}/> - {/each} -
    - - - - - + (isOpen = !isOpen)}> + Filter based on statistics (of non-running jobs) + + {#each statistics as stat} +

    {stat.text}

    + ( + (stat.from = detail[0]), (stat.to = detail[1]), (stat.enabled = true) + )} + min={0} + max={stat.peak} + firstSlider={stat.from} + secondSlider={stat.to} + inputFieldFrom={stat.from} + inputFieldTo={stat.to} + /> + {/each} +
    + + + + +
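
Each DoubleRangeSlider in this modal writes back into one entry of `statistics`; from the bindings, an entry looks roughly like the following (label and numbers are placeholders):

    // Inferred from the bindings above: the slider writes `from`/`to`,
    // flips `enabled`, and is bounded by [0, stat.peak]. `text` feeds the
    // filter pill `${stat.text}: ${stat.from} - ${stat.to}` in Filters.svelte.
    const stat = {
      text: "FLOPs (Any)", // placeholder label
      from: 0,
      to: 100,
      peak: 100,
      enabled: false,
    };
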
    diff --git a/web/frontend/src/filters/Tags.svelte b/web/frontend/src/filters/Tags.svelte index b5a145a..06153ed 100644 --- a/web/frontend/src/filters/Tags.svelte +++ b/web/frontend/src/filters/Tags.svelte @@ -1,67 +1,89 @@ - (isOpen = !isOpen)}> - - Select Tags - - - -
    - - {#if $initialized} - {#each fuzzySearchTags(searchTerm, allTags) as tag (tag)} - - {#if pendingTags.includes(tag.id)} - - {:else} - - {/if} - - - - {:else} - No Tags - {/each} + (isOpen = !isOpen)}> + Select Tags + + +
    + + {#if $initialized} + {#each fuzzySearchTags(searchTerm, allTags) as tag (tag)} + + {#if pendingTags.includes(tag.id)} + + {:else} + {/if} - -
    - - - - - + + + + {:else} + No Tags + {/each} + {/if} +
    +
    + + + + +
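
`fuzzySearchTags(searchTerm, allTags)` is imported from the shared utils; a simplified stand-in with the same signature, for illustration only (it assumes each tag carries `id`, `type`, and `name` fields, which is an assumption beyond what the markup above shows):

    // Simplified stand-in for utils' fuzzySearchTags(); the real helper
    // may match differently. Assumes tag objects of shape { id, type, name }.
    function fuzzySearchTags(term, tags) {
      if (!term) return tags;
      const t = term.toLowerCase();
      return tags.filter(
        (tag) =>
          tag.name.toLowerCase().includes(t) ||
          tag.type.toLowerCase().includes(t),
      );
    }
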
    diff --git a/web/frontend/src/filters/TimeSelection.svelte b/web/frontend/src/filters/TimeSelection.svelte index c715b9c..f9c230b 100644 --- a/web/frontend/src/filters/TimeSelection.svelte +++ b/web/frontend/src/filters/TimeSelection.svelte @@ -1,81 +1,96 @@ - - - - {#if timeRange == -1} - from - updateExplicitTimeRange('from', event)}> - to - updateExplicitTimeRange('to', event)}> + + {#if timeRange == -1} + from + updateExplicitTimeRange("from", event)} + > + to + updateExplicitTimeRange("to", event)} + > + {/if} diff --git a/web/frontend/src/filters/UserOrProject.svelte b/web/frontend/src/filters/UserOrProject.svelte index 8235863..983192c 100644 --- a/web/frontend/src/filters/UserOrProject.svelte +++ b/web/frontend/src/filters/UserOrProject.svelte @@ -1,75 +1,84 @@ {#if authlevel >= roles.manager} - - - termChanged()} on:keyup={(event) => termChanged(event.key == 'Enter' ? 0 : throttle)} - placeholder={mode == 'user' ? 'filter username...' : 'filter project...'} /> - + + + termChanged()} + on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} + placeholder={mode == "user" ? "filter username..." : "filter project..."} + /> + {:else} - - - termChanged()} on:keyup={(event) => termChanged(event.key == 'Enter' ? 0 : throttle)} placeholder='filter project...' - /> - + + + termChanged()} + on:keyup={(event) => termChanged(event.key == "Enter" ? 0 : throttle)} + placeholder="filter project..." + /> + {/if} diff --git a/web/frontend/src/joblist/JobInfo.svelte b/web/frontend/src/joblist/JobInfo.svelte index 83841c6..a30d058 100644 --- a/web/frontend/src/joblist/JobInfo.svelte +++ b/web/frontend/src/joblist/JobInfo.svelte @@ -6,109 +6,138 @@ - jobTags: Defaults to job.tags, usefull for dynamically updating the tags. --> +
    -

    - {job.jobId} ({job.cluster}) - {#if job.metaData?.jobName} -
    - {#if job.metaData?.jobName.length <= 25} -

    {job.metaData.jobName}
    - {:else} -
    {job.metaData.jobName}
    - {/if} - {/if} - {#if job.arrayJobId} - Array Job: #{job.arrayJobId} - {/if} -

    +

    + {job.jobId} + ({job.cluster}) + {#if job.metaData?.jobName} +
    + {#if job.metaData?.jobName.length <= 25} +

    {job.metaData.jobName}
    + {:else} +
    + {job.metaData.jobName} +
    + {/if} + {/if} + {#if job.arrayJobId} + Array Job: #{job.arrayJobId} + {/if} +

    -

    - - - {scrambleNames ? scramble(job.user) : job.user} - - {#if job.userData && job.userData.name} - ({scrambleNames ? scramble(job.userData.name) : job.userData.name}) - {/if} - {#if job.project && job.project != 'no project'} -
    - - - {scrambleNames ? scramble(job.project) : job.project} - - {/if} -

    +

    + + + {scrambleNames ? scramble(job.user) : job.user} + + {#if job.userData && job.userData.name} + ({scrambleNames ? scramble(job.userData.name) : job.userData.name}) + {/if} + {#if job.project && job.project != "no project"} +
    + + + {scrambleNames ? scramble(job.project) : job.project} + + {/if} +

    -

    - {#if job.numNodes == 1} - {job.resources[0].hostname} - {:else} - {job.numNodes} - {/if} - - {#if job.exclusive != 1} - (shared) - {/if} - {#if job.numAcc > 0} - , {job.numAcc} - {/if} - {#if job.numHWThreads > 0} - , {job.numHWThreads} - {/if} -
    - {job.subCluster} -

    +

    + {#if job.numNodes == 1} + {job.resources[0].hostname} + {:else} + {job.numNodes} + {/if} + + {#if job.exclusive != 1} + (shared) + {/if} + {#if job.numAcc > 0} + , {job.numAcc} + {/if} + {#if job.numHWThreads > 0} + , {job.numHWThreads} + {/if} +
    + {job.subCluster} +

    -

    - Start: {(new Date(job.startTime)).toLocaleString()} -
    - Duration: {formatDuration(job.duration)} - {#if job.state == 'running'} - running - {:else if job.state != 'completed'} - {job.state} - {/if} - {#if job.walltime} -
    - Walltime: {formatDuration(job.walltime)} - {/if} -

    +

    + Start: {new Date(job.startTime).toLocaleString()} +
    + Duration: {formatDuration(job.duration)} + {job.state} + {#if job.walltime} +
    + Walltime: {formatDuration(job.walltime)} + {/if} +

    -

    - {#each jobTags as tag} - - {/each} -

    +

    + {#each jobTags as tag} + + {/each} +
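
The user/project links above pass names through `scramble()` whenever `scrambleNames` is set, i.e. when the "Scramble Names / Presentation Mode" option from the admin settings is active. An illustrative stand-in for that masking (the actual scramble() lives in utils.js and may work differently):

    // Illustrative stand-in only: deterministically masks a name so
    // screenshots and demos do not leak real usernames or projects.
    function scramble(name) {
      return [...String(name)]
        .map((c, i) => String.fromCharCode(97 + ((c.charCodeAt(0) + i) % 26)))
        .join("");
    }

    scramble("jdoe"); // same input always yields the same masked string
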

    diff --git a/web/frontend/src/joblist/JobList.svelte b/web/frontend/src/joblist/JobList.svelte index 02caf3f..39a3010 100644 --- a/web/frontend/src/joblist/JobList.svelte +++ b/web/frontend/src/joblist/JobList.svelte @@ -9,264 +9,308 @@ - update(filters?: [JobFilter]) --> -
    - - - - - {#each metrics as metric (metric)} - - {/each} - - - - {#if $jobs.error} - - - - {:else if $jobs.fetching || !$jobs.data} - - - - {:else if $jobs.data && $initialized} - {#each $jobs.data.jobs.items as job (job)} - - {:else} - - - - {/each} - {/if} - -
    - Job Info - - {metric} - {#if $initialized} - ({clusters - .map((cluster) => - cluster.metricConfig.find( - (m) => m.name == metric - ) - ) - .filter((m) => m != null) - .map( - (m) => - (m.unit?.prefix - ? m.unit?.prefix - : "") + - (m.unit?.base ? m.unit?.base : "") - ) // Build unitStr - .reduce( - (arr, unitStr) => - arr.includes(unitStr) - ? arr - : [...arr, unitStr], - [] - ) // w/o this, output would be [unitStr, unitStr] - .join(", ")}) - {/if} -
    -

    {$jobs.error.message}

    -
    - -
    - No jobs found -
    -
    +
    + + + + + {#if showFootprint} + + {/if} + {#each metrics as metric (metric)} + + {/each} + + + + {#if $jobsStore.error} + + + + {:else} + {#each jobs as job (job)} + + {:else} + + + + {/each} + {/if} + {#if $jobsStore.fetching || !$jobsStore.data} + + + + {/if} + +
    + Job Info + + Job Footprint + + {metric} + {#if $initialized} + ({clusters + .map((cluster) => + cluster.metricConfig.find((m) => m.name == metric), + ) + .filter((m) => m != null) + .map( + (m) => + (m.unit?.prefix ? m.unit?.prefix : "") + + (m.unit?.base ? m.unit?.base : ""), + ) // Build unitStr + .reduce( + (arr, unitStr) => + arr.includes(unitStr) ? arr : [...arr, unitStr], + [], + ) // w/o this, output would be [unitStr, unitStr] + .join(", ")}) + {/if} +
    +

    {$jobsStore.error.message}

    +
    No jobs found
    +
    + +
    +
    +
    - { - if (detail.itemsPerPage != itemsPerPage) { - updateConfiguration( - detail.itemsPerPage.toString(), - detail.page - ) - } else { - paging = { itemsPerPage: detail.itemsPerPage, page: detail.page } - } + if (detail.itemsPerPage != itemsPerPage) { + updateConfiguration(detail.itemsPerPage.toString(), detail.page); + } else { + jobs = [] + paging = { itemsPerPage: detail.itemsPerPage, page: detail.page }; + } }} -/> + /> +{/if} diff --git a/web/frontend/src/joblist/Refresher.svelte b/web/frontend/src/joblist/Refresher.svelte index 2587711..635ffbe 100644 --- a/web/frontend/src/joblist/Refresher.svelte +++ b/web/frontend/src/joblist/Refresher.svelte @@ -5,39 +5,46 @@ - 'reload': When fired, the parent component shoud refresh its contents --> - - - \ No newline at end of file + + + + diff --git a/web/frontend/src/joblist/Row.svelte b/web/frontend/src/joblist/Row.svelte index 2117b91..98d3190 100644 --- a/web/frontend/src/joblist/Row.svelte +++ b/web/frontend/src/joblist/Row.svelte @@ -9,141 +9,198 @@ --> - - + + + + {#if job.monitoringStatus == 0 || job.monitoringStatus == 2} + + Not monitored or archiving failed - {#if job.monitoringStatus == 0 || job.monitoringStatus == 2} - - Not monitored or archiving failed - - {:else if $metricsQuery.fetching} - - - - {:else if $metricsQuery.error} - - - {$metricsQuery.error.message.length > 500 - ? $metricsQuery.error.message.substring(0, 499) + "..." - : $metricsQuery.error.message} - - - {:else} - {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} - - - {#if metric.disabled == false && metric.data} - - {:else if metric.disabled == true && metric.data} - Metric disabled for subcluster {metric.data.name}:{job.subCluster} - {:else} - No dataset returned - {/if} - - {/each} + {:else if $metricsQuery.fetching} + + + + {:else if $metricsQuery.error} + + + {$metricsQuery.error.message.length > 500 + ? $metricsQuery.error.message.substring(0, 499) + "..." + : $metricsQuery.error.message} + + + {:else} + {#if showFootprint} + + + {/if} + {#each sortAndSelectScope($metricsQuery.data.jobMetrics) as metric, i (metric || i)} + + + {#if metric.disabled == false && metric.data} + + {:else if metric.disabled == true && metric.data} + Metric disabled for subcluster {metric.data.name}:{job.subCluster} + {:else} + No dataset returned + {/if} + + {/each} + {/if} diff --git a/web/frontend/src/joblist/SortSelection.svelte b/web/frontend/src/joblist/SortSelection.svelte index 5941964..2cc8615 100644 --- a/web/frontend/src/joblist/SortSelection.svelte +++ b/web/frontend/src/joblist/SortSelection.svelte @@ -7,65 +7,94 @@ --> - { isOpen = !isOpen }}> - - Sort rows - - - - {#each sortableColumns as col, i (col)} - - + sortableColumns[i] = { ...sortableColumns[i] }; + activeColumnIdx = i; + sortableColumns = [...sortableColumns]; + sorting = { field: col.field, order: col.order }; + }} + > + + - {col.text} - - {/each} - - - - - + {col.text} + + {/each} + + + + + \ No newline at end of file + .sort { + border: none; + margin: 0; + padding: 0; + background: 0 0; + transition: all 70ms; + } + + diff --git a/web/frontend/src/plots/Histogram.svelte b/web/frontend/src/plots/Histogram.svelte index d3e1aaa..8300384 100644 --- a/web/frontend/src/plots/Histogram.svelte +++ b/web/frontend/src/plots/Histogram.svelte @@ -5,212 +5,222 @@ --> {#if data.length > 0} -
    +
    {:else} - Cannot render histogram: No data! + Cannot render histogram: No data! {/if} - - diff --git a/web/frontend/src/plots/MetricPlot.svelte b/web/frontend/src/plots/MetricPlot.svelte index 17eec5f..bd44675 100644 --- a/web/frontend/src/plots/MetricPlot.svelte +++ b/web/frontend/src/plots/MetricPlot.svelte @@ -1,3 +1,122 @@ + + - {#if series[0].data.length > 0} -
    +
    {:else} - Cannot render plot: No series data returned for {metric} + Cannot render plot: No series data returned for {metric} {/if} diff --git a/web/frontend/src/plots/Roofline.svelte b/web/frontend/src/plots/Roofline.svelte index d79f86d..1e47f6f 100644 --- a/web/frontend/src/plots/Roofline.svelte +++ b/web/frontend/src/plots/Roofline.svelte @@ -1,253 +1,339 @@ {#if data != null} -
    +
    {:else} - Cannot render roofline: No data! -{/if} \ No newline at end of file + Cannot render roofline: No data! +{/if} + diff --git a/web/frontend/src/utils.js b/web/frontend/src/utils.js index da1878a..5346208 100644 --- a/web/frontend/src/utils.js +++ b/web/frontend/src/utils.js @@ -316,11 +316,17 @@ export function checkMetricDisabled(m, c, s) { //[m]etric, [c]luster, [s]ubclust } export function convert2uplot(canvasData) { - // initial use: Canvas Histogram Data to Uplot + // Prep: Uplot Data Structure let uplotData = [[],[]] // [X, Y1, Y2, ...] - canvasData.forEach( pair => { - uplotData[0].push(pair.value) - uplotData[1].push(pair.count) + // Iterate + canvasData.forEach( cd => { + if (Object.keys(cd).length == 4) { // MetricHisto Datafromat + uplotData[0].push(cd?.max ? cd.max : 0) + uplotData[1].push(cd.count) + } else { // Default + uplotData[0].push(cd.value) + uplotData[1].push(cd.count) + } }) return uplotData } diff --git a/web/templates/login.tmpl b/web/templates/login.tmpl index 304a96f..f10e064 100644 --- a/web/templates/login.tmpl +++ b/web/templates/login.tmpl @@ -38,6 +38,9 @@
    + {{- if .Infos.hasOpenIDConnect}} + OpenID Connect Login + {{end}}
    diff --git a/web/web.go b/web/web.go index 8d4ce4b..99008b5 100644 --- a/web/web.go +++ b/web/web.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg. +// Copyright (C) NHR@FAU, University Erlangen-Nuremberg. // All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file.
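
On the convert2uplot() change in web/frontend/src/utils.js above: the function now accepts two input shapes and always returns uPlot's [[x...], [y...]] column layout. The metric-histogram branch is detected purely by the object having four keys and uses cd.max (or 0) as the x value. A quick worked example, assuming convert2uplot is in scope (it is exported from utils.js); field names other than max/count in the second call are assumptions:

    // Default shape: { value, count } pairs.
    convert2uplot([{ value: 1, count: 4 }, { value: 2, count: 7 }]);
    // -> [[1, 2], [4, 7]]

    // MetricHisto shape: any object with exactly four keys takes this
    // branch; the bin's `max` becomes the x value, falling back to 0.
    convert2uplot([
      { min: 0, max: 10, count: 3, bin: 0 },
      { min: 10, max: 20, count: 5, bin: 1 },
    ]);
    // -> [[10, 20], [3, 5]]
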