Merge branch 'master' of github.com:ClusterCockpit/cc-backend into 135-batch-scheduler-integration
This commit is contained in commit c0f9eb7869.

.gitignore (vendored): 1 change
@@ -16,3 +16,4 @@ var/job.db-shm
var/job.db-wal
dist/
*.db
.goreleaser.yml

@@ -1,4 +1,3 @@
# This is an example .goreleaser.yml file with some sensible defaults.
before:
  hooks:
    - go mod tidy
@@ -7,24 +6,60 @@ builds:
      - CGO_ENABLED=1
    goos:
      - linux
      - darwin
    goarch:
      - amd64
      - arm64
    goamd64:
      - v3
    goarm:
      - "7"
    id: "cc-backend"
    binary: cc-backend
    main: ./cmd/cc-backend
    ldflags:
      - -s -w -X main.version={{.Version}}
      - -X main.commit={{.Commit}} -X main.date={{.Date}}
      - -linkmode external -extldflags -static
    tags:
      - static_build
    hooks:
      pre: make frontend
    ignore:
      - goos: linux
        goarch: arm64

  - env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
    goamd64:
      - v3
    id: "archive-manager"
    binary: archive-manager
    main: ./tools/archive-manager
    tags:
      - static_build

  - env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
    goamd64:
      - v3
    id: "archive-migration"
    binary: archive-migration
    main: ./tools/archive-migration
    tags:
      - static_build

  - env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
    goamd64:
      - v3
    id: "gen-keypair"
    binary: gen-keypair
    main: ./tools/gen-keypair
    tags:
      - static_build

archives:
  - format: tar.gz
    # this name template makes the OS and Arch compatible with the results of uname.
@@ -41,13 +76,11 @@ snapshot:
changelog:
  sort: asc
  filters:
    exclude:
      - "^test:"
      - "^chore"
      - "merge conflict"
      - Merge pull request
      - Merge remote-tracking branch
      - Merge branch
    include:
      - "^feat:"
      - "^fix:"
      - "^sec:"
      - "^docs:"
  groups:
    - title: "Dependency updates"
      regexp: '^.*?(feat|fix)\(deps\)!?:.+$'
@@ -64,11 +97,9 @@ changelog:
    - title: "Documentation updates"
      regexp: ^.*?doc(\([[:word:]]+\))??!?:.+$
      order: 400
    - title: Other work
      order: 9999
release:
  draft: true
  draft: false
  footer: |
    Supports job archive version 1 and database version 4.
    Supports job archive version 1 and database version 6.

# vim: set ts=2 sw=2 tw=0 fo=cnqoj
Makefile: 4 changes

@@ -2,7 +2,7 @@ TARGET = ./cc-backend
VAR = ./var
CFG = config.json .env
FRONTEND = ./web/frontend
VERSION = 1.0.0
VERSION = 1.1.0
GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S")
LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}'
@@ -28,7 +28,7 @@ SVELTE_SRC = $(wildcard $(FRONTEND)/src/*.svelte) \
    $(wildcard $(FRONTEND)/src/plots/*.svelte) \
    $(wildcard $(FRONTEND)/src/joblist/*.svelte)

.PHONY: clean test tags frontend $(TARGET)
.PHONY: clean distclean test tags frontend $(TARGET)

.NOTPARALLEL:
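The `-X` linker flags in `LD_FLAGS` inject build date, version, and commit hash into package-level variables of the `main` package at link time. A minimal sketch of the receiving side, consistent with the `var` block that appears in `cmd/cc-backend/main.go` further below (the `printVersion` helper is illustrative, not part of the repository):

```go
package main

import "fmt"

// Populated at link time, e.g.:
//   go build -ldflags '-s -X main.date=... -X main.version=1.1.0 -X main.commit=abc1234' ./cmd/cc-backend
var (
	date    string
	commit  string
	version string
)

// printVersion is a hypothetical helper showing how the injected values can be used.
func printVersion() {
	fmt.Printf("cc-backend %s (commit %s, built %s)\n", version, commit, date)
}
```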
README.md: 213 changes
@@ -2,25 +2,32 @@

[![Build](https://github.com/ClusterCockpit/cc-backend/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-backend/actions/workflows/test.yml)

This is a Golang backend implementation for a REST and GraphQL API according to the [ClusterCockpit specifications](https://github.com/ClusterCockpit/cc-specifications).
It also includes a web interface for ClusterCockpit.
While there is a backend for the InfluxDB timeseries database, the only tested and supported setup is using cc-metric-store as a metric data backend.
We will add documentation how to integrate ClusterCockpit with other timeseries databases in the future.
This implementation replaces the previous PHP Symfony based ClusterCockpit web-interface.
[Here](https://github.com/ClusterCockpit/ClusterCockpit/wiki/Why-we-switched-from-PHP-Symfony-to-a-Golang-based-solution) is a discussion of the reasons why we switched from PHP Symfony to a Golang based solution.
This is a Golang backend implementation for a REST and GraphQL API according to
the [ClusterCockpit specifications](https://github.com/ClusterCockpit/cc-specifications). It also
includes a web interface for ClusterCockpit. This implementation replaces the
previous PHP Symfony based ClusterCockpit web interface. The reasons for
switching from PHP Symfony to a Golang based solution are explained
[here](https://github.com/ClusterCockpit/ClusterCockpit/wiki/Why-we-switched-from-PHP-Symfony-to-a-Golang-based-solution).

## Overview

This is a golang web backend for the ClusterCockpit job-specific performance monitoring framework.
It provides a REST API for integrating ClusterCockpit with a HPC cluster batch system and external analysis scripts.
Data exchange between the web frontend and backend is based on a GraphQL API.
This is a Golang web backend for the ClusterCockpit job-specific performance monitoring framework.
It provides a REST API for integrating ClusterCockpit with an HPC cluster batch system and external analysis scripts.
Data exchange between the web front-end and the back-end is based on a GraphQL API.
The web frontend is also served by the backend using [Svelte](https://svelte.dev/) components.
Layout and styling is based on [Bootstrap 5](https://getbootstrap.com/) using [Bootstrap Icons](https://icons.getbootstrap.com/).
The backend uses [SQLite 3](https://sqlite.org/) as relational SQL database by default.
It can optionally use a MySQL/MariaDB database server.
Finished batch jobs are stored in a file-based job archive following [this specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive).
The backend supports authentication using local accounts or an external LDAP directory.
Authorization for APIs is implemented using [JWT](https://jwt.io/) tokens created with public/private key encryption.
Layout and styling are based on [Bootstrap 5](https://getbootstrap.com/) using [Bootstrap Icons](https://icons.getbootstrap.com/).

The backend uses [SQLite 3](https://sqlite.org/) as a relational SQL database by default.
Optionally it can use a MySQL/MariaDB database server.
While there are metric data backends for the InfluxDB and Prometheus time series databases, the only tested and supported setup is to use cc-metric-store as the metric data backend.
Documentation on how to integrate ClusterCockpit with other time series databases will be added in the future.

Completed batch jobs are stored in a file-based job archive according to
[this specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive).
The backend supports authentication via local accounts, an external LDAP
directory, and JWT tokens. Authorization for APIs is implemented with
[JWT](https://jwt.io/) tokens created with public/private key encryption.
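A minimal sketch of what producing such an API token looks like, assuming the `github.com/golang-jwt/jwt/v4` package and an `ed25519.PrivateKey` named `privKey` (the claim values are illustrative):

```go
// Sign a short-lived API token with the Ed25519 private key.
claims := jwt.MapClaims{
	"sub":   "demo",                           // username
	"roles": []string{"api"},                  // role list
	"exp":   time.Now().Add(time.Hour).Unix(), // expiration (Unix epoch)
}
token := jwt.NewWithClaims(jwt.SigningMethodEdDSA, claims)
signed, err := token.SignedString(privKey)
if err != nil {
	log.Fatal(err)
}
fmt.Println(signed) // pass via the X-Auth-Token or Authorization header
```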
You find more detailed information here:
* `./configs/README.md`: Infos about configuration and setup of cc-backend.
@@ -28,40 +35,54 @@ You find more detailed information here:
* `./tools/README.md`: Infos on the JWT authorization token workflows in ClusterCockpit.
* `./docs`: You can find further documentation here. There is also a Hands-on tutorial that is recommended to get familiar with the ClusterCockpit setup.

**NOTICE**
**NOTE**

ClusterCockpit requires a recent version of the golang toolchain and node.js.
You can check in `go.mod` what is the current minimal golang version required.
Homebrew and Archlinux usually have up to date golang versions. For other Linux
distros this often means you have to install the golang compiler yourself.
Fortunately this is easy with golang. Since a lot of functionality is based on
the go standard library it is crucial for security and performance to use a
recent golang version. Also an old golang tool chain may restrict the supported
versions of third party packages.
ClusterCockpit requires a current version of the golang toolchain and node.js.
You can check `go.mod` to see what is the current minimal golang version needed.
Homebrew and Archlinux usually have current golang versions. For other Linux
distros this often means that you have to install the golang compiler yourself.
Fortunately, this is easy with golang. Since much of the functionality is based
on the Go standard library, it is crucial for security and performance to use a
current version of golang. In addition, an old golang toolchain may limit the supported
versions of third-party packages.

## Demo Setup
## How to try ClusterCockpit with a demo setup

We provide a shell script that downloads demo data and automatically builds and
starts cc-backend. You need `wget`, `go`, `node`, `npm` in your path to start
the demo. The demo will download 32MB of data (223MB on disk).
We provide a shell script that downloads demo data and automatically starts the
cc-backend. You will need `wget`, `go`, `node`, `npm` in your path to
start the demo. The demo downloads 32MB of data (223MB on disk).

```sh
git clone https://github.com/ClusterCockpit/cc-backend.git
cd ./cc-backend
./startDemo.sh
```
You can access the web interface at http://localhost:8080.
Credentials for login: `demo:demo`.
Please note that some views do not work without a metric backend (e.g., the Systems and Status view).

## Howto Build and Run
You can also try the demo using the latest release binary.
Create a folder and put the release binary `cc-backend` into this folder.
Execute the following steps:
```
$ ./cc-backend -init
$ vim config.json (Add a second cluster entry and name the clusters alex and fritz)
$ wget https://hpc-mover.rrze.uni-erlangen.de/HPC-Data/0x7b58aefb/eig7ahyo6fo2bais0ephuf2aitohv1ai/job-archive-demo.tar
$ tar xf job-archive-demo.tar
$ ./cc-backend -init-db -add-user demo:admin:demo -loglevel info
$ ./cc-backend -server -dev -loglevel info
```

You can access the web interface at http://localhost:8080.
Credentials for login are `demo:demo`.
Please note that some views do not work without a metric backend (e.g., the
Analysis, Systems and Status views).

## Howto build and run

There is a Makefile to automate the build of cc-backend. The Makefile supports the following targets:
* `$ make`: Initialize `var` directory and build svelte frontend and backend binary. Please note that there is no proper prerequisite handling. Any change of frontend source files will trigger a complete rebuild.
* `$ make clean`: Clean go build cache and remove binary
* `$ make`: Initialize `var` directory and build svelte frontend and backend binary. Note that there is no proper prerequisite handling. Any change of frontend source files will result in a complete rebuild.
* `$ make clean`: Clean go build cache and remove binary.
* `$ make test`: Run the tests that are also run in the GitHub workflow setup.

A common workflow to setup cc-backend from scratch is:
A common workflow for setting up cc-backend from scratch is:
```sh
git clone https://github.com/ClusterCockpit/cc-backend.git

@@ -72,87 +93,109 @@ make
# EDIT THE .env FILE BEFORE YOU DEPLOY (Change the secrets)!
# If authentication is disabled, it can be empty.
cp configs/env-template.txt .env
vim ./.env
vim .env

cp configs/config.json ./
vim ./config.json
cp configs/config.json .
vim config.json

# Optional: Link an existing job archive:
ln -s <your-existing-job-archive> ./var/job-archive

# This will first initialize the job.db database by traversing all
# `meta.json` files in the job-archive and add a new user. `--no-server` will cause the
# executable to stop once it has done that instead of starting a server.
./cc-backend --init-db --add-user <your-username>:admin:<your-password>
# `meta.json` files in the job-archive and add a new user.
./cc-backend -init-db -add-user <your-username>:admin:<your-password>

# Start a HTTP server (HTTPS can be enabled, the default port is 8080).
# Start a HTTP server (HTTPS can be enabled in the configuration, the default port is 8080).
# The --dev flag enables GraphQL Playground (http://localhost:8080/playground) and Swagger UI (http://localhost:8080/swagger).
./cc-backend --server --dev
./cc-backend -server -dev

# Show other options:
./cc-backend --help
./cc-backend -help
```

### Run as systemd daemon

In order to run this program as a daemon, cc-backend ships with an [example systemd setup](./init/README.md).
To run this program as a daemon, cc-backend comes with an [example systemd setup](./init/README.md).

## Configuration and Setup
## Configuration and setup

cc-backend can be used as a local web-interface for an existing job archive or as a general web-interface server for a live ClusterCockpit Monitoring framework.
cc-backend can be used as a local web interface for an existing job archive or
as a server for the ClusterCockpit monitoring framework.

Create your job-archive according to [this specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive).
At least one cluster with a valid `cluster.json` file is required.
Having no jobs in the job-archive at all is fine.
Create your job archive according to [this specification](https://github.com/ClusterCockpit/cc-specifications/tree/master/job-archive).
At least one cluster directory with a valid `cluster.json` file is required. If
you configure the job archive from scratch, you must also create the job
archive version file that contains the job archive version as an integer.
You can retrieve the currently supported version by running the following
command:
```
$ ./cc-backend -version
```
It is ok to have no jobs in the job archive.

### Configuration

A config file in the JSON format has to be provided using `--config` to override the defaults.
By default, if there is a `config.json` file in the current directory of the `cc-backend` process, it will be loaded even without the `--config` flag.
You find documentation of all supported configuration and command line options [here](./configs/README.md).
A configuration file in JSON format must be specified with `-config` to override the default settings.
By default, a `config.json` file located in the current directory of the `cc-backend` process will be loaded even without the `-config` flag.
Documentation of all supported configuration and command line options can be found [here](./configs/README.md).

## Database initialization and migration

Every cc-backend version supports a specific database version.
On startup the version of the sqlite database is validated and cc-backend will terminate if the version does not match.
cc-backend supports to migrate the database schema up to the required version using the `--migrate-db` command line option.
In case the database file does not yet exist it is created and initialized by the `--migrate-db` command line option.
In case you want to use a newer database version with an older version of cc-backend you can downgrade a database using the external [migrate](https://github.com/golang-migrate/migrate) tool.
In this case you have to provide the path to the migration files in a recent source tree: `./internal/repository/migrations/`.
Each `cc-backend` version supports a specific database version.
At startup, the version of the sqlite database is checked and `cc-backend` terminates if the version does not match.
`cc-backend` supports the migration of the database schema to the required version with the command line option `-migrate-db`.
If the database file does not exist yet, it will be created and initialized with the command line option `-migrate-db`.
If you want to use a newer database version with an older version of cc-backend, you can downgrade a database with the external tool [migrate](https://github.com/golang-migrate/migrate).
In this case, you must specify the path to the migration files in a current source tree: `./internal/repository/migrations/`.
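For the downgrade path, a hedged sketch using golang-migrate as a library rather than the CLI; the `sqlite3` source-path layout and the database URL are assumptions based on the directory named above:

```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/sqlite3" // sqlite3 database driver
	_ "github.com/golang-migrate/migrate/v4/source/file"      // file:// source driver
)

func main() {
	// Paths are assumptions: point the source at the migration files from a
	// matching source tree and the database URL at your job database.
	m, err := migrate.New(
		"file://./internal/repository/migrations/sqlite3",
		"sqlite3://./var/job.db",
	)
	if err != nil {
		log.Fatal(err)
	}
	// Step one version down to match an older cc-backend.
	if err := m.Steps(-1); err != nil {
		log.Fatal(err)
	}
}
```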
## Development
In case the REST or GraphQL API is changed the according code generators have to be used.
## Development and testing
When making changes to the REST or GraphQL API, the appropriate code generators must be used.
You must always rebuild `cc-backend` after updating the API files.

### Update GraphQL schema

This project uses [gqlgen](https://github.com/99designs/gqlgen) for the GraphQL API.
The schema can be found in `./api/schema.graphqls`.
After changing it, you need to run `go run github.com/99designs/gqlgen` which will update `./internal/graph/model`.
In case new resolvers are needed, they will be inserted into `./internal/graph/schema.resolvers.go`, where you will need to implement them.
If you start cc-backend with flag `--dev` the GraphQL Playground UI is available at http://localhost:8080/playground.
After changing it, you need to run `go run github.com/99designs/gqlgen`, which will update `./internal/graph/model`.
If new resolvers are needed, they will be added to `./internal/graph/schema.resolvers.go`, where you will then need to implement them.
If you start `cc-backend` with the `-dev` flag, the GraphQL Playground UI is available at http://localhost:8080/playground.
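To illustrate the resolver workflow, a hypothetical stub of the kind gqlgen appends to `./internal/graph/schema.resolvers.go`; the resolver name and signature are invented for illustration and depend on your schema change:

```go
// Hypothetical stub generated for a new schema field; gqlgen leaves the body
// as a panic that you replace with a real implementation.
func (r *queryResolver) JobsFootprints(ctx context.Context, filter []*model.JobFilter) ([]*model.MetricFootprints, error) {
	panic(fmt.Errorf("not implemented: JobsFootprints"))
}
```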
### Update Swagger UI

This project integrates [swagger ui](https://swagger.io/tools/swagger-ui/) to document and test its REST API.
The swagger doc files can be found in `./api/`.
You can generate the configuration of swagger-ui by running `go run github.com/swaggo/swag/cmd/swag init -d ./internal/api,./pkg/schema -g rest.go -o ./api`.
You need to move the generated `./api/doc.go` to `./internal/api/doc.go`.
If you start cc-backend with flag `--dev` the Swagger UI is available at http://localhost:8080/swagger/.
You have to enter a JWT key for a user with role API.
This project integrates [swagger ui](https://swagger.io/tools/swagger-ui/) to document and test its REST API.
The swagger documentation files can be found in `./api/`.
You can generate the swagger-ui configuration by running `go run github.com/swaggo/swag/cmd/swag init -d ./internal/api,./pkg/schema -g rest.go -o ./api`.
You need to move the created `./api/doc.go` to `./internal/api/doc.go`.
If you start cc-backend with the `-dev` flag, the Swagger interface is available
at http://localhost:8080/swagger/.
You must enter a JWT key for a user with the API role.
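The `swag init` run above parses annotation comments in the Go sources; a hedged illustration of what such annotations look like (title, description, and security names are assumptions, not copied from the repository's `rest.go`):

```go
// Package api documents the REST endpoints for swag/swagger-ui.
//
// @title          ClusterCockpit REST API
// @version        1.1.0
// @description    API for batch job control and job queries.
//
// @securityDefinitions.apikey ApiKeyAuth
// @in                         header
// @name                       X-Auth-Token
package api
```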

**NOTICE** The user owning the JWT token must not be logged in the same browser (have a running session), otherwise Swagger requests will not work. It is recommended to create a separate user that has just the API role.
**NOTE**

## Project Structure
The user who owns the JWT key must not be logged into the same browser (have a
running session), or the Swagger requests will not work. It is recommended to
create a separate user that has only the API role.

## Development and testing
In case the REST or GraphQL API is changed the according code generators have to be used.

## Project file structure

- [`api/`](https://github.com/ClusterCockpit/cc-backend/tree/master/api) contains the API schema files for the REST and GraphQL APIs. The REST API is documented in the OpenAPI 3.0 format in [./api/openapi.yaml](./api/openapi.yaml).
- [`cmd/cc-backend`](https://github.com/ClusterCockpit/cc-backend/tree/master/cmd/cc-backend) contains `main.go` for the main application.
- [`configs/`](https://github.com/ClusterCockpit/cc-backend/tree/master/configs) contains documentation about configuration and command line options and required environment variables. A sample configuration file is provided.
- [`docs/`](https://github.com/ClusterCockpit/cc-backend/tree/master/docs) contains more in-depth documentation.
- [`init/`](https://github.com/ClusterCockpit/cc-backend/tree/master/init) contains an example of setting up systemd for production use.
- [`internal/`](https://github.com/ClusterCockpit/cc-backend/tree/master/internal) contains library source code that is not intended for use by others.
- [`pkg/`](https://github.com/ClusterCockpit/cc-backend/tree/master/pkg) contains Go packages that can be used by other projects.
- [`tools/`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools) Additional command line helper tools.
  - [`archive-manager`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-manager) Commands for getting infos about an existing job archive.
  - [`archive-migration`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/archive-migration) Tool to migrate from previous to current job archive version.
  - [`convert-pem-pubkey`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/convert-pem-pubkey) Tool to convert external pubkey for use in `cc-backend`.
  - [`gen-keypair`](https://github.com/ClusterCockpit/cc-backend/tree/master/tools/gen-keypair) contains a small application to generate a compatible JWT keypair. You find documentation on how to use it [here](https://github.com/ClusterCockpit/cc-backend/blob/master/docs/JWT-Handling.md).
- [`web/`](https://github.com/ClusterCockpit/cc-backend/tree/master/web) Server-side templates and frontend-related files:
  - [`frontend`](https://github.com/ClusterCockpit/cc-backend/tree/master/web/frontend) Svelte components and static assets for the frontend UI
  - [`templates`](https://github.com/ClusterCockpit/cc-backend/tree/master/web/templates) Server-side Go templates
- [`gqlgen.yml`](https://github.com/ClusterCockpit/cc-backend/blob/master/gqlgen.yml) Configures the behaviour and generation of [gqlgen](https://github.com/99designs/gqlgen).
- [`startDemo.sh`](https://github.com/ClusterCockpit/cc-backend/blob/master/startDemo.sh) is a shell script that sets up demo data, and builds and starts `cc-backend`.

- `api/` contains the API schema files for the REST and GraphQL APIs. The REST API is documented in the OpenAPI 3.0 format in [./api/openapi.yaml](./api/openapi.yaml).
- `cmd/cc-backend` contains `main.go` for the main application.
- `cmd/gen-keypair` contains a small application to generate a compatible JWT keypair, including a README about JWT setup in ClusterCockpit.
- `configs/` contains documentation about configuration and command line options and required environment variables. An example configuration file is provided.
- `init/` contains an example systemd setup for production use.
- `internal/` contains library source code that is not intended to be used by others.
- `pkg/` contains go packages that can also be used by other projects.
- `test/` Test apps and test data.
- `web/` Server side templates and frontend related files:
  - `templates` Serverside go templates
  - `frontend` Svelte components and static assets for frontend UI
- `gqlgen.yml` configures the behaviour and generation of [gqlgen](https://github.com/99designs/gqlgen).
- `startDemo.sh` is a shell script that sets up demo data, and builds and starts cc-backend.
ReleaseNotes.md

@@ -1,11 +1,11 @@
# `cc-backend` version 1.0.0
# `cc-backend` version 1.1.0

Supports job archive version 1 and database version 4.
Supports job archive version 1 and database version 6.

This is the initial release of `cc-backend`, the API backend and frontend
This is a minor release of `cc-backend`, the API backend and frontend
implementation of ClusterCockpit.

**Breaking changes**
**Breaking changes v1**

The aggregate job statistic core hours is now computed using the job table
column `num_hwthreads`. In a future release this column will be renamed to
@@ -16,11 +16,12 @@ if you have exclusive jobs, only. Please be aware that we treat this column as
it is the number of cores. In case you have SMT enabled and `num_hwthreads`
is not the number of cores the core hours will be too high by a factor!

**Notable changes**
* Supports user roles admin, support, manager, user, and api.
* Unified search bar supports job id, job name, project id, user name, and name
* Performance improvements for sqlite db backend
* Extended REST api supports to query job metrics
* Better support for shared jobs
* More flexible metric list configuration
* Versioning and migration for database and job archive
**NOTE**
If you are using the sqlite3 backend the `PRAGMA` option `foreign_keys` must be
explicitly set to ON. If using the sqlite3 console it is per default set to
OFF! On every console session you must set:
```
sqlite> PRAGMA foreign_keys = ON;
```
Otherwise if you delete jobs the jobtag relation table will not be updated accordingly!
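In application code the same pragma can be enabled once per connection; a minimal sketch assuming the `mattn/go-sqlite3` driver, which accepts the pragma as a DSN parameter:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func openJobDB() *sql.DB {
	// _foreign_keys=on enables the pragma for every connection in the pool,
	// so the jobtag relation table is cleaned up when jobs are deleted.
	db, err := sql.Open("sqlite3", "./var/job.db?_foreign_keys=on")
	if err != nil {
		log.Fatal(err)
	}
	return db
}
```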
api/schema.graphqls

@@ -237,10 +237,7 @@ input JobFilter {
  memUsedMax: FloatRange

  exclusive: Int
  sharedNode: StringInput
  selfJobId: StringInput
  selfStartTime: Time
  selfDuration: Int
  node: StringInput
}

input OrderByInput {
@@ -274,6 +271,7 @@ type JobResultList {
}

type JobLinkResultList {
  listQuery: String
  items: [JobLink!]!
  count: Int
}
cmd/cc-backend/main.go

@@ -35,6 +35,7 @@ import (
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/internal/routerConfig"
	"github.com/ClusterCockpit/cc-backend/internal/runtimeEnv"
	"github.com/ClusterCockpit/cc-backend/internal/util"
	"github.com/ClusterCockpit/cc-backend/pkg/archive"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
@@ -58,15 +59,84 @@ const logoString = `
 |_|
`

const envString = `
# Base64 encoded Ed25519 keys (DO NOT USE THESE TWO IN PRODUCTION!)
# You can generate your own keypair using the gen-keypair tool
JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0="
JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q=="

# Some random bytes used as secret for cookie-based sessions (DO NOT USE THIS ONE IN PRODUCTION)
SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629"
`

const configString = `
{
	"addr": "127.0.0.1:8080",
	"archive": {
		"kind": "file",
		"path": "./var/job-archive"
	},
	"clusters": [
		{
			"name": "name",
			"metricDataRepository": {
				"kind": "cc-metric-store",
				"url": "http://localhost:8082",
				"token": ""
			},
			"filterRanges": {
				"numNodes": {
					"from": 1,
					"to": 64
				},
				"duration": {
					"from": 0,
					"to": 86400
				},
				"startTime": {
					"from": "2023-01-01T00:00:00Z",
					"to": null
				}
			}
		}
	]
}
`

var (
	date    string
	commit  string
	version string
)

func initEnv() {
	if util.CheckFileExists("var") {
		fmt.Print("Directory ./var already exists. Exiting!\n")
		os.Exit(0)
	}

	if err := os.WriteFile("config.json", []byte(configString), 0666); err != nil {
		log.Fatalf("Writing config.json failed: %s", err.Error())
	}

	if err := os.WriteFile(".env", []byte(envString), 0666); err != nil {
		log.Fatalf("Writing .env failed: %s", err.Error())
	}

	if err := os.Mkdir("var", 0777); err != nil {
		log.Fatalf("Mkdir var failed: %s", err.Error())
	}

	err := repository.MigrateDB("sqlite3", "./var/job.db")
	if err != nil {
		log.Fatalf("Initialize job.db failed: %s", err.Error())
	}
}

func main() {
	var flagReinitDB, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagDev, flagVersion, flagLogDateTime bool
	var flagReinitDB, flagInit, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagDev, flagVersion, flagLogDateTime bool
	var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string
	flag.BoolVar(&flagInit, "init", false, "Setup var directory, initialize sqlite database file, config.json and .env")
	flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
	flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap")
	flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling")
@@ -96,6 +166,14 @@ func main() {
	// Apply config flags for pkg/log
	log.Init(flagLogLevel, flagLogDateTime)

	if flagInit {
		initEnv()
		fmt.Print("Successfully setup environment!\n")
		fmt.Print("Please review config.json and .env and adjust it to your needs.\n")
		fmt.Print("Add your job-archive at ./var/job-archive.\n")
		os.Exit(0)
	}

	// See https://github.com/google/gops (Runtime overhead is almost zero)
	if flagGops {
		if err := agent.Listen(agent.Options{}); err != nil {
@@ -274,9 +352,10 @@ func main() {
		rw.Header().Add("Content-Type", "text/html; charset=utf-8")
		rw.WriteHeader(http.StatusUnauthorized)
		web.RenderTemplate(rw, r, "login.tmpl", &web.Page{
			Title: "Login failed - ClusterCockpit",
			Error: err.Error(),
			Build: buildInfo,
			Title:   "Login failed - ClusterCockpit",
			MsgType: "alert-warning",
			Message: err.Error(),
			Build:   buildInfo,
		})
	})).Methods(http.MethodPost)

@@ -284,9 +363,10 @@ func main() {
		rw.Header().Add("Content-Type", "text/html; charset=utf-8")
		rw.WriteHeader(http.StatusOK)
		web.RenderTemplate(rw, r, "login.tmpl", &web.Page{
			Title: "Bye - ClusterCockpit",
			Info:  "Logout sucessful",
			Build: buildInfo,
			Title:   "Bye - ClusterCockpit",
			MsgType: "alert-info",
			Message: "Logout successful",
			Build:   buildInfo,
		})
	}))).Methods(http.MethodPost)

@@ -299,9 +379,10 @@ func main() {
		func(rw http.ResponseWriter, r *http.Request, err error) {
			rw.WriteHeader(http.StatusUnauthorized)
			web.RenderTemplate(rw, r, "login.tmpl", &web.Page{
				Title: "Authentication failed - ClusterCockpit",
				Error: err.Error(),
				Build: buildInfo,
				Title:   "Authentication failed - ClusterCockpit",
				MsgType: "alert-danger",
				Message: err.Error(),
				Build:   buildInfo,
			})
		})
	})
@@ -316,14 +397,20 @@ func main() {

	// Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project.
	secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) {
		routerConfig.HandleSearchBar(rw, r, api)
		routerConfig.HandleSearchBar(rw, r, buildInfo)
	})

	// Mount all /monitoring/... and /api/... routes.
	routerConfig.SetupRoutes(secured, version, commit, date)
	routerConfig.SetupRoutes(secured, buildInfo)
	api.MountRoutes(secured)

	if config.Keys.EmbedStaticFiles {
		if i, err := os.Stat("./var/img"); err == nil {
			if i.IsDir() {
				log.Info("Use local directory for static images")
				r.PathPrefix("/img/").Handler(http.StripPrefix("/img/", http.FileServer(http.Dir("./var/img"))))
			}
		}
		r.PathPrefix("/").Handler(web.ServeFiles())
	} else {
		r.PathPrefix("/").Handler(http.FileServer(http.Dir(config.Keys.StaticFiles)))
@@ -338,7 +425,7 @@ func main() {
		handlers.AllowedOrigins([]string{"*"})))
	handler := handlers.CustomLoggingHandler(io.Discard, r, func(_ io.Writer, params handlers.LogFormatterParams) {
		if strings.HasPrefix(params.Request.RequestURI, "/api/") {
			log.Infof("%s %s (%d, %.02fkb, %dms)",
			log.Debugf("%s %s (%d, %.02fkb, %dms)",
				params.Request.Method, params.URL.RequestURI(),
				params.StatusCode, float32(params.Size)/1024,
				time.Since(params.TimeStamp).Milliseconds())
@@ -392,14 +479,14 @@ func main() {
	// Because this program will want to bind to a privileged port (like 80), the listener must
	// be established first, then the user can be changed, and after that,
	// the actual http server can be started.
	if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
	if err = runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
		log.Fatalf("error while preparing server start: %s", err.Error())
	}

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
		if err = server.Serve(listener); err != nil && err != http.ErrServerClosed {
			log.Fatalf("starting server failed: %v", err)
		}
	}()
@@ -425,7 +512,7 @@ func main() {
	log.Info("Register undead jobs service")

	s.Every(1).Day().At("3:00").Do(func() {
		err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
		err = jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
		if err != nil {
			log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
		}
@@ -440,7 +527,7 @@ func main() {

	cfg.Retention.IncludeDB = true

	if err := json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
	if err = json.Unmarshal(config.Keys.Archive, &cfg); err != nil {
		log.Warn("Error while unmarshaling raw config json")
	}

@@ -511,7 +598,7 @@ func main() {
	}

	if err != nil {
		log.Warnf("Error while looking for retention jobs: %v", err)
		log.Warnf("Error while looking for compression jobs: %v", err)
	}
	ar.Compress(jobs)
})
configs/README.md

@@ -1,10 +1,10 @@
## Intro

cc-backend requires a configuration file specifying the cluster systems to be used. Still many default
options documented below are used. cc-backend tries to load a config.json from the working directory per default.
To overwrite the default specify a json config file location using the command line option `--config <filepath>`.
All security relevant configuration, e.g., keys and passwords, are set using environment variables.
It is supported to specify these by means of an `.env` file located in the project root.
cc-backend requires a configuration file that specifies the cluster systems to be used.
To override the default, specify the location of a json configuration file with the `-config <file path>` command line option.
All security-related configurations, e.g. keys and passwords, are set using
environment variables.
It is supported to set these by means of a `.env` file in the project root.

## Configuration Options

@@ -19,12 +19,12 @@ It is supported to specify these by means of an `.env` file located in the proje
* `job-archive`: Type string. Path to the job-archive. Default: `./var/job-archive`.
* `disable-archive`: Type bool. Keep all metric data in the metric data repositories, do not write to the job-archive. Default `false`.
* `validate`: Type bool. Validate all input json documents against json schema.
* `"session-max-age`: Type string. Specifies for how long a session shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire! Default `168h`.
* `"jwt-max-age`: Type string. Specifies for how long a JWT token shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire! Default `0`.
* `session-max-age`: Type string. Specifies for how long a session shall be valid as a string parsable by time.ParseDuration() (see the sketch after this list). If 0 or empty, the session/token does not expire! Default `168h`.
* `jwt-max-age`: Type string. Specifies for how long a JWT token shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire! Default `0`.
* `https-cert-file` and `https-key-file`: Type string. If both those options are not empty, use HTTPS using those certificates.
* `redirect-http-to`: Type string. If not the empty string and `addr` does not end in ":80", redirect every request incoming at port 80 to that url.
* `machine-state-dir`: Type string. Where to store MachineState files. TODO: Explain in more detail!
* `"stop-jobs-exceeding-walltime`: Type int. If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job. Default `0`.
* `stop-jobs-exceeding-walltime`: Type int. If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job. Default `0`.
* `short-running-jobs-duration`: Type int. Do not show running jobs shorter than X seconds. Default `300`.
* `ldap`: Type object. For LDAP Authentication and user synchronisation. Default `nil`.
  - `url`: Type string. URL of LDAP directory server.
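The `session-max-age` and `jwt-max-age` values above are interpreted by Go's `time.ParseDuration`; a minimal sketch of the accepted string format:

```go
package main

import (
	"fmt"
	"log"
	"time"
)

func main() {
	// "168h" is the documented session-max-age default (7 days).
	d, err := time.ParseDuration("168h")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Hours() / 24) // 7
}
```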
@@ -73,4 +73,4 @@ An example env file is found in this directory. Copy it to `.env` in the project
* `SESSION_KEY`: Some random bytes used as secret for cookie-based sessions.
* `LDAP_ADMIN_PASSWORD`: The LDAP admin user password (optional).
* `CROSS_LOGIN_JWT_HS512_KEY`: Used for token based logins via another authentication service.
* `LOGLEVEL`: Can be `err`, `warn`, `info` or `debug` (optional, `debug` by default). Can be used to reduce logging.
* `LOGLEVEL`: Can be `err`, `warn`, `info` or `debug` (optional, `warn` by default). Can be used to reduce logging.
configs/env-template.txt

@@ -1,10 +1,10 @@
# Base64 encoded Ed25519 keys (DO NOT USE THESE TWO IN PRODUCTION!)
# You can generate your own keypair using `go run utils/gen-keypair.go`
# You can generate your own keypair using `go run tools/gen-keypair/main.go`
JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0="
JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q=="

# Base64 encoded Ed25519 public key for accepting externally generated JWTs
# Keys in PEM format can be converted, see `tools/convert-pem-pubkey-for-cc/Readme.md`
# Keys in PEM format can be converted, see `tools/convert-pem-pubkey/Readme.md`
CROSS_LOGIN_JWT_PUBLIC_KEY=""

# Some random bytes used as secret for cookie-based sessions (DO NOT USE THIS ONE IN PRODUCTION)
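A minimal sketch of what a compatible keypair generator does, assuming `crypto/ed25519` and standard base64 encoding as suggested by the key format above (the actual `tools/gen-keypair` implementation may differ):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// Print in the .env format shown above.
	fmt.Printf("JWT_PUBLIC_KEY=%q\n", base64.StdEncoding.EncodeToString(pub))
	fmt.Printf("JWT_PRIVATE_KEY=%q\n", base64.StdEncoding.EncodeToString(priv))
}
```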
@@ -1,22 +1,22 @@
# Release versioning
# Release versions

Releases are numbered with an integer ID, starting with 1.
Each release embeds the following assets in the binary:
* Web front-end with Javascript files and all static assets.
Versions are marked according to [semantic versioning](https://semver.org).
Each version embeds the following static assets in the binary:
* Web frontend with javascript files and all static assets.
* Golang template files for server-side rendering.
* JSON schema files for validation.
* Database migration files
* Database migration files.

The remaining external assets are:
* The SQL database used
* The SQL database used.
* The job archive
* The configuration file `config.json`
* The configuration files `config.json` and `.env`.

Both external assets are also versioned with integer IDs.
The external assets are versioned with integer IDs.
This means that each release binary is bound to specific versions of the SQL
database and the job archive.
The configuration file is validated against the current schema on startup.
The command line switch `-migrate-db` can be used to upgrade the SQL database
The configuration file is checked against the current schema at startup.
The `-migrate-db` command line switch can be used to upgrade the SQL database
to migrate from a previous version to the latest one.
We offer a separate tool `archive-migration` to migrate an existing job
archive from the previous to the latest version.
@@ -24,14 +24,15 @@ archive from the previous to the latest version.
# Versioning of APIs

cc-backend provides two API backends:
* A REST API for querying jobs
* A GraphQL API for data exchange between web frontend and cc-backend
* A REST API for querying jobs.
* A GraphQL API for data exchange between web frontend and cc-backend.

Both APIs will also be versioned. We still need to decide whether we will also support
older REST API versions by versioning the endpoint URLs.
The REST API will also be versioned. We still have to decide whether we will also
support older REST API versions by versioning the endpoint URLs.
The GraphQL API is for internal use and will not be versioned.

# How to build

Please always build `cc-backend` with the supplied Makefile. This will ensure
that the frontend is also built correctly and that the version is encoded
in the binary.
In general it is recommended to use the provided release binary.
In case you want to build `cc-backend` please always use the provided makefile. This will ensure
that the frontend is also built correctly and that the version is encoded in the binary.
@@ -1,9 +1,9 @@
# CC-HANDSON - Setup ClusterCockpit from scratch (w/o docker)
# Hands-on setup ClusterCockpit from scratch (w/o docker)

## Prerequisites
* Perl
* Yarn
* Go
* perl
* go
* npm
* Optional: curl
* Script migrateTimestamp.pl

@@ -33,22 +33,17 @@ Start by creating a base folder for all of the following steps.
* Clone Repository
  - `git clone https://github.com/ClusterCockpit/cc-backend.git`
  - `cd cc-backend`
* Setup Frontend
  - `cd ./web/frontend`
  - `yarn install`
  - `yarn build`
  - `cd ../..`
* Build Go Executable
  - `go build ./cmd/cc-backend/`
* Activate & Config environment for cc-backend
* Build
  - `make`
* Activate & configure environment for cc-backend
  - `cp configs/env-template.txt .env`
  - Optional: Have a look via `vim ./.env`
  - Optional: Have a look via `vim .env`
  - Copy the `config.json` file included in this tarball into the root directory of cc-backend: `cp ../../config.json ./`
* Back to toplevel `clustercockpit`
  - `cd ..`
* Prepare Datafolder and Database file
  - `mkdir var`
  - `./cc-backend --migrate-db`
  - `./cc-backend -migrate-db`

### Setup cc-metric-store
* Clone Repository
@@ -112,7 +107,7 @@ Done for checkpoints
  - `cp source-data/job-archive-source/woody/cluster.json cc-backend/var/job-archive/woody/`
* Initialize Job-Archive in SQLite3 job.db and add demo user
  - `cd cc-backend`
  - `./cc-backend --init-db --add-user demo:admin:AdminDev`
  - `./cc-backend -init-db -add-user demo:admin:demo`
  - Expected output:
    ```
    <6>[INFO] new user "demo" created (roles: ["admin"], auth-source: 0)
@@ -123,7 +118,7 @@ Done for checkpoints
  - `cd ..`

### Startup both Apps
* In cc-backend root: `$ ./cc-backend --server --dev`
* In cc-backend root: `$ ./cc-backend -server -dev`
  - Starts ClusterCockpit at `http://localhost:8080`
  - Log: `<6>[INFO] HTTP server listening at :8080...`
  - Use local internet browser to access interface
@@ -161,7 +156,7 @@ Content-Length: 119
```

### Development API web interfaces
The `--dev` flag enables web interfaces to document and test the apis:
The `-dev` flag enables web interfaces to document and test the apis:
* http://localhost:8080/playground - A GraphQL playground. To use it you must have an authenticated session in the same browser.
* http://localhost:8080/swagger - A Swagger UI. To use it you have to be logged out, so no user session in the same browser. Use the JWT token with role Api generated previously to authenticate via http header.
@@ -1,9 +1,8 @@
# Overview

Customizing `cc-backend` means changing the logo and certain legal texts
instead of the placeholders. To change the logo displayed in the navigation bar, the
file `web/frontend/public/img/logo.png` in the source tree must be replaced
and cc-backend must be rebuilt.
Customizing `cc-backend` means changing the logo, legal texts, and the login
template instead of the placeholders. You can also place a text file in `./var`
to add dynamic status or notification messages to the ClusterCockpit homepage.

# Replace legal texts

@@ -11,3 +10,20 @@ To replace the `imprint.tmpl` and `privacy.tmpl` legal texts, you can place your
version in `./var/`. At startup `cc-backend` will check if `./var/imprint.tmpl` and/or
`./var/privacy.tmpl` exist and use them instead of the built-in placeholders.
You can use the placeholders in `web/templates` as a blueprint.

# Replace login template
To replace the default login layout and styling, you can place your version in
`./var/`. At startup `cc-backend` will check if `./var/login.tmpl` exists and use
it instead of the built-in placeholder. You can use the default template
`web/templates/login.tmpl` as a blueprint.

# Replace logo
To change the logo displayed in the navigation bar, you can provide the file
`logo.png` in the folder `./var/img/`. On startup `cc-backend` will check if the
folder exists and use the images provided there instead of the built-in images.
You may also place additional images there that you use in a custom login template.

# Add notification banner on homepage
To add a notification banner you can add a file `notice.txt` to `./var`. As long
as this file is present all text in this file is shown in an info banner on the
homepage.

docs/dev-authentication.md (new file, 156 lines)
@@ -0,0 +1,156 @@
# Overview

The implementation of authentication is not easy to understand by just looking
at the code. The authentication is implemented in `internal/auth/`. In `auth.go`
an interface is defined that any authentication provider must fulfill. It also
acts as a dispatcher to delegate the calls to the available authentication
providers.

The most important routines are:
* `CanLogin()` Check if the authentication method is supported for the login attempt
* `Login()` Handle POST request to login user and start a new session
* `Auth()` Authenticate user and put User object in context of the request

The http router calls auth in the following cases:
* `r.Handle("/login", authentication.Login( ... )).Methods(http.MethodPost)`:
  The POST request on the `/login` route will call the Login callback.
* Any route in the secured subrouter will always call Auth(), on success it will
  call the next handler in the chain, on failure it will render the login
  template.
```
secured.Use(func(next http.Handler) http.Handler {
	return authentication.Auth(
		// On success:
		next,

		// On failure:
		func(rw http.ResponseWriter, r *http.Request, err error) {
			// Render login form
		})
})
```

For non API routes a JWT token can be used to initiate an authenticated user
session. This can either happen by calling the login route with a token
provided in a header or query URL or via the `Auth()` method on first access
to a secured URL via a special cookie containing the JWT token.
For API routes the access is authenticated on every request using the JWT token
and no session is initiated.

# Login

The Login function (located in `auth.go`):
* Extracts the user name and gets the user from the user database table. In case the
  user is not found the user object is set to nil.
* Iterates over all authenticators and:
  - Calls the `CanLogin` function which checks if the authentication method is
    supported for this user and the user object is valid.
  - Calls the `Login` function to authenticate the user. On success a valid user
    object is returned.
  - Creates a new session object, stores the user attributes in the session and
    saves the session.
  - Starts the `onSuccess` http handler

## Local authenticator

This authenticator is applied if
```
return user != nil && user.AuthSource == AuthViaLocalPassword
```

Compares the password provided by the login form to the password hash stored in
the user database table:
```
if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(r.FormValue("password"))); e != nil {
	log.Errorf("AUTH/LOCAL > Authentication for user %s failed!", user.Username)
	return nil, fmt.Errorf("AUTH/LOCAL > Authentication failed")
}
```

## LDAP authenticator

This authenticator is applied if
```
return user != nil && user.AuthSource == AuthViaLDAP
```

Gets the LDAP connection and tries a bind with the provided credentials:
```
if err := l.Bind(userDn, r.FormValue("password")); err != nil {
	log.Errorf("AUTH/LDAP > Authentication for user %s failed: %v", user.Username, err)
	return nil, fmt.Errorf("AUTH/LDAP > Authentication failed")
}
```

## JWT authenticator

Login via JWT token will create a session without password.
For login the `X-Auth-Token` header is not supported.
This authenticator is applied if either user is not nil and auth source is
`AuthViaToken`, or the Authorization header is present, or the URL query key
login-token is present:
```
return (user != nil && user.AuthSource == AuthViaToken) ||
	r.Header.Get("Authorization") != "" ||
	r.URL.Query().Get("login-token") != ""
```

The Login function:
* Parses the token
* Checks if the signing method is EdDSA or HS256 or HS512
* Checks if claims are valid and extracts the claims
* The following claims have to be present:
  - `sub`: The subject, in this case this is the username
  - `exp`: Expiration in Unix epoch time
  - `roles`: String array with roles of user
* In case user is not yet set, which is usually the case:
  - Try to fetch user from database
  - In case user is not yet present add user to user database table with `AuthViaToken` AuthSource.
* Return valid user object
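A hedged sketch of the parse-and-validate step described above, assuming the `github.com/golang-jwt/jwt/v4` package and an `ed25519.PublicKey` named `pubKey` (the real implementation also accepts HS256/HS512 and handles the database lookup):

```go
// Parse the token and verify the EdDSA signature.
token, err := jwt.Parse(rawToken, func(t *jwt.Token) (interface{}, error) {
	if t.Method != jwt.SigningMethodEdDSA {
		return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
	}
	return pubKey, nil
})
if err != nil || !token.Valid {
	return nil, fmt.Errorf("invalid token")
}

// Extract the required claims.
claims := token.Claims.(jwt.MapClaims)
sub, _ := claims["sub"].(string) // username; exp is checked during Parse
```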
# Auth

The Auth function (located in `auth.go`):
* Returns a new http handler function that is defined right away
* This handler iterates over all authenticators
* Calls `Auth()` on every authenticator
* If err is nil and the user object is valid it puts the user object in the
  request context and starts the onSuccess http handler
* Otherwise it calls the onFailure handler
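A condensed sketch of this dispatcher loop under the interface described above; type and field names are assumptions for illustration:

```go
// Auth returns a middleware that tries each authenticator in turn.
func (auth *Authentication) Auth(
	onSuccess http.Handler,
	onFailure func(rw http.ResponseWriter, r *http.Request, err error),
) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		for _, authenticator := range auth.authenticators {
			user, err := authenticator.Auth(rw, r)
			if err != nil {
				onFailure(rw, r, err)
				return
			}
			if user != nil {
				// Put the user object in the request context and continue the chain.
				ctx := context.WithValue(r.Context(), ContextUserKey, user)
				onSuccess.ServeHTTP(rw, r.WithContext(ctx))
				return
			}
		}
		onFailure(rw, r, errors.New("unauthorized: no authenticator accepted the request"))
	})
}
```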
## Local

Calls the `AuthViaSession()` function in `auth.go`. This will extract username,
projects and roles from the session and initialize a user object with those
values.

## LDAP

Calls the `AuthViaSession()` function in `auth.go`. This will extract username,
projects and roles from the session and initialize a user object with those
values.

# JWT

Check for JWT token:
* Is the token passed in the `X-Auth-Token` or `Authorization` header
* If no token is found in a header it tries to read the token from a configured
  cookie.

Finally it calls AuthViaSession in `auth.go` if a valid session exists. This is
true if a JWT token was previously used to initiate a session. In this case the
user object initialized with the session is returned right away.

In case a token was found extract and parse the token:
* Check if signing method is Ed25519/EdDSA
* In case publicKeyCrossLogin is configured:
  - Check if the `iss` issuer claim matches the trusted issuer from configuration
  - Return public cross login key
  - Otherwise return standard public key
* Check if claims are valid
* Depending on the option `ForceJWTValidationViaDatabase` the roles are
  extracted from the JWT token or taken from the user object fetched from the database
* In case the token was extracted from a cookie create a new session and ask the
  browser to delete the JWT cookie
* Return valid user object
docs/dev-release.md (new file, 13 lines)

@@ -0,0 +1,13 @@
# Steps to prepare a release

1. On `hotfix` branch:
   * Update ReleaseNotes.md
   * Update version in Makefile
   * Commit, push, and pull request
   * Merge in master

2. On Linux host:
   * Ensure that the GitHub token environment variable `GITHUB_TOKEN` is set
   * Pull master
   * Create release tag: `git tag v1.1.0 -m release`
   * Execute `goreleaser release`
@@ -3,11 +3,11 @@
## Usage

* Searchtags are implemented as `type:<query>` search-string
* Types `jobId, jobName, projectId, username, name` for roles `admin` and `support`
* Types `jobId, jobName, projectId, username, name, arrayJobId` for roles `admin` and `support`
  * `jobName` is jobName as persisted in `job.meta_data` table-column
  * `username` is the actual account identifier as persisted in `job.user` table-column
  * `name` is the account owner's name as persisted in `user.name` table-column
* Types `jobId, jobName, projectId` for role `user`
* Types `jobId, jobName, projectId, arrayJobId` for role `user`
* Examples:
  * `jobName:myJob12`
  * `jobId:123456`
@@ -20,11 +20,12 @@
* JobName: Job-Table (Allows multiple identical matches, e.g. JobNames from different clusters)
* ProjectId: Job-Table
* Username: Users-Table
  * **Please Note**: Only users with jobs will be shown in the table! I.e., users without jobs will be missing from the table.
  * **Please Note**: Only users with jobs will be shown in the table! I.e., users without jobs will be missing from the table. Also, a `Last 30 Days` filter is active by default and might filter out expected users.
* Name: Users-Table
  * **Please Note**: Only users with jobs will be shown in the table! I.e., users without jobs will be missing from the table.
  * **Please Note**: Only users with jobs will be shown in the table! I.e., users without jobs will be missing from the table. Also, a `Last 30 Days` filter is active by default and might filter out expected users.
* ArrayJobId: Job-Table (Lists all Jobs of the queried ArrayJobId)
* Best guess search always redirects to Job-Table or `/monitoring/user/$USER` (first username match)
* Unprocessable queries will redirect to `/monitoring/jobs/?`
* Unprocessable queries will display messages detailing the cause (Info, Warning, Error)
* Spaces trimmed (both for searchTag and queryString)
  * ` job12` == `job12`
  * `projectID : abcd ` == `projectId:abcd`
102
init/README.md
@ -1,71 +1,79 @@
# How to run this as a systemd service
# How to run `cc-backend` as a systemd service

The files in this directory assume that you install ClusterCockpit to `/opt/monitoring`.
Of course you can choose any other location, but make sure to replace all paths that begin with `/opt/monitoring` in the `clustercockpit.service` file!
The files in this directory assume that you install ClusterCockpit to
`/opt/monitoring/cc-backend`.
Of course you can choose any other location, but make sure you replace all paths
starting with `/opt/monitoring/cc-backend` in the `clustercockpit.service` file!

If you have not installed [yarn](https://yarnpkg.com/getting-started/install) and [go](https://go.dev/doc/install) already, do that (Golang is available in most package managers).
It is recommended and easy to install the most recent stable version of Golang, as every version also improves the Golang standard library.

The `config.json` can have the optional fields *user* and *group*.
If provided, the application will call [setuid](https://man7.org/linux/man-pages/man2/setuid.2.html) and [setgid](https://man7.org/linux/man-pages/man2/setgid.2.html) after having read the config file and having bound to a TCP port (so that it can take a privileged port), but before it starts accepting any connections.
This is good for security, but means that the directories `web/frontend/public`, `var/` and `web/templates/` must be readable by that user and `var/` writable as well (all paths relative to the repo's root).
The `.env` and `config.json` files might contain secrets and should not be readable by that user.
If those files are changed, the server has to be restarted.
The `config.json` may contain the optional fields *user* and *group*. If
specified, the application will call
[setuid](https://man7.org/linux/man-pages/man2/setuid.2.html) and
[setgid](https://man7.org/linux/man-pages/man2/setgid.2.html) after reading the
config file and binding to a TCP port (so it can take a privileged port), but
before it starts accepting any connections. This is good for security, but also
means that the `var/` directory must be readable and writable by this user.
The `.env` and `config.json` files may contain secrets and should not be
readable by this user. If these files are changed, the server must be restarted.
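
As a sketch, the two optional fields would appear in `config.json` like this (the account names are placeholders; the rest of the file is omitted):

```json
{
  "user": "clustercockpit",
  "group": "clustercockpit"
}
```
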
```sh
# 1.: Clone this repository to /opt/monitoring
git clone git@github.com:ClusterCockpit/cc-backend.git /opt/monitoring
# 1. Clone this repository somewhere in your home
git clone git@github.com:ClusterCockpit/cc-backend.git <DSTDIR>

# 2.: Install all dependencies and build everything
cd /mnt/monitoring
go get && go build cmd/cc-backend && (cd ./web/frontend && yarn install && yarn build)
# 2. (Optional) Install dependencies and build. In general it is recommended to use the provided release binaries.
cd <DSTDIR>
make
sudo mkdir -p /opt/monitoring/cc-backend/
cp ./cc-backend /opt/monitoring/cc-backend/

# 3.: Modify the `./config.json` and env-template.txt file from the configs directory to your liking and put it in the repo root
cp ./configs/config.json ./config.json
cp ./configs/env-template.txt ./.env
vim ./config.json # do your thing...
vim ./.env # do your thing...
# 3. Modify the `./config.json` and env-template.txt file from the configs directory to your liking and put it in the target directory
cp ./configs/config.json /opt/monitoring/config.json
cp ./configs/env-template.txt /opt/monitoring/.env
vim /opt/monitoring/config.json # do your thing...
vim /opt/monitoring/.env # do your thing...

# 4.: Add the systemd service unit file (in case /opt/ is mounted on another file system it may be better to copy the file to /etc)
sudo ln -s /mnt/monitoring/init/clustercockpit.service /etc/systemd/system/clustercockpit.service
# 4. (Optional) Customization: Add your versions of the login view, legal texts, and logo image.
# You may use the templates in `./web/templates` as a blueprint. Each overwrite is separate.
cp login.tmpl /opt/monitoring/cc-backend/var/
cp imprint.tmpl /opt/monitoring/cc-backend/var/
cp privacy.tmpl /opt/monitoring/cc-backend/var/
# Ensure that your logo and any images you use in your login template have a suitable size.
cp -R img /opt/monitoring/cc-backend/img

# 5.: Enable and start the server
# 5. Copy the systemd service unit file. You may adapt it to your needs.
sudo cp ./init/clustercockpit.service /etc/systemd/system/clustercockpit.service

# 6. Enable and start the server
sudo systemctl enable clustercockpit.service # optional (if done, (re-)starts automatically)
sudo systemctl start clustercockpit.service

# Check what's going on:
sudo systemctl status clustercockpit.service
sudo journalctl -u clustercockpit.service
```

# Recommended deployment workflow
# Recommended workflow for deployment

It is recommended to install all ClusterCockpit components in a common durectory, this can be something like `/opt/monitoring`, `var/monitoring` or `var/clustercockpit`.
In the following we are using `/opt/monitoring`.
It is recommended to install all ClusterCockpit components in a common directory, e.g. `/opt/monitoring`, `var/monitoring` or `var/clustercockpit`.
In the following we use `/opt/monitoring`.

Two systemd services are running on the central monitoring server:
Two systemd services run on the central monitoring server:
* clustercockpit : binary cc-backend in `/opt/monitoring/cc-backend`.
* cc-metric-store : binary cc-metric-store in `/opt/monitoring/cc-metric-store`.

clustercockpit : Binary cc-backend in `/opt/monitoring/cc-backend`
cc-metric-store: Binary cc-metric-store in `/opt/monitoring/cc-metric-store`

ClusterCockpit is deployed as a single file binary that embeds all static assets.
We recommend to keep all binaries in a folder `archive` and link the currently active one from the cc-backend root.
This allows to easily roll back in case something breaks.
ClusterCockpit is deployed as a single binary that embeds all static assets.
We recommend keeping all `cc-backend` binary versions in a folder `archive` and
linking the currently active one from the `cc-backend` root.
This allows for easy roll-back in case something doesn't work.

## Workflow to deploy a new version

This example assumes the DB and job archive did not change.
This example assumes the DB and job archive versions did not change.
* Stop the systemd service: `$ sudo systemctl stop clustercockpit.service`
* Back up the sqlite DB file and the job archive directory tree!
* Clone the cc-backend source tree (e.g. in your home directory)
* Copy the adapted legal text files into the git source tree (./web/templates).
* Build cc-backend:
```
$ cd web/frontend
$ yarn && yarn build
$ cd ../../
$ go build ./cmd/cc-backend
```
* Copy `cc-backend` binary to `/opt/monitoring/cc-backend/archive`
* Link from cc-backend root to recent version
* Restart systemd service: `$ sudo systemctl restart clustercockpit.service`
* Copy the `cc-backend` binary to `/opt/monitoring/cc-backend/archive` (tip: use a date tag like `YYYYMMDD-cc-backend`)
* Link from the cc-backend root to the current version (see the sketch after this list)
* Start the systemd service: `$ sudo systemctl start clustercockpit.service`
* Check if everything is ok: `$ sudo systemctl status clustercockpit.service`
* Check the log for issues: `$ sudo journalctl -u clustercockpit.service`
* Check the ClusterCockpit web frontend and your Slurm adapters if anything is broken!
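
The archive-and-link step from the list above, spelled out (sketch; the date tag is an example):

```sh
cp ./cc-backend /opt/monitoring/cc-backend/archive/20230401-cc-backend
ln -sf ./archive/20230401-cc-backend /opt/monitoring/cc-backend/cc-backend
```
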
@ -523,7 +523,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
	} else if err == nil {
		for _, job := range jobs {
			if (req.StartTime - job.StartTimeUnix) < 86400 {
				handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw)
				handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d, jobid: %d", job.ID, job.JobID), http.StatusUnprocessableEntity, rw)
				return
			}
		}
@ -75,10 +75,7 @@ func getRoleEnum(roleStr string) Role {
}

func isValidRole(role string) bool {
	if getRoleEnum(role) == RoleError {
		return false
	}
	return true
	return getRoleEnum(role) != RoleError
}

func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) {
@ -166,16 +163,16 @@ func GetValidRoles(user *User) ([]string, error) {
	return vals, fmt.Errorf("%s: only admins are allowed to fetch a list of roles", user.Username)
}

// Called by routerConfig web.page setup in backend: Only requires known user and/or not API user
// Called by routerConfig web.page setup in backend: Only requires known user
func GetValidRolesMap(user *User) (map[string]Role, error) {
	named := make(map[string]Role)
	if user.HasNotRoles([]Role{RoleApi, RoleAnonymous}) {
	if user.HasNotRoles([]Role{RoleAnonymous}) {
		for i := RoleApi; i < RoleError; i++ {
			named[GetRoleString(i)] = i
		}
		return named, nil
	}
	return named, fmt.Errorf("Only known users are allowed to fetch a list of roles")
	return named, fmt.Errorf("only known users are allowed to fetch a list of roles")
}

// Find highest role
@ -300,6 +297,7 @@ func (auth *Authentication) AuthViaSession(
		return nil, nil
	}

	// TODO Check if keys are present in session?
	username, _ := session.Values["username"].(string)
	projects, _ := session.Values["projects"].([]string)
	roles, _ := session.Values["roles"].([]string)
@ -320,11 +318,9 @@ func (auth *Authentication) Login(
		err := errors.New("no authenticator applied")
		username := r.FormValue("username")
		user := (*User)(nil)

		if username != "" {
			if user, _ = auth.GetUser(username); err != nil {
				// log.Warnf("login of unkown user %v", username)
				_ = err
			}
			user, _ = auth.GetUser(username)
		}

		for _, authenticator := range auth.authenticators {
@ -364,7 +360,7 @@ func (auth *Authentication) Login(
			return
		}

		log.Warn("login failed: no authenticator applied")
		log.Debugf("login failed: no authenticator applied")
		onfailure(rw, r, err)
	})
}
@ -380,7 +376,7 @@ func (auth *Authentication) Auth(
		for _, authenticator := range auth.authenticators {
			user, err := authenticator.Auth(rw, r)
			if err != nil {
				log.Warnf("authentication failed: %s", err.Error())
				log.Infof("authentication failed: %s", err.Error())
				http.Error(rw, err.Error(), http.StatusUnauthorized)
				return
			}
@ -393,7 +389,7 @@ func (auth *Authentication) Auth(
			return
		}

		log.Warnf("authentication failed: %s", "no authenticator applied")
		log.Debugf("authentication failed: %s", "no authenticator applied")
		// http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
		onfailure(rw, r, errors.New("unauthorized (login first or use a token)"))
	})
@ -92,7 +92,7 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
		}
	} else {
		ja.publicKeyCrossLogin = nil
		log.Warn("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
		log.Debug("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
	}

	return nil
@ -103,7 +103,9 @@ func (ja *JWTAuthenticator) CanLogin(
	rw http.ResponseWriter,
	r *http.Request) bool {

	return (user != nil && user.AuthSource == AuthViaToken) || r.Header.Get("Authorization") != "" || r.URL.Query().Get("login-token") != ""
	return (user != nil && user.AuthSource == AuthViaToken) ||
		r.Header.Get("Authorization") != "" ||
		r.URL.Query().Get("login-token") != ""
}

func (ja *JWTAuthenticator) Login(
@ -111,13 +113,9 @@ func (ja *JWTAuthenticator) Login(
	rw http.ResponseWriter,
	r *http.Request) (*User, error) {

	rawtoken := r.Header.Get("X-Auth-Token")
	rawtoken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
	if rawtoken == "" {
		rawtoken = r.Header.Get("Authorization")
		rawtoken = strings.TrimPrefix(rawtoken, "Bearer ")
		if rawtoken == "" {
			rawtoken = r.URL.Query().Get("login-token")
		}
		rawtoken = r.URL.Query().Get("login-token")
	}

	token, err := jwt.Parse(rawtoken, func(t *jwt.Token) (interface{}, error) {
@ -134,7 +132,7 @@ func (ja *JWTAuthenticator) Login(
		return nil, err
	}

	if err := token.Claims.Valid(); err != nil {
	if err = token.Claims.Valid(); err != nil {
		log.Warn("jwt token claims are not valid")
		return nil, err
	}
@ -220,7 +218,10 @@ func (ja *JWTAuthenticator) Auth(
	}

	// Is there more than one public key?
	if ja.publicKeyCrossLogin != nil && ja.config != nil && ja.config.TrustedExternalIssuer != "" {
	if ja.publicKeyCrossLogin != nil &&
		ja.config != nil &&
		ja.config.TrustedExternalIssuer != "" {

		// Determine whether to use the external public key
		unvalidatedIssuer, success := t.Claims.(jwt.MapClaims)["iss"].(string)
		if success && unvalidatedIssuer == ja.config.TrustedExternalIssuer {
@ -114,8 +114,9 @@ type ComplexityRoot struct {
	}

	JobLinkResultList struct {
		Count func(childComplexity int) int
		Items func(childComplexity int) int
		Count     func(childComplexity int) int
		Items     func(childComplexity int) int
		ListQuery func(childComplexity int) int
	}

	JobMetric struct {
@ -629,6 +630,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
		return e.complexity.JobLinkResultList.Items(childComplexity), true

	case "JobLinkResultList.listQuery":
		if e.complexity.JobLinkResultList.ListQuery == nil {
			break
		}

		return e.complexity.JobLinkResultList.ListQuery(childComplexity), true

	case "JobMetric.series":
		if e.complexity.JobMetric.Series == nil {
			break
@ -1739,10 +1747,7 @@ input JobFilter {
	memUsedMax: FloatRange

	exclusive: Int
	sharedNode: StringInput
	selfJobId: StringInput
	selfStartTime: Time
	selfDuration: Int
	node: StringInput
}

input OrderByInput {
@ -1776,6 +1781,7 @@ type JobResultList {
}

type JobLinkResultList {
	listQuery: String
	items: [JobLink!]!
	count: Int
}
@ -3951,6 +3957,8 @@ func (ec *executionContext) fieldContext_Job_concurrentJobs(ctx context.Context,
		IsResolver: true,
		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
			switch field.Name {
			case "listQuery":
				return ec.fieldContext_JobLinkResultList_listQuery(ctx, field)
			case "items":
				return ec.fieldContext_JobLinkResultList_items(ctx, field)
			case "count":
@ -4140,6 +4148,47 @@ func (ec *executionContext) fieldContext_JobLink_jobId(ctx context.Context, fiel
	return fc, nil
}

func (ec *executionContext) _JobLinkResultList_listQuery(ctx context.Context, field graphql.CollectedField, obj *model.JobLinkResultList) (ret graphql.Marshaler) {
	fc, err := ec.fieldContext_JobLinkResultList_listQuery(ctx, field)
	if err != nil {
		return graphql.Null
	}
	ctx = graphql.WithFieldContext(ctx, fc)
	defer func() {
		if r := recover(); r != nil {
			ec.Error(ctx, ec.Recover(ctx, r))
			ret = graphql.Null
		}
	}()
	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
		ctx = rctx // use context from middleware stack in children
		return obj.ListQuery, nil
	})
	if err != nil {
		ec.Error(ctx, err)
		return graphql.Null
	}
	if resTmp == nil {
		return graphql.Null
	}
	res := resTmp.(*string)
	fc.Result = res
	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
}

func (ec *executionContext) fieldContext_JobLinkResultList_listQuery(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
	fc = &graphql.FieldContext{
		Object:     "JobLinkResultList",
		Field:      field,
		IsMethod:   false,
		IsResolver: false,
		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
			return nil, errors.New("field of type String does not have child fields")
		},
	}
	return fc, nil
}

func (ec *executionContext) _JobLinkResultList_items(ctx context.Context, field graphql.CollectedField, obj *model.JobLinkResultList) (ret graphql.Marshaler) {
	fc, err := ec.fieldContext_JobLinkResultList_items(ctx, field)
	if err != nil {
@ -11148,7 +11197,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
		asMap[k] = v
	}

	fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "flopsAnyAvg", "memBwAvg", "loadAvg", "memUsedMax", "exclusive", "sharedNode", "selfJobId", "selfStartTime", "selfDuration"}
	fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "flopsAnyAvg", "memBwAvg", "loadAvg", "memUsedMax", "exclusive", "node"}
	for _, k := range fieldsInOrder {
		v, ok := asMap[k]
		if !ok {
@ -11315,35 +11364,11 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
			if err != nil {
				return it, err
			}
		case "sharedNode":
		case "node":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sharedNode"))
			it.SharedNode, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
				return it, err
			}
		case "selfJobId":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("selfJobId"))
			it.SelfJobID, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
				return it, err
			}
		case "selfStartTime":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("selfStartTime"))
			it.SelfStartTime, err = ec.unmarshalOTime2ᚖtimeᚐTime(ctx, v)
			if err != nil {
				return it, err
			}
		case "selfDuration":
			var err error

			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("selfDuration"))
			it.SelfDuration, err = ec.unmarshalOInt2ᚖint(ctx, v)
			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("node"))
			it.Node, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
			if err != nil {
				return it, err
			}
@ -12055,6 +12080,10 @@ func (ec *executionContext) _JobLinkResultList(ctx context.Context, sel ast.Sele
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("JobLinkResultList")
		case "listQuery":

			out.Values[i] = ec._JobLinkResultList_listQuery(ctx, field, obj)

		case "items":

			out.Values[i] = ec._JobLinkResultList_items(ctx, field, obj)
@ -57,10 +57,7 @@ type JobFilter struct {
	LoadAvg       *FloatRange  `json:"loadAvg"`
	MemUsedMax    *FloatRange  `json:"memUsedMax"`
	Exclusive     *int         `json:"exclusive"`
	SharedNode    *StringInput `json:"sharedNode"`
	SelfJobID     *StringInput `json:"selfJobId"`
	SelfStartTime *time.Time   `json:"selfStartTime"`
	SelfDuration  *int         `json:"selfDuration"`
	Node          *StringInput `json:"node"`
}

type JobLink struct {
@ -69,8 +66,9 @@ type JobLink struct {
}

type JobLinkResultList struct {
	Items []*JobLink `json:"items"`
	Count *int       `json:"count"`
	ListQuery *string    `json:"listQuery"`
	Items     []*JobLink `json:"items"`
	Count     *int       `json:"count"`
}

type JobMetricWithName struct {
@ -33,31 +33,12 @@ func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag,
// ConcurrentJobs is the resolver for the concurrentJobs field.
func (r *jobResolver) ConcurrentJobs(ctx context.Context, obj *schema.Job) (*model.JobLinkResultList, error) {
	exc := int(obj.Exclusive)
	if exc != 1 {
		filter := []*model.JobFilter{}
		jid := fmt.Sprint(obj.JobID)
		jdu := int(obj.Duration)
		filter = append(filter, &model.JobFilter{Exclusive: &exc})
		filter = append(filter, &model.JobFilter{SharedNode: &model.StringInput{Contains: &obj.Resources[0].Hostname}})
		filter = append(filter, &model.JobFilter{SelfJobID: &model.StringInput{Neq: &jid}})
		filter = append(filter, &model.JobFilter{SelfStartTime: &obj.StartTime, SelfDuration: &jdu})
		if obj.State == schema.JobStateRunning {
			obj.Duration = int32(time.Now().Unix() - obj.StartTimeUnix)
		}

		jobLinks, err := r.Repo.QueryJobLinks(ctx, filter)
		if err != nil {
			log.Warn("Error while querying jobLinks")
			return nil, err
		}

		count, err := r.Repo.CountJobs(ctx, filter)
		if err != nil {
			log.Warn("Error while counting jobLinks")
			return nil, err
		}

		result := &model.JobLinkResultList{Items: jobLinks, Count: &count}

		return result, nil
	if obj.Exclusive != 1 && obj.Duration > 600 {
		return r.Repo.FindConcurrentJobs(ctx, obj)
	}

	return nil, nil
@ -16,13 +16,13 @@ type Hooks struct{}
// Before hook will print the query with its args and return the context with the timestamp
func (h *Hooks) Before(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
	log.Infof("SQL query %s %q", query, args)
	log.Debugf("SQL query %s %q", query, args)
	return context.WithValue(ctx, "begin", time.Now()), nil
}

// After hook will get the timestamp registered on the Before hook and print the elapsed time
func (h *Hooks) After(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
	begin := ctx.Value("begin").(time.Time)
	log.Infof("Took: %s\n", time.Since(begin))
	log.Debugf("Took: %s\n", time.Since(begin))
	return ctx, nil
}
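
For context, hooks like these are attached to the SQL driver when the connection is opened; a minimal sketch assuming the `github.com/qustavo/sqlhooks/v2` and `github.com/mattn/go-sqlite3` packages (driver name and DB path are illustrative):

```go
// Register a wrapped driver once at startup, then open the DB through it.
sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
db, err := sql.Open("sqlite3WithHooks", "./var/job.db")
```
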
@ -55,7 +55,6 @@ func GetJobRepository() *JobRepository {
		// start archiving worker
		go jobRepoInstance.archivingWorker()
	})

	return jobRepoInstance
}
@ -178,7 +177,7 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
	}

	r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
	log.Infof("Timer FetchMetadata %s", time.Since(start))
	log.Debugf("Timer FetchMetadata %s", time.Since(start))
	return job.MetaData, nil
}
@ -238,7 +237,7 @@ func (r *JobRepository) Find(
		q = q.Where("job.start_time = ?", *startTime)
	}

	log.Infof("Timer Find %s", time.Since(start))
	log.Debugf("Timer Find %s", time.Since(start))
	return scanJob(q.RunWith(r.stmtCache).QueryRow())
}
@ -278,7 +277,7 @@ func (r *JobRepository) FindAll(
		}
		jobs = append(jobs, job)
	}
	log.Infof("Timer FindAll %s", time.Since(start))
	log.Debugf("Timer FindAll %s", time.Since(start))
	return jobs, nil
}
@ -292,6 +291,104 @@ func (r *JobRepository) FindById(jobId int64) (*schema.Job, error) {
	return scanJob(q.RunWith(r.stmtCache).QueryRow())
}

func (r *JobRepository) FindConcurrentJobs(
	ctx context.Context,
	job *schema.Job) (*model.JobLinkResultList, error) {
	if job == nil {
		return nil, nil
	}

	query, qerr := SecurityCheck(ctx, sq.Select("job.id", "job.job_id", "job.start_time").From("job"))
	if qerr != nil {
		return nil, qerr
	}

	query = query.Where("cluster = ?", job.Cluster)
	var startTime int64
	var stopTime int64

	startTime = job.StartTimeUnix
	hostname := job.Resources[0].Hostname

	if job.State == schema.JobStateRunning {
		stopTime = time.Now().Unix()
	} else {
		stopTime = startTime + int64(job.Duration)
	}

	// Shrink the time window so that only jobs with a substantial overlap are
	// matched (10 s tolerance at the job's start, 200 s at the end)
	startTimeTail := startTime + 10
	stopTimeTail := stopTime - 200
	startTimeFront := startTime + 200

	queryRunning := query.Where("job.job_state = ?").Where("(job.start_time BETWEEN ? AND ? OR job.start_time < ?)",
		"running", startTimeTail, stopTimeTail, startTime)
	queryRunning = queryRunning.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%"))

	query = query.Where("job.job_state != ?").Where("((job.start_time BETWEEN ? AND ?) OR (job.start_time + job.duration) BETWEEN ? AND ? OR (job.start_time < ?) AND (job.start_time + job.duration) > ?)",
		"running", startTimeTail, stopTimeTail, startTimeFront, stopTimeTail, startTime, stopTime)
	query = query.Where("job.resources LIKE ?", fmt.Sprint("%", hostname, "%"))

	rows, err := query.RunWith(r.stmtCache).Query()
	if err != nil {
		log.Errorf("Error while running query: %v", err)
		return nil, err
	}

	items := make([]*model.JobLink, 0, 10)
	queryString := fmt.Sprintf("cluster=%s", job.Cluster)

	for rows.Next() {
		var id, jobId, startTime sql.NullInt64

		if err = rows.Scan(&id, &jobId, &startTime); err != nil {
			log.Warn("Error while scanning rows")
			return nil, err
		}

		if id.Valid {
			queryString += fmt.Sprintf("&jobId=%d", int(jobId.Int64))
			items = append(items,
				&model.JobLink{
					ID:    fmt.Sprint(id.Int64),
					JobID: int(jobId.Int64),
				})
		}
	}

	rows, err = queryRunning.RunWith(r.stmtCache).Query()
	if err != nil {
		log.Errorf("Error while running query: %v", err)
		return nil, err
	}

	for rows.Next() {
		var id, jobId, startTime sql.NullInt64

		if err := rows.Scan(&id, &jobId, &startTime); err != nil {
			log.Warn("Error while scanning rows")
			return nil, err
		}

		if id.Valid {
			queryString += fmt.Sprintf("&jobId=%d", int(jobId.Int64))
			items = append(items,
				&model.JobLink{
					ID:    fmt.Sprint(id.Int64),
					JobID: int(jobId.Int64),
				})
		}
	}

	cnt := len(items)

	return &model.JobLinkResultList{
		ListQuery: &queryString,
		Items:     items,
		Count:     &cnt,
	}, nil
}
// Start inserts a new job in the table, returning the unique job ID.
// Statistics are not transferred!
func (r *JobRepository) Start(job *schema.JobMeta) (id int64, err error) {
@ -344,7 +441,7 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
	if err != nil {
		log.Errorf(" DeleteJobsBefore(%d): error %#v", startTime, err)
	} else {
		log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
		log.Debugf("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
	}
	return cnt, err
}
@ -354,7 +451,7 @@ func (r *JobRepository) DeleteJobById(id int64) error {
	if err != nil {
		log.Errorf("DeleteJobById(%d): error %#v", id, err)
	} else {
		log.Infof("DeleteJobById(%d): Success", id)
		log.Debugf("DeleteJobById(%d): Success", id)
	}
	return err
}
@ -383,7 +480,7 @@ func (r *JobRepository) CountGroupedJobs(
			count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
			runner = r.DB
		default:
			log.Infof("CountGroupedJobs() Weight %v unknown.", *weight)
			log.Debugf("CountGroupedJobs() Weight %v unknown.", *weight)
		}
	}
@ -418,7 +515,7 @@ func (r *JobRepository) CountGroupedJobs(
		counts[group] = count
	}

	log.Infof("Timer CountGroupedJobs %s", time.Since(start))
	log.Debugf("Timer CountGroupedJobs %s", time.Since(start))
	return counts, nil
}
@ -457,7 +554,7 @@ func (r *JobRepository) MarkArchived(
		case "file_bw":
			stmt = stmt.Set("file_bw_avg", stats.Avg)
		default:
			log.Infof("MarkArchived() Metric '%v' unknown", metric)
			log.Debugf("MarkArchived() Metric '%v' unknown", metric)
		}
	}
@ -476,6 +573,7 @@ func (r *JobRepository) archivingWorker() {
			if !ok {
				break
			}
			start := time.Now()
			// not using meta data, called to load JobMeta into Cache?
			// will fail if job meta not in repository
			if _, err := r.FetchMetadata(job); err != nil {
@ -498,7 +596,7 @@ func (r *JobRepository) archivingWorker() {
				log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
				continue
			}

			log.Debugf("archiving job %d took %s", job.JobID, time.Since(start))
			log.Printf("archiving job (dbid: %d) successful", job.ID)
			r.archivePending.Done()
		}
@ -517,49 +615,35 @@ func (r *JobRepository) WaitForArchiving() {
	r.archivePending.Wait()
}

var ErrNotFound = errors.New("no such jobname, project or user")
var ErrForbidden = errors.New("not authorized")

// FindJobnameOrUserOrProject returns a jobName or a username or a projectId if a jobName or user or project matches the search term.
// If query is found to be an integer (= conversion to INT datatype succeeds), skip back to parent call
// If nothing matches the search, `ErrNotFound` is returned.

func (r *JobRepository) FindUserOrProjectOrJobname(ctx context.Context, searchterm string) (username string, project string, metasnip string, err error) {
func (r *JobRepository) FindUserOrProjectOrJobname(user *auth.User, searchterm string) (jobid string, username string, project string, jobname string) {
	if _, err := strconv.Atoi(searchterm); err == nil { // Return empty on successful conversion: parent method will redirect for integer jobId
		return "", "", "", nil
		return searchterm, "", "", ""
	} else { // Has to have letters and logged-in user for other guesses
		user := auth.GetUser(ctx)
		if user != nil {
			// Find username in jobs (match)
			uresult, _ := r.FindColumnValue(user, searchterm, "job", "user", "user", false)
			if uresult != "" {
				return uresult, "", "", nil
				return "", uresult, "", ""
			}
			// Find username by name (like)
			nresult, _ := r.FindColumnValue(user, searchterm, "user", "username", "name", true)
			if nresult != "" {
				return nresult, "", "", nil
				return "", nresult, "", ""
			}
			// Find projectId in jobs (match)
			presult, _ := r.FindColumnValue(user, searchterm, "job", "project", "project", false)
			if presult != "" {
				return "", presult, "", nil
			}
			// Still no return (or not authorized for above): Try JobName
			// Match Metadata, on hit, parent method redirects to jobName GQL query
			err := sq.Select("job.cluster").Distinct().From("job").
				Where("job.meta_data LIKE ?", "%"+searchterm+"%").
				RunWith(r.stmtCache).QueryRow().Scan(&metasnip)
			if err != nil && err != sql.ErrNoRows {
				return "", "", "", err
			} else if err == nil {
				return "", "", metasnip[0:1], nil
				return "", "", presult, ""
			}
		}
		return "", "", "", ErrNotFound
		// Return searchterm if no match before: Forward as jobname query to GQL in handleSearchbar function
		return "", "", "", searchterm
	}
}
var ErrNotFound = errors.New("no such jobname, project or user")
var ErrForbidden = errors.New("not authorized")

func (r *JobRepository) FindColumnValue(user *auth.User, searchterm string, table string, selectColumn string, whereColumn string, isLike bool) (result string, err error) {
	compareStr := " = ?"
	query := searchterm
@ -635,7 +719,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
	if err != nil {
		return nil, err
	}
	log.Infof("Timer Partitions %s", time.Since(start))
	log.Debugf("Timer Partitions %s", time.Since(start))
	return partitions.([]string), nil
}
@ -680,7 +764,7 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
		}
	}

	log.Infof("Timer AllocatedNodes %s", time.Since(start))
	log.Debugf("Timer AllocatedNodes %s", time.Since(start))
	return subclusters, nil
}
@ -709,7 +793,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
	if rowsAffected > 0 {
		log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
	}
	log.Infof("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
	log.Debugf("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
	return nil
}
@ -722,9 +806,11 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
	}

	if startTimeBegin == 0 {
		log.Infof("Find jobs before %d", startTimeEnd)
		query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
			"job.start_time < %d", startTimeEnd))
	} else {
		log.Infof("Find jobs between %d and %d", startTimeBegin, startTimeEnd)
		query = sq.Select(jobColumns...).From("job").Where(fmt.Sprintf(
			"job.start_time BETWEEN %d AND %d", startTimeBegin, startTimeEnd))
	}
@ -746,6 +832,7 @@ func (r *JobRepository) FindJobsBetween(startTimeBegin int64, startTimeEnd int64
		jobs = append(jobs, job)
	}

	log.Infof("Return job count %d", len(jobs))
	return jobs, nil
}
@ -16,7 +16,7 @@ import (
	"github.com/golang-migrate/migrate/v4/source/iofs"
)

const Version uint = 4
const Version uint = 6

//go:embed migrations/*
var migrationFiles embed.FS
@ -53,6 +53,8 @@ func checkDBVersion(backend string, db *sql.DB) error {
		if err != nil {
			return err
		}
	default:
		log.Fatalf("unsupported database backend: %s", backend)
	}

	v, _, err := m.Version()
@ -65,7 +67,7 @@ func checkDBVersion(backend string, db *sql.DB) error {
	}

	if v < Version {
		return fmt.Errorf("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, Version)
		return fmt.Errorf("unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend -migrate-db", v, Version)
	}

	if v > Version {
@ -99,6 +101,8 @@ func MigrateDB(backend string, db string) error {
		if err != nil {
			return err
		}
	default:
		log.Fatalf("unsupported database backend: %s", backend)
	}

	if err := m.Up(); err != nil {
@ -0,0 +1,2 @@
ALTER TABLE tag DROP COLUMN insert_time;
ALTER TABLE jobtag DROP COLUMN insert_time;
@ -0,0 +1,2 @@
ALTER TABLE tag ADD COLUMN insert_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
ALTER TABLE jobtag ADD COLUMN insert_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
@ -0,0 +1 @@
ALTER TABLE configuration MODIFY value VARCHAR(255);
@ -0,0 +1 @@
ALTER TABLE configuration MODIFY value TEXT;
@ -30,6 +30,8 @@ file_bw_avg REAL NOT NULL DEFAULT 0.0,
file_data_vol_total REAL NOT NULL DEFAULT 0.0,
UNIQUE (job_id, cluster, start_time));

UPDATE job SET job_state='cancelled' WHERE job_state='canceled';
INSERT INTO job_new SELECT * FROM job;
DROP TABLE job;
ALTER TABLE job_new RENAME TO job;
@ -0,0 +1,2 @@
ALTER TABLE tag DROP COLUMN insert_time;
ALTER TABLE jobtag DROP COLUMN insert_time;
18
internal/repository/migrations/sqlite3/05_extend-tags.up.sql
Normal file
@ -0,0 +1,18 @@
ALTER TABLE tag ADD COLUMN insert_ts TEXT DEFAULT NULL /* replace me */;
ALTER TABLE jobtag ADD COLUMN insert_ts TEXT DEFAULT NULL /* replace me */;
UPDATE tag SET insert_ts = CURRENT_TIMESTAMP;
UPDATE jobtag SET insert_ts = CURRENT_TIMESTAMP;
PRAGMA writable_schema = on;

UPDATE sqlite_master
SET sql = replace(sql, 'DEFAULT NULL /* replace me */',
'DEFAULT CURRENT_TIMESTAMP')
WHERE type = 'table'
AND name = 'tag';
UPDATE sqlite_master
SET sql = replace(sql, 'DEFAULT NULL /* replace me */',
'DEFAULT CURRENT_TIMESTAMP')
WHERE type = 'table'
AND name = 'jobtag';

PRAGMA writable_schema = off;
@ -0,0 +1,10 @@
CREATE TABLE IF NOT EXISTS configuration_new (
username varchar(255),
confkey varchar(255),
value varchar(255),
PRIMARY KEY (username, confkey),
FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);

INSERT INTO configuration_new SELECT * FROM configuration;
DROP TABLE configuration;
ALTER TABLE configuration_new RENAME TO configuration;
@ -0,0 +1,10 @@
CREATE TABLE IF NOT EXISTS configuration_new (
username varchar(255),
confkey varchar(255),
value text,
PRIMARY KEY (username, confkey),
FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);

INSERT INTO configuration_new SELECT * FROM configuration;
DROP TABLE configuration;
ALTER TABLE configuration_new RENAME TO configuration;
@ -48,16 +48,9 @@ func (r *JobRepository) queryJobs(
		query = BuildWhereClause(f, query)
	}

	sql, args, err := query.ToSql()
	if err != nil {
		log.Warn("Error while converting query to sql")
		return nil, err
	}

	log.Debugf("SQL query: `%s`, args: %#v", sql, args)
	rows, err := query.RunWith(r.stmtCache).Query()
	if err != nil {
		log.Error("Error while running query")
		log.Errorf("Error while running query: %v", err)
		return nil, err
	}
@ -92,7 +85,6 @@ func (r *JobRepository) QueryJobs(
	order *model.OrderByInput) ([]*schema.Job, error) {

	query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("job"))

	if qerr != nil {
		return nil, qerr
	}

	return r.queryJobs(query, filters, page, order)
}
// SecurityCheck-less, private: returns a list of minimal job information (DB-ID and jobId) of shared jobs for link-building based on the provided filters.
func (r *JobRepository) queryJobLinks(
	query sq.SelectBuilder,
	filters []*model.JobFilter) ([]*model.JobLink, error) {

	for _, f := range filters {
		query = BuildWhereClause(f, query)
	}

	sql, args, err := query.ToSql()
	if err != nil {
		log.Warn("Error while converting query to sql")
		return nil, err
	}

	log.Debugf("SQL query: `%s`, args: %#v", sql, args)
	rows, err := query.RunWith(r.stmtCache).Query()
	if err != nil {
		log.Error("Error while running query")
		return nil, err
	}

	jobLinks := make([]*model.JobLink, 0, 50)
	for rows.Next() {
		jobLink, err := scanJobLink(rows)
		if err != nil {
			rows.Close()
			log.Warn("Error while scanning rows (JobLinks)")
			return nil, err
		}
		jobLinks = append(jobLinks, jobLink)
	}

	return jobLinks, nil
}

// testFunction for queryJobLinks
func (r *JobRepository) testQueryJobLinks(
	filters []*model.JobFilter) ([]*model.JobLink, error) {

	return r.queryJobLinks(sq.Select(jobColumns...).From("job"), filters)
}

func (r *JobRepository) QueryJobLinks(
	ctx context.Context,
	filters []*model.JobFilter) ([]*model.JobLink, error) {

	query, qerr := SecurityCheck(ctx, sq.Select("job.id", "job.job_id").From("job"))

	if qerr != nil {
		return nil, qerr
	}

	return r.queryJobLinks(query, filters)
}

// SecurityCheck-less, private: Returns the number of jobs matching the filters
func (r *JobRepository) countJobs(query sq.SelectBuilder,
	filters []*model.JobFilter) (int, error) {
@ -164,13 +100,6 @@ func (r *JobRepository) countJobs(query sq.SelectBuilder,
		query = BuildWhereClause(f, query)
	}

	sql, args, err := query.ToSql()
	if err != nil {
		log.Warn("Error while converting query to sql")
		return 0, nil
	}

	log.Debugf("SQL query: `%s`, args: %#v", sql, args)
	var count int
	if err := query.RunWith(r.DB).Scan(&count); err != nil {
		return 0, err
@ -211,7 +140,7 @@ func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilde
		if len(user.Projects) != 0 {
			return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.user": user.Username}}), nil
		} else {
			log.Infof("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
			log.Debugf("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
			return query.Where("job.user = ?", user.Username), nil
		}
	} else if user.HasRole(auth.RoleUser) { // User : Only personal jobs
@ -279,6 +208,9 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
	if filter.NumHWThreads != nil {
		query = buildIntCondition("job.num_hwthreads", filter.NumHWThreads, query)
	}
	if filter.Node != nil {
		query = buildStringCondition("job.resources", filter.Node, query)
	}
	if filter.FlopsAnyAvg != nil {
		query = buildFloatCondition("job.flops_any_avg", filter.FlopsAnyAvg, query)
	}
@ -291,21 +223,6 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
	if filter.MemUsedMax != nil {
		query = buildFloatCondition("job.mem_used_max", filter.MemUsedMax, query)
	}
	// Shared Jobs Query
	if filter.Exclusive != nil {
		query = query.Where("job.exclusive = ?", *filter.Exclusive)
	}
	if filter.SharedNode != nil {
		query = buildStringCondition("job.resources", filter.SharedNode, query)
	}
	if filter.SelfJobID != nil {
		query = buildStringCondition("job.job_id", filter.SelfJobID, query)
	}
	if filter.SelfStartTime != nil && filter.SelfDuration != nil {
		start := filter.SelfStartTime.Unix() + 10 // There does not seem to be a portable way to get the current unix timestamp across different DBs.
		end := start + int64(*filter.SelfDuration) - 20
		query = query.Where("((job.start_time BETWEEN ? AND ?) OR ((job.start_time + job.duration) BETWEEN ? AND ?))", start, end, start, end)
	}
	return query
}
@ -346,11 +263,11 @@ func buildStringCondition(field string, cond *model.StringInput, query sq.Select
		return query.Where(field+" LIKE ?", fmt.Sprint("%", *cond.Contains, "%"))
	}
	if cond.In != nil {
		queryUsers := make([]string, len(cond.In))
		queryElements := make([]string, len(cond.In))
		for i, val := range cond.In {
			queryUsers[i] = val
			queryElements[i] = val
		}
		return query.Where(sq.Or{sq.Eq{"job.user": queryUsers}})
		return query.Where(sq.Or{sq.Eq{field: queryElements}})
	}
	return query
}
@ -171,7 +171,7 @@ func (r *JobRepository) JobsStatsGrouped(
		}
	}

	log.Infof("Timer JobsStatsGrouped %s", time.Since(start))
	log.Debugf("Timer JobsStatsGrouped %s", time.Since(start))
	return stats, nil
}
@ -212,7 +212,7 @@ func (r *JobRepository) JobsStats(
			TotalAccHours: totalAccHours})
	}

	log.Infof("Timer JobStats %s", time.Since(start))
	log.Debugf("Timer JobStats %s", time.Since(start))
	return stats, nil
}
@ -251,7 +251,7 @@ func (r *JobRepository) JobCountGrouped(
		}
	}

	log.Infof("Timer JobCountGrouped %s", time.Since(start))
	log.Debugf("Timer JobCountGrouped %s", time.Since(start))
	return stats, nil
}
@ -300,7 +300,7 @@ func (r *JobRepository) AddJobCountGrouped(
		}
	}

	log.Infof("Timer AddJobCountGrouped %s", time.Since(start))
	log.Debugf("Timer AddJobCountGrouped %s", time.Since(start))
	return stats, nil
}
@ -343,7 +343,7 @@ func (r *JobRepository) AddJobCount(
		}
	}

	log.Infof("Timer JobJobCount %s", time.Since(start))
	log.Debugf("Timer JobJobCount %s", time.Since(start))
	return stats, nil
}
@ -368,7 +368,7 @@ func (r *JobRepository) AddHistograms(
		return nil, err
	}

	log.Infof("Timer AddHistograms %s", time.Since(start))
	log.Debugf("Timer AddHistograms %s", time.Since(start))
	return stat, nil
}
@ -406,6 +406,6 @@ func (r *JobRepository) jobsStatisticsHistogram(
		points = append(points, &point)
	}
	log.Infof("Timer jobsStatisticsHistogram %s", time.Since(start))
	log.Debugf("Timer jobsStatisticsHistogram %s", time.Since(start))
	return points, nil
}
@ -70,14 +70,14 @@ func (r *JobRepository) CreateTag(tagType string, tagName string) (tagId int64,
func (r *JobRepository) CountTags(user *auth.User) (tags []schema.Tag, counts map[string]int, err error) {
	tags = make([]schema.Tag, 0, 100)
	xrows, err := r.DB.Queryx("SELECT * FROM tag")
	xrows, err := r.DB.Queryx("SELECT id, tag_type, tag_name FROM tag")
	if err != nil {
		return nil, nil, err
	}

	for xrows.Next() {
		var t schema.Tag
		if err := xrows.StructScan(&t); err != nil {
		if err = xrows.StructScan(&t); err != nil {
			return nil, nil, err
		}
		tags = append(tags, t)
@ -89,7 +89,7 @@ func (r *JobRepository) CountTags(user *auth.User) (tags []schema.Tag, counts ma
		GroupBy("t.tag_name")

	if user != nil && user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport}) { // ADMIN || SUPPORT: Count all jobs
		log.Info("CountTags: User Admin or Support -> Count all Jobs for Tags")
		log.Debug("CountTags: User Admin or Support -> Count all Jobs for Tags")
		// Unchanged: Needs to be own case still, due to UserRole/NoRole compatibility handling in else case
	} else if user != nil && user.HasRole(auth.RoleManager) { // MANAGER: Count own jobs plus project's jobs
		// Build ("project1", "project2", ...) list of variable length directly in SQL string
@ -107,7 +107,7 @@ func (r *JobRepository) CountTags(user *auth.User) (tags []schema.Tag, counts ma
	for rows.Next() {
		var tagName string
		var count int
		if err := rows.Scan(&tagName, &count); err != nil {
		if err = rows.Scan(&tagName, &count); err != nil {
			return nil, nil, err
		}
		counts[tagName] = count
@ -8,14 +8,15 @@ import (
	"fmt"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/ClusterCockpit/cc-backend/internal/api"
	"github.com/ClusterCockpit/cc-backend/internal/auth"
	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
	"github.com/ClusterCockpit/cc-backend/internal/repository"
	"github.com/ClusterCockpit/cc-backend/internal/util"
	"github.com/ClusterCockpit/cc-backend/pkg/log"
	"github.com/ClusterCockpit/cc-backend/web"
	"github.com/gorilla/mux"
@ -61,6 +62,16 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
	}

	i["clusters"] = stats

	if util.CheckFileExists("./var/notice.txt") {
		msg, err := os.ReadFile("./var/notice.txt")
		if err != nil {
			log.Warnf("failed to read notice.txt file: %s", err.Error())
		} else {
			i["message"] = string(msg)
		}
	}

	return i
}
@ -185,6 +196,9 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
			}
		}
	}
	if query.Get("node") != "" {
		filterPresets["node"] = query.Get("node")
	}
	if query.Get("numNodes") != "" {
		parts := strings.Split(query.Get("numNodes"), "-")
		if len(parts) == 2 {
@ -206,7 +220,13 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
		}
	}
	if query.Get("jobId") != "" {
		filterPresets["jobId"] = query.Get("jobId")
		if len(query["jobId"]) == 1 {
			filterPresets["jobId"] = query.Get("jobId")
			filterPresets["jobIdMatch"] = "eq"
		} else {
			filterPresets["jobId"] = query["jobId"]
			filterPresets["jobIdMatch"] = "in"
		}
	}
	if query.Get("arrayJobId") != "" {
		if num, err := strconv.Atoi(query.Get("arrayJobId")); err == nil {
@ -230,7 +250,7 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
	return filterPresets
}

func SetupRoutes(router *mux.Router, version string, hash string, buildTime string) {
func SetupRoutes(router *mux.Router, buildInfo web.Build) {
	userCfgRepo := repository.GetUserCfgRepo()
	for _, route := range routes {
		route := route
@ -256,7 +276,7 @@ func SetupRoutes(router *mux.Router, version string, hash string, buildTime stri
				Title: title,
				User:  *user,
				Roles: availableRoles,
				Build: web.Build{Version: version, Hash: hash, Buildtime: buildTime},
				Build: buildInfo,
				Config: conf,
				Infos:  infos,
			}
@ -270,66 +290,65 @@ func SetupRoutes(router *mux.Router, version string, hash string, buildTime stri
	}
}

func HandleSearchBar(rw http.ResponseWriter, r *http.Request, api *api.RestApi) {
func HandleSearchBar(rw http.ResponseWriter, r *http.Request, buildInfo web.Build) {
	user := auth.GetUser(r.Context())
	availableRoles, _ := auth.GetValidRolesMap(user)

	if search := r.URL.Query().Get("searchId"); search != "" {
		user := auth.GetUser(r.Context())
		repo := repository.GetJobRepository()
		splitSearch := strings.Split(search, ":")

		if len(splitSearch) == 2 {
			switch strings.Trim(splitSearch[0], " ") {
			case "jobId":
				http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect) // All Users: Redirect to Tablequery
				http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
			case "jobName":
				http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect) // All Users: Redirect to Tablequery
				http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
			case "projectId":
				http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect) // All Users: Redirect to Tablequery
				http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
			case "arrayJobId":
				http.Redirect(rw, r, "/monitoring/jobs/?arrayJobId="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound) // All Users: Redirect to Tablequery
			case "username":
				if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
					http.Redirect(rw, r, "/monitoring/users/?user="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect)
					http.Redirect(rw, r, "/monitoring/users/?user="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusFound)
				} else {
					http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Users: Redirect to Tablequery
					web.RenderTemplate(rw, r, "message.tmpl", &web.Page{Title: "Error", MsgType: "alert-danger", Message: "Missing Access Rights", User: *user, Roles: availableRoles, Build: buildInfo})
				}
			case "name":
				usernames, _ := api.JobRepository.FindColumnValues(user, strings.Trim(splitSearch[1], " "), "user", "username", "name")
				usernames, _ := repo.FindColumnValues(user, strings.Trim(splitSearch[1], " "), "user", "username", "name")
				if len(usernames) != 0 {
					joinedNames := strings.Join(usernames, "&user=")
					http.Redirect(rw, r, "/monitoring/users/?user="+joinedNames, http.StatusTemporaryRedirect)
					http.Redirect(rw, r, "/monitoring/users/?user="+joinedNames, http.StatusFound)
				} else {
					if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
						http.Redirect(rw, r, "/monitoring/users/?user=NoUserNameFound", http.StatusTemporaryRedirect)
						http.Redirect(rw, r, "/monitoring/users/?user=NoUserNameFound", http.StatusPermanentRedirect)
					} else {
						http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Users: Redirect to Tablequery
						web.RenderTemplate(rw, r, "message.tmpl", &web.Page{Title: "Error", MsgType: "alert-danger", Message: "Missing Access Rights", User: *user, Roles: availableRoles, Build: buildInfo})
					}
				}
			default:
				log.Warnf("Searchbar type parameter '%s' unknown", strings.Trim(splitSearch[0], " "))
				http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Unknown: Redirect to Tablequery
				web.RenderTemplate(rw, r, "message.tmpl", &web.Page{Title: "Warning", MsgType: "alert-warning", Message: fmt.Sprintf("Unknown search type: %s", strings.Trim(splitSearch[0], " ")), User: *user, Roles: availableRoles, Build: buildInfo})
			}

		} else if len(splitSearch) == 1 {
			username, project, jobname, err := api.JobRepository.FindUserOrProjectOrJobname(r.Context(), strings.Trim(search, " "))

			if err != nil {
				log.Errorf("Error while searchbar best guess: %v", err.Error())
				http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Unknown: Redirect to Tablequery
			}
			jobid, username, project, jobname := repo.FindUserOrProjectOrJobname(user, strings.Trim(search, " "))

			if username != "" {
				http.Redirect(rw, r, "/monitoring/user/"+username, http.StatusTemporaryRedirect) // User: Redirect to user page
			if jobid != "" {
				http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(jobid), http.StatusFound) // JobId (Match)
			} else if username != "" {
				http.Redirect(rw, r, "/monitoring/user/"+username, http.StatusFound) // User: Redirect to user page of first match
			} else if project != "" {
				http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(strings.Trim(search, " ")), http.StatusTemporaryRedirect) // projectId (equal)
				http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(project), http.StatusFound) // projectId (equal)
			} else if jobname != "" {
				http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(strings.Trim(search, " ")), http.StatusTemporaryRedirect) // JobName (contains)
				http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(jobname), http.StatusFound) // JobName (contains)
|
||||
} else {
|
||||
http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(strings.Trim(search, " ")), http.StatusTemporaryRedirect) // No Result: Probably jobId
|
||||
web.RenderTemplate(rw, r, "message.tmpl", &web.Page{Title: "Info", MsgType: "alert-info", Message: "Search without result", User: *user, Roles: availableRoles, Build: buildInfo})
|
||||
}
|
||||
|
||||
} else {
|
||||
log.Warnf("Searchbar query parameters malformed: %v", search)
|
||||
http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Unknown: Redirect to Tablequery
|
||||
web.RenderTemplate(rw, r, "message.tmpl", &web.Page{Title: "Error", MsgType: "alert-danger", Message: "Searchbar query parameters malformed", User: *user, Roles: availableRoles, Build: buildInfo})
|
||||
}
|
||||
|
||||
} else {
|
||||
http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect)
|
||||
web.RenderTemplate(rw, r, "message.tmpl", &web.Page{Title: "Warning", MsgType: "alert-warning", Message: "Empty search", User: *user, Roles: availableRoles, Build: buildInfo})
|
||||
}
|
||||
}
|
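The rewritten handler consistently redirects with http.StatusFound and renders message.tmpl for empty, malformed, or unauthorized searches instead of silently bouncing to the job table. The routing decision for the two-token "type:query" form reduces to a small pure function; a standalone sketch (strings.TrimSpace stands in for the strings.Trim(..., " ") calls above, and the username/name branches are omitted because they need the job repository):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// searchTarget maps a "type:query" search to its redirect target;
// an empty result means the real handler takes another branch.
func searchTarget(search string) string {
	parts := strings.Split(search, ":")
	if len(parts) != 2 {
		return ""
	}
	q := url.QueryEscape(strings.TrimSpace(parts[1]))
	switch strings.TrimSpace(parts[0]) {
	case "jobId":
		return "/monitoring/jobs/?jobId=" + q
	case "jobName":
		return "/monitoring/jobs/?jobName=" + q
	case "projectId":
		return "/monitoring/jobs/?projectMatch=eq&project=" + q
	case "arrayJobId":
		return "/monitoring/jobs/?arrayJobId=" + q
	}
	return ""
}

func main() {
	fmt.Println(searchTarget("projectId: a100cd"))
	// /monitoring/jobs/?projectMatch=eq&project=a100cd
}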
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
|
||||
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
|
||||
// All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
@ -20,6 +20,7 @@ func GetFilesize(filePath string) int64 {
|
||||
fileInfo, err := os.Stat(filePath)
|
||||
if err != nil {
|
||||
log.Errorf("Error on Stat %s: %v", filePath, err)
|
||||
return 0
|
||||
}
|
||||
return fileInfo.Size()
|
||||
}
|
||||
@ -28,6 +29,7 @@ func GetFilecount(path string) int {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
log.Errorf("Error on ReadDir %s: %v", path, err)
|
||||
return 0
|
||||
}
|
||||
|
||||
return len(files)
|
||||
|
75
internal/util/util_test.go
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
|
||||
// All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
package util_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ClusterCockpit/cc-backend/internal/util"
|
||||
)
|
||||
|
||||
func TestCheckFileExists(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
if !util.CheckFileExists(tmpdir) {
|
||||
t.Fatal("expected true, got false")
|
||||
}
|
||||
|
||||
filePath := filepath.Join(tmpdir, "version.txt")
|
||||
|
||||
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !util.CheckFileExists(filePath) {
|
||||
t.Fatal("expected true, got false")
|
||||
}
|
||||
|
||||
filePath = filepath.Join(tmpdir, "version-test.txt")
|
||||
if util.CheckFileExists(filePath) {
|
||||
t.Fatal("expected false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFileSize(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
filePath := filepath.Join(tmpdir, "data.json")
|
||||
|
||||
if s := util.GetFilesize(filePath); s > 0 {
|
||||
t.Fatalf("expected 0, got %d", s)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s := util.GetFilesize(filePath); s == 0 {
|
||||
t.Fatal("expected not 0, got 0")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFileCount(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
|
||||
if c := util.GetFilecount(tmpdir); c != 0 {
|
||||
t.Fatalf("expected 0, got %d", c)
|
||||
}
|
||||
|
||||
filePath := filepath.Join(tmpdir, "data-1.json")
|
||||
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
filePath = filepath.Join(tmpdir, "data-2.json")
|
||||
if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", 1)), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := util.GetFilecount(tmpdir); c != 2 {
|
||||
t.Fatalf("expected 2, got %d", c)
|
||||
}
|
||||
|
||||
if c := util.GetFilecount(filePath); c != 0 {
|
||||
t.Fatalf("expected 0, got %d", c)
|
||||
}
|
||||
}
|
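The new tests rely only on t.TempDir() and the standard library, so they should run in isolation with a plain `go test ./internal/util/`, without a database or job archive.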
@ -1,4 +1,4 @@
|
||||
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
|
||||
// Copyright (C) 2023 NHR@FAU, University Erlangen-Nuremberg.
|
||||
// All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
@ -348,7 +348,7 @@ func (fsa *FsArchive) Compress(jobs []*schema.Job) {
|
||||
|
||||
for _, job := range jobs {
|
||||
fileIn := getPath(job, fsa.path, "data.json")
|
||||
if !util.CheckFileExists(fileIn) && util.GetFilesize(fileIn) > 2000 {
|
||||
if util.CheckFileExists(fileIn) && util.GetFilesize(fileIn) > 2000 {
|
||||
util.CompressFile(fileIn, getPath(job, fsa.path, "data.json.gz"))
|
||||
cnt++
|
||||
}
|
||||
@ -372,6 +372,7 @@ func (fsa *FsArchive) CompressLast(starttime int64) int64 {
|
||||
return starttime
|
||||
}
|
||||
|
||||
log.Infof("fsBackend Compress - start %d last %d", starttime, last)
|
||||
os.WriteFile(filename, []byte(fmt.Sprintf("%d", starttime)), 0644)
|
||||
return last
|
||||
}
|
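The Compress change above is a one-character bug fix: the old guard negated the existence check, and since GetFilesize reports 0 for a missing file (see the hunk further up), `!exists && size > 2000` could never hold, so nothing was ever compressed. A toy illustration of the two guards (values stubbed in, no real files involved):

package main

import "fmt"

func main() {
	// A file that exists and is large enough to be worth compressing:
	exists, size := true, int64(4096)
	fmt.Println(!exists && size > 2000) // old guard: false -- branch was unreachable
	fmt.Println(exists && size > 2000)  // fixed guard: true
}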
||||
|
@ -124,9 +124,7 @@ type JobStatistics struct {
|
||||
// Tag model
|
||||
// @Description Defines a tag using name and type.
|
||||
type Tag struct {
|
||||
// The unique DB identifier of a tag
|
||||
// The unique DB identifier of a tag
|
||||
ID int64 `json:"id" db:"id"`
|
||||
ID int64 `json:"id" db:"id"` // The unique DB identifier of a tag
|
||||
Type string `json:"type" db:"tag_type" example:"Debug"` // Tag Type
|
||||
Name string `json:"name" db:"tag_name" example:"Testjob"` // Tag Name
|
||||
}
|
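The Tag hunk only moves the doc comment inline; the JSON wire format is unchanged. A quick marshalling check against a simplified copy of the struct (db and example tags dropped, since encoding/json ignores them anyway):

package main

import (
	"encoding/json"
	"fmt"
)

// Tag is a trimmed copy of the struct above, keeping only the json tags.
type Tag struct {
	ID   int64  `json:"id"`
	Type string `json:"type"`
	Name string `json:"name"`
}

func main() {
	b, _ := json.Marshal(Tag{ID: 1, Type: "Debug", Name: "Testjob"})
	fmt.Println(string(b)) // {"id":1,"type":"Debug","name":"Testjob"}
}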
||||
|
@ -1,6 +1,6 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft/2020-12/schema",
|
||||
"$id": "schemafs://job-metric-statistics.schema.json",
|
||||
"$id": "embedfs://job-metric-statistics.schema.json",
|
||||
"title": "Job statistics",
|
||||
"description": "Format specification for job metric statistics",
|
||||
"type": "object",
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
if [ -d './var' ]; then
|
||||
echo 'Directory ./var already exists! Skipping initialization.'
|
||||
./cc-backend --server --dev
|
||||
./cc-backend -server -dev
|
||||
else
|
||||
make
|
||||
wget https://hpc-mover.rrze.uni-erlangen.de/HPC-Data/0x7b58aefb/eig7ahyo6fo2bais0ephuf2aitohv1ai/job-archive-demo.tar
|
||||
@ -12,6 +12,6 @@ else
|
||||
cp ./configs/env-template.txt .env
|
||||
cp ./configs/config-demo.json config.json
|
||||
|
||||
./cc-backend --migrate-db
|
||||
./cc-backend --server --dev --init-db --add-user demo:admin:demo
|
||||
./cc-backend -migrate-db
|
||||
./cc-backend -server -dev -init-db -add-user demo:admin:demo
|
||||
fi
|
||||
|
@ -93,7 +93,7 @@
|
||||
<InputGroup>
|
||||
<Input type="text" placeholder="Search 'type:<query>' ..." name="searchId"/>
|
||||
<Button outline type="submit"><Icon name="search"/></Button>
|
||||
<InputGroupText style="cursor:help;" title={(authlevel >= roles.support) ? "Example: 'projectId:a100cd', Types are: jobId | jobName | projectId | username | name" : "Example: 'jobName:myjob', Types are jobId | jobName | projectId"}><Icon name="info-circle"/></InputGroupText>
|
||||
<InputGroupText style="cursor:help;" title={(authlevel >= roles.support) ? "Example: 'projectId:a100cd', Types are: jobId | jobName | projectId | arrayJobId | username | name" : "Example: 'jobName:myjob', Types are jobId | jobName | projectId | arrayJobId "}><Icon name="info-circle"/></InputGroupText>
|
||||
</InputGroup>
|
||||
</form>
|
||||
{#if username}
|
||||
|
@ -1,21 +1,37 @@
|
||||
<script>
|
||||
import { init, groupByScope, fetchMetricsStore, checkMetricDisabled } from './utils.js'
|
||||
import { Row, Col, Card, Spinner, TabContent, TabPane,
|
||||
CardBody, CardHeader, CardTitle, Button, Icon } from 'sveltestrap'
|
||||
import PlotTable from './PlotTable.svelte'
|
||||
import Metric from './Metric.svelte'
|
||||
import PolarPlot from './plots/Polar.svelte'
|
||||
import Roofline from './plots/Roofline.svelte'
|
||||
import JobInfo from './joblist/JobInfo.svelte'
|
||||
import TagManagement from './TagManagement.svelte'
|
||||
import MetricSelection from './MetricSelection.svelte'
|
||||
import Zoom from './Zoom.svelte'
|
||||
import StatsTable from './StatsTable.svelte'
|
||||
import { getContext } from 'svelte'
|
||||
import {
|
||||
init,
|
||||
groupByScope,
|
||||
fetchMetricsStore,
|
||||
checkMetricDisabled,
|
||||
} from "./utils.js";
|
||||
import {
|
||||
Row,
|
||||
Col,
|
||||
Card,
|
||||
Spinner,
|
||||
TabContent,
|
||||
TabPane,
|
||||
CardBody,
|
||||
CardHeader,
|
||||
CardTitle,
|
||||
Button,
|
||||
Icon,
|
||||
} from "sveltestrap";
|
||||
import PlotTable from "./PlotTable.svelte";
|
||||
import Metric from "./Metric.svelte";
|
||||
import PolarPlot from "./plots/Polar.svelte";
|
||||
import Roofline from "./plots/Roofline.svelte";
|
||||
import JobInfo from "./joblist/JobInfo.svelte";
|
||||
import TagManagement from "./TagManagement.svelte";
|
||||
import MetricSelection from "./MetricSelection.svelte";
|
||||
import Zoom from "./Zoom.svelte";
|
||||
import StatsTable from "./StatsTable.svelte";
|
||||
import { getContext } from "svelte";
|
||||
|
||||
export let dbid
|
||||
export let authlevel
|
||||
export let roles
|
||||
export let dbid;
|
||||
export let authlevel;
|
||||
export let roles;
|
||||
|
||||
const { query: initq } = init(`
|
||||
job(id: "${dbid}") {
|
||||
@ -27,147 +43,250 @@
|
||||
resources { hostname, hwthreads, accelerators },
|
||||
metaData,
|
||||
userData { name, email },
|
||||
concurrentJobs { items { id, jobId }, count }
|
||||
concurrentJobs { items { id, jobId }, count, listQuery }
|
||||
}
|
||||
`)
|
||||
`);
|
||||
|
||||
const ccconfig = getContext('cc-config'),
|
||||
clusters = getContext('clusters')
|
||||
const ccconfig = getContext("cc-config"),
|
||||
clusters = getContext("clusters"),
|
||||
metrics = getContext("metrics")
|
||||
|
||||
let isMetricsSelectionOpen = false, selectedMetrics = [], isFetched = new Set()
|
||||
const [jobMetrics, startFetching] = fetchMetricsStore()
|
||||
getContext('on-init')(() => {
|
||||
let job = $initq.data.job
|
||||
if (!job)
|
||||
return
|
||||
let isMetricsSelectionOpen = false,
|
||||
selectedMetrics = [],
|
||||
isFetched = new Set();
|
||||
const [jobMetrics, startFetching] = fetchMetricsStore();
|
||||
getContext("on-init")(() => {
|
||||
let job = $initq.data.job;
|
||||
if (!job) return;
|
||||
|
||||
selectedMetrics = ccconfig[`job_view_selectedMetrics:${job.cluster}`]
|
||||
|| clusters.find(c => c.name == job.cluster).metricConfig.map(mc => mc.name)
|
||||
selectedMetrics =
|
||||
ccconfig[`job_view_selectedMetrics:${job.cluster}`] ||
|
||||
clusters
|
||||
.find((c) => c.name == job.cluster)
|
||||
.metricConfig.map((mc) => mc.name);
|
||||
|
||||
let toFetch = new Set([
|
||||
'flops_any', 'mem_bw',
|
||||
"flops_any",
|
||||
"mem_bw",
|
||||
...selectedMetrics,
|
||||
...(ccconfig[`job_view_polarPlotMetrics:${job.cluster}`] || ccconfig[`job_view_polarPlotMetrics`]),
|
||||
...(ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] || ccconfig[`job_view_nodestats_selectedMetrics`])])
|
||||
...(ccconfig[`job_view_polarPlotMetrics:${job.cluster}`] ||
|
||||
ccconfig[`job_view_polarPlotMetrics`]),
|
||||
...(ccconfig[`job_view_nodestats_selectedMetrics:${job.cluster}`] ||
|
||||
ccconfig[`job_view_nodestats_selectedMetrics`]),
|
||||
]);
|
||||
|
||||
// Select default Scopes to load
|
||||
if (job.numAcc === 0) { // No Accels
|
||||
startFetching(job, [...toFetch], job.numNodes > 2 ? ["node"] : ["node", "core"])
|
||||
} else { // Accels
|
||||
startFetching(job, [...toFetch], job.numNodes > 2 ? ["node", "accelerator"] : ["node", "accelerator", "core"])
|
||||
// Select default Scopes to load: Check before if accelerator metrics are not on accelerator scope by default
|
||||
const accMetrics = ['acc_utilization', 'acc_mem_used', 'acc_power', 'nv_mem_util', 'nv_sm_clock', 'nv_temp']
|
||||
const accNodeOnly = [...toFetch].some(function(m) {
|
||||
if (accMetrics.includes(m)) {
|
||||
const mc = metrics(job.cluster, m)
|
||||
return mc.scope !== 'accelerator'
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
if (job.numAcc === 0 || accNodeOnly === true) {
|
||||
// No Accels or Accels on Node Scope
|
||||
startFetching(
|
||||
job,
|
||||
[...toFetch],
|
||||
job.numNodes > 2 ? ["node"] : ["node", "core"]
|
||||
);
|
||||
} else {
|
||||
// Accels and not on node scope
|
||||
startFetching(
|
||||
job,
|
||||
[...toFetch],
|
||||
job.numNodes > 2
|
||||
? ["node", "accelerator"]
|
||||
: ["node", "accelerator", "core"]
|
||||
);
|
||||
}
|
||||
|
||||
isFetched = toFetch
|
||||
})
|
||||
|
||||
isFetched = toFetch;
|
||||
});
|
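The accNodeOnly check above covers clusters whose accelerator metrics are only collected at node scope: if any of the listed accelerator metrics is configured with a scope other than 'accelerator', the job view no longer requests the accelerator scope at all and falls back to the node (and, for small jobs, core) scopes.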
||||
|
||||
const lazyFetchMoreMetrics = () => {
|
||||
let notYetFetched = new Set()
|
||||
let notYetFetched = new Set();
|
||||
for (let m of selectedMetrics) {
|
||||
if (!isFetched.has(m)) {
|
||||
notYetFetched.add(m)
|
||||
isFetched.add(m)
|
||||
notYetFetched.add(m);
|
||||
isFetched.add(m);
|
||||
}
|
||||
}
|
||||
|
||||
if (notYetFetched.size > 0)
|
||||
startFetching($initq.data.job, [...notYetFetched], $initq.data.job.numNodes > 2 ? ["node"] : ["node", "core"])
|
||||
}
|
||||
startFetching(
|
||||
$initq.data.job,
|
||||
[...notYetFetched],
|
||||
$initq.data.job.numNodes > 2 ? ["node"] : ["node", "core"]
|
||||
);
|
||||
};
|
||||
|
||||
// Fetch more data once required:
|
||||
$: if ($initq.data && $jobMetrics.data && selectedMetrics) lazyFetchMoreMetrics();
|
||||
$: if ($initq.data && $jobMetrics.data && selectedMetrics)
|
||||
lazyFetchMoreMetrics();
|
||||
|
||||
let plots = {}, jobTags, fullWidth, statsTable
|
||||
$: polarPlotSize = Math.min(fullWidth / 3 - 10, 300)
|
||||
$: document.title = $initq.fetching ? 'Loading...' : ($initq.error ? 'Error' : `Job ${$initq.data.job.jobId} - ClusterCockpit`)
|
||||
let plots = {},
|
||||
jobTags,
|
||||
fullWidth,
|
||||
statsTable;
|
||||
$: polarPlotSize = Math.min(fullWidth / 3 - 10, 300);
|
||||
$: document.title = $initq.fetching
|
||||
? "Loading..."
|
||||
: $initq.error
|
||||
? "Error"
|
||||
: `Job ${$initq.data.job.jobId} - ClusterCockpit`;
|
||||
|
||||
// Find out what metrics or hosts are missing:
|
||||
let missingMetrics = [], missingHosts = [], somethingMissing = false
|
||||
let missingMetrics = [],
|
||||
missingHosts = [],
|
||||
somethingMissing = false;
|
||||
$: if ($initq.data && $jobMetrics.data) {
|
||||
let job = $initq.data.job,
|
||||
metrics = $jobMetrics.data.jobMetrics,
|
||||
metricNames = clusters.find(c => c.name == job.cluster).metricConfig.map(mc => mc.name)
|
||||
metricNames = clusters
|
||||
.find((c) => c.name == job.cluster)
|
||||
.metricConfig.map((mc) => mc.name);
|
||||
|
||||
// Metric not found in JobMetrics && Metric not explicitly disabled: Was expected, but is Missing
|
||||
missingMetrics = metricNames.filter(metric => (!metrics.some(jm => jm.name == metric) && !checkMetricDisabled(metric, $initq.data.job.cluster, $initq.data.job.subCluster)))
|
||||
missingHosts = job.resources.map(({ hostname }) => ({
|
||||
hostname: hostname,
|
||||
metrics: metricNames.filter(metric => !metrics.some(jm => jm.scope == 'node' && jm.metric.series.some(series => series.hostname == hostname)))
|
||||
})).filter(({ metrics }) => metrics.length > 0)
|
||||
somethingMissing = missingMetrics.length > 0 || missingHosts.length > 0
|
||||
missingMetrics = metricNames.filter(
|
||||
(metric) =>
|
||||
!metrics.some((jm) => jm.name == metric) &&
|
||||
!checkMetricDisabled(
|
||||
metric,
|
||||
$initq.data.job.cluster,
|
||||
$initq.data.job.subCluster
|
||||
)
|
||||
);
|
||||
missingHosts = job.resources
|
||||
.map(({ hostname }) => ({
|
||||
hostname: hostname,
|
||||
metrics: metricNames.filter(
|
||||
(metric) =>
|
||||
!metrics.some(
|
||||
(jm) =>
|
||||
jm.scope == "node" &&
|
||||
jm.metric.series.some(
|
||||
(series) => series.hostname == hostname
|
||||
)
|
||||
)
|
||||
),
|
||||
}))
|
||||
.filter(({ metrics }) => metrics.length > 0);
|
||||
somethingMissing = missingMetrics.length > 0 || missingHosts.length > 0;
|
||||
}
|
||||
|
||||
const orderAndMap = (grouped, selectedMetrics) =>
|
||||
selectedMetrics.map(metric => ({
|
||||
const orderAndMap = (grouped, selectedMetrics) =>
|
||||
selectedMetrics.map((metric) => ({
|
||||
metric: metric,
|
||||
data: grouped.find((group) =>
|
||||
group[0].name == metric
|
||||
data: grouped.find((group) => group[0].name == metric),
|
||||
disabled: checkMetricDisabled(
|
||||
metric,
|
||||
$initq.data.job.cluster,
|
||||
$initq.data.job.subCluster
|
||||
),
|
||||
disabled: checkMetricDisabled(metric, $initq.data.job.cluster, $initq.data.job.subCluster)
|
||||
}))
|
||||
}));
|
||||
</script>
|
||||
|
||||
<div class="row" bind:clientWidth={fullWidth}></div>
|
||||
<div class="row" bind:clientWidth={fullWidth} />
|
||||
<Row>
|
||||
<Col>
|
||||
{#if $initq.error}
|
||||
<Card body color="danger">{$initq.error.message}</Card>
|
||||
{:else if $initq.data}
|
||||
<JobInfo job={$initq.data.job} jobTags={jobTags}/>
|
||||
<JobInfo job={$initq.data.job} {jobTags} />
|
||||
{:else}
|
||||
<Spinner secondary/>
|
||||
<Spinner secondary />
|
||||
{/if}
|
||||
</Col>
|
||||
{#if $jobMetrics.data && $initq.data}
|
||||
{#if $initq.data.job.concurrentJobs != null}
|
||||
{#if $initq.data.job.concurrentJobs != null && $initq.data.job.concurrentJobs.items.length != 0}
|
||||
{#if authlevel > roles.manager}
|
||||
<Col>
|
||||
<h5>Concurrent Jobs <Icon name="info-circle" style="cursor:help;" title="Shared jobs running on the same node with overlapping runtimes"/></h5>
|
||||
<h5>
|
||||
Concurrent Jobs <Icon
|
||||
name="info-circle"
|
||||
style="cursor:help;"
|
||||
title="Shared jobs running on the same node with overlapping runtimes"
|
||||
/>
|
||||
</h5>
|
||||
<ul>
|
||||
{#each $initq.data.job.concurrentJobs.items as pjob, index}
|
||||
<li><a href="/monitoring/job/{pjob.id}" target="_blank">{pjob.jobId}</a></li>
|
||||
{/each}
|
||||
<li>
|
||||
<a
|
||||
href="/monitoring/jobs/?{$initq.data.job
|
||||
.concurrentJobs.listQuery}"
|
||||
target="_blank">See All</a
|
||||
>
|
||||
</li>
|
||||
{#each $initq.data.job.concurrentJobs.items as pjob, index}
|
||||
<li>
|
||||
<a
|
||||
href="/monitoring/job/{pjob.id}"
|
||||
target="_blank">{pjob.jobId}</a
|
||||
>
|
||||
</li>
|
||||
{/each}
|
||||
</ul>
|
||||
</Col>
|
||||
{:else}
|
||||
<Col>
|
||||
<h5>{$initq.data.job.concurrentJobs.items.length} Concurrent Jobs</h5>
|
||||
<p>Number of shared jobs on the same node with overlapping runtimes.</p>
|
||||
<h5>
|
||||
{$initq.data.job.concurrentJobs.items.length} Concurrent
|
||||
Jobs
|
||||
</h5>
|
||||
<p>
|
||||
Number of shared jobs on the same node with overlapping
|
||||
runtimes.
|
||||
</p>
|
||||
</Col>
|
||||
{/if}
|
||||
{/if}
|
||||
<Col>
|
||||
<PolarPlot
|
||||
width={polarPlotSize} height={polarPlotSize}
|
||||
metrics={ccconfig[`job_view_polarPlotMetrics:${$initq.data.job.cluster}`] || ccconfig[`job_view_polarPlotMetrics`]}
|
||||
width={polarPlotSize}
|
||||
height={polarPlotSize}
|
||||
metrics={ccconfig[
|
||||
`job_view_polarPlotMetrics:${$initq.data.job.cluster}`
|
||||
] || ccconfig[`job_view_polarPlotMetrics`]}
|
||||
cluster={$initq.data.job.cluster}
|
||||
jobMetrics={$jobMetrics.data.jobMetrics}/>
|
||||
jobMetrics={$jobMetrics.data.jobMetrics}
|
||||
/>
|
||||
</Col>
|
||||
<Col>
|
||||
<Roofline
|
||||
width={fullWidth / 3 - 10} height={polarPlotSize}
|
||||
width={fullWidth / 3 - 10}
|
||||
height={polarPlotSize}
|
||||
cluster={clusters
|
||||
.find(c => c.name == $initq.data.job.cluster).subClusters
|
||||
.find(sc => sc.name == $initq.data.job.subCluster)}
|
||||
flopsAny={$jobMetrics.data.jobMetrics.find(m => m.name == 'flops_any' && m.scope == 'node')}
|
||||
memBw={$jobMetrics.data.jobMetrics.find(m => m.name == 'mem_bw' && m.scope == 'node')} />
|
||||
.find((c) => c.name == $initq.data.job.cluster)
|
||||
.subClusters.find(
|
||||
(sc) => sc.name == $initq.data.job.subCluster
|
||||
)}
|
||||
flopsAny={$jobMetrics.data.jobMetrics.find(
|
||||
(m) => m.name == "flops_any" && m.scope == "node"
|
||||
)}
|
||||
memBw={$jobMetrics.data.jobMetrics.find(
|
||||
(m) => m.name == "mem_bw" && m.scope == "node"
|
||||
)}
|
||||
/>
|
||||
</Col>
|
||||
{:else}
|
||||
<Col></Col>
|
||||
<Col></Col>
|
||||
<Col />
|
||||
<Col />
|
||||
{/if}
|
||||
</Row>
|
||||
<br/>
|
||||
<br />
|
||||
<Row>
|
||||
<Col xs="auto">
|
||||
{#if $initq.data}
|
||||
<TagManagement job={$initq.data.job} bind:jobTags={jobTags}/>
|
||||
<TagManagement job={$initq.data.job} bind:jobTags />
|
||||
{/if}
|
||||
</Col>
|
||||
<Col xs="auto">
|
||||
{#if $initq.data}
|
||||
<Button outline
|
||||
on:click={() => (isMetricsSelectionOpen = true)}>
|
||||
<Icon name="graph-up"/> Metrics
|
||||
<Button outline on:click={() => (isMetricsSelectionOpen = true)}>
|
||||
<Icon name="graph-up" /> Metrics
|
||||
</Button>
|
||||
{/if}
|
||||
</Col>
|
||||
@ -175,97 +294,139 @@
|
||||
<Zoom timeseriesPlots={plots} />
|
||||
</Col>
|
||||
</Row>
|
||||
<br/>
|
||||
<br />
|
||||
<Row>
|
||||
<Col>
|
||||
{#if $jobMetrics.error}
|
||||
{#if $initq.data.job.monitoringStatus == 0 || $initq.data.job.monitoringStatus == 2}
|
||||
<Card body color="warning">Not monitored or archiving failed</Card>
|
||||
<br/>
|
||||
<Card body color="warning"
|
||||
>Not monitored or archiving failed</Card
|
||||
>
|
||||
<br />
|
||||
{/if}
|
||||
<Card body color="danger">{$jobMetrics.error.message}</Card>
|
||||
{:else if $jobMetrics.fetching}
|
||||
<Spinner secondary/>
|
||||
<Spinner secondary />
|
||||
{:else if $jobMetrics.data && $initq.data}
|
||||
<PlotTable
|
||||
let:item
|
||||
let:width
|
||||
renderFor="job"
|
||||
items={orderAndMap(groupByScope($jobMetrics.data.jobMetrics), selectedMetrics)}
|
||||
itemsPerRow={ccconfig.plot_view_plotsPerRow}>
|
||||
items={orderAndMap(
|
||||
groupByScope($jobMetrics.data.jobMetrics),
|
||||
selectedMetrics
|
||||
)}
|
||||
itemsPerRow={ccconfig.plot_view_plotsPerRow}
|
||||
>
|
||||
{#if item.data}
|
||||
<Metric
|
||||
bind:this={plots[item.metric]}
|
||||
on:more-loaded={({ detail }) => statsTable.moreLoaded(detail)}
|
||||
on:more-loaded={({ detail }) =>
|
||||
statsTable.moreLoaded(detail)}
|
||||
job={$initq.data.job}
|
||||
metricName={item.metric}
|
||||
rawData={item.data.map(x => x.metric)}
|
||||
scopes={item.data.map(x => x.scope)}
|
||||
width={width}
|
||||
isShared={($initq.data.job.exclusive != 1)}/>
|
||||
rawData={item.data.map((x) => x.metric)}
|
||||
scopes={item.data.map((x) => x.scope)}
|
||||
{width}
|
||||
isShared={$initq.data.job.exclusive != 1}
|
||||
/>
|
||||
{:else}
|
||||
<Card body color="warning">No dataset returned for <code>{item.metric}</code></Card>
|
||||
<Card body color="warning"
|
||||
>No dataset returned for <code>{item.metric}</code
|
||||
></Card
|
||||
>
|
||||
{/if}
|
||||
</PlotTable>
|
||||
{/if}
|
||||
</Col>
|
||||
</Row>
|
||||
<br/>
|
||||
<br />
|
||||
<Row>
|
||||
<Col>
|
||||
{#if $initq.data}
|
||||
<TabContent>
|
||||
{#if somethingMissing}
|
||||
<TabPane tabId="resources" tab="Resources" active={somethingMissing}>
|
||||
<div style="margin: 10px;"><Card color="warning">
|
||||
<CardHeader>
|
||||
<CardTitle>Missing Metrics/Resources</CardTitle>
|
||||
</CardHeader>
|
||||
<CardBody>
|
||||
{#if missingMetrics.length > 0}
|
||||
<p>No data at all is available for the metrics: {missingMetrics.join(', ')}</p>
|
||||
{/if}
|
||||
{#if missingHosts.length > 0}
|
||||
<p>Some metrics are missing for the following hosts:</p>
|
||||
<ul>
|
||||
{#each missingHosts as missing}
|
||||
<li>{missing.hostname}: {missing.metrics.join(', ')}</li>
|
||||
{/each}
|
||||
</ul>
|
||||
{/if}
|
||||
</CardBody>
|
||||
</Card></div>
|
||||
</TabPane>
|
||||
{/if}
|
||||
<TabPane tabId="stats" tab="Statistics Table" active={!somethingMissing}>
|
||||
{#if $jobMetrics.data}
|
||||
{#key $jobMetrics.data}
|
||||
<StatsTable
|
||||
bind:this={statsTable}
|
||||
job={$initq.data.job}
|
||||
jobMetrics={$jobMetrics.data.jobMetrics} />
|
||||
{/key}
|
||||
<TabContent>
|
||||
{#if somethingMissing}
|
||||
<TabPane
|
||||
tabId="resources"
|
||||
tab="Resources"
|
||||
active={somethingMissing}
|
||||
>
|
||||
<div style="margin: 10px;">
|
||||
<Card color="warning">
|
||||
<CardHeader>
|
||||
<CardTitle
|
||||
>Missing Metrics/Reseources</CardTitle
|
||||
>
|
||||
</CardHeader>
|
||||
<CardBody>
|
||||
{#if missingMetrics.length > 0}
|
||||
<p>
|
||||
No data at all is available for the
|
||||
metrics: {missingMetrics.join(", ")}
|
||||
</p>
|
||||
{/if}
|
||||
{#if missingHosts.length > 0}
|
||||
<p>
|
||||
Some metrics are missing for the
|
||||
following hosts:
|
||||
</p>
|
||||
<ul>
|
||||
{#each missingHosts as missing}
|
||||
<li>
|
||||
{missing.hostname}: {missing.metrics.join(
|
||||
", "
|
||||
)}
|
||||
</li>
|
||||
{/each}
|
||||
</ul>
|
||||
{/if}
|
||||
</CardBody>
|
||||
</Card>
|
||||
</div>
|
||||
</TabPane>
|
||||
{/if}
|
||||
</TabPane>
|
||||
<TabPane tabId="job-script" tab="Job Script">
|
||||
<div class="pre-wrapper">
|
||||
{#if $initq.data.job.metaData?.jobScript}
|
||||
<pre><code>{$initq.data.job.metaData?.jobScript}</code></pre>
|
||||
{:else}
|
||||
<Card body color="warning">No job script available</Card>
|
||||
<TabPane
|
||||
tabId="stats"
|
||||
tab="Statistics Table"
|
||||
active={!somethingMissing}
|
||||
>
|
||||
{#if $jobMetrics.data}
|
||||
{#key $jobMetrics.data}
|
||||
<StatsTable
|
||||
bind:this={statsTable}
|
||||
job={$initq.data.job}
|
||||
jobMetrics={$jobMetrics.data.jobMetrics}
|
||||
/>
|
||||
{/key}
|
||||
{/if}
|
||||
</div>
|
||||
</TabPane>
|
||||
<TabPane tabId="slurm-info" tab="Slurm Info">
|
||||
<div class="pre-wrapper">
|
||||
{#if $initq.data.job.metaData?.slurmInfo}
|
||||
<pre><code>{$initq.data.job.metaData?.slurmInfo}</code></pre>
|
||||
{:else}
|
||||
<Card body color="warning">No additional slurm information available</Card>
|
||||
{/if}
|
||||
</div>
|
||||
</TabPane>
|
||||
</TabContent>
|
||||
</TabPane>
|
||||
<TabPane tabId="job-script" tab="Job Script">
|
||||
<div class="pre-wrapper">
|
||||
{#if $initq.data.job.metaData?.jobScript}
|
||||
<pre><code
|
||||
>{$initq.data.job.metaData?.jobScript}</code
|
||||
></pre>
|
||||
{:else}
|
||||
<Card body color="warning"
|
||||
>No job script available</Card
|
||||
>
|
||||
{/if}
|
||||
</div>
|
||||
</TabPane>
|
||||
<TabPane tabId="slurm-info" tab="Slurm Info">
|
||||
<div class="pre-wrapper">
|
||||
{#if $initq.data.job.metaData?.slurmInfo}
|
||||
<pre><code
|
||||
>{$initq.data.job.metaData?.slurmInfo}</code
|
||||
></pre>
|
||||
{:else}
|
||||
<Card body color="warning"
|
||||
>No additional slurm information available</Card
|
||||
>
|
||||
{/if}
|
||||
</div>
|
||||
</TabPane>
|
||||
</TabContent>
|
||||
{/if}
|
||||
</Col>
|
||||
</Row>
|
||||
@ -275,7 +436,8 @@
|
||||
cluster={$initq.data.job.cluster}
|
||||
configName="job_view_selectedMetrics"
|
||||
bind:metrics={selectedMetrics}
|
||||
bind:isOpen={isMetricsSelectionOpen} />
|
||||
bind:isOpen={isMetricsSelectionOpen}
|
||||
/>
|
||||
{/if}
|
||||
|
||||
<style>
|
||||
|
@ -119,10 +119,10 @@
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">
|
||||
<!-- {({ -->
|
||||
<!-- USER: "Username", -->
|
||||
<!-- PROJECT: "Project Name", -->
|
||||
<!-- })[type]} -->
|
||||
{({
|
||||
USER: "Username",
|
||||
PROJECT: "Project Name",
|
||||
})[type]}
|
||||
<Button
|
||||
color={sorting.field == "id" ? "primary" : "light"}
|
||||
size="sm"
|
||||
@ -216,14 +216,14 @@
|
||||
>
|
||||
{:else if type == "PROJECT"}
|
||||
<a href="/monitoring/jobs/?project={row.id}"
|
||||
>{row.id}</a
|
||||
>{scrambleNames ? scramble(row.id) : row.id}</a
|
||||
>
|
||||
{:else}
|
||||
{row.id}
|
||||
{/if}
|
||||
</td>
|
||||
{#if type == "USER"}
|
||||
<td>{row?.name ? row.name : ""}</td>
|
||||
<td>{scrambleNames ? scramble(row?.name?row.name:"-") : row?.name?row.name:"-"}</td>
|
||||
{/if}
|
||||
<td>{row.totalJobs}</td>
|
||||
<td>{row.totalWalltime}</td>
|
||||
|
@ -1,121 +1,226 @@
|
||||
<script>
|
||||
import { init, checkMetricDisabled } from './utils.js'
|
||||
import { Row, Col, InputGroup, InputGroupText, Icon, Spinner, Card } from 'sveltestrap'
|
||||
import { queryStore, gql, getContextClient } from '@urql/svelte'
|
||||
import TimeSelection from './filters/TimeSelection.svelte'
|
||||
import PlotTable from './PlotTable.svelte'
|
||||
import MetricPlot from './plots/MetricPlot.svelte'
|
||||
import { getContext } from 'svelte'
|
||||
import { init, checkMetricDisabled } from "./utils.js";
|
||||
import {
|
||||
Row,
|
||||
Col,
|
||||
InputGroup,
|
||||
InputGroupText,
|
||||
Icon,
|
||||
Spinner,
|
||||
Card,
|
||||
} from "sveltestrap";
|
||||
import { queryStore, gql, getContextClient } from "@urql/svelte";
|
||||
import TimeSelection from "./filters/TimeSelection.svelte";
|
||||
import PlotTable from "./PlotTable.svelte";
|
||||
import MetricPlot from "./plots/MetricPlot.svelte";
|
||||
import { getContext } from "svelte";
|
||||
|
||||
export let cluster
|
||||
export let hostname
|
||||
export let from = null
|
||||
export let to = null
|
||||
export let cluster;
|
||||
export let hostname;
|
||||
export let from = null;
|
||||
export let to = null;
|
||||
|
||||
const { query: initq } = init()
|
||||
const { query: initq } = init();
|
||||
|
||||
if (from == null || to == null) {
|
||||
to = new Date(Date.now())
|
||||
from = new Date(to.getTime())
|
||||
from.setMinutes(from.getMinutes() - 30)
|
||||
to = new Date(Date.now());
|
||||
from = new Date(to.getTime());
|
||||
from.setMinutes(from.getMinutes() - 30);
|
||||
}
|
||||
|
||||
const ccconfig = getContext('cc-config')
|
||||
const clusters = getContext('clusters')
|
||||
const ccconfig = getContext("cc-config");
|
||||
const clusters = getContext("clusters");
|
||||
const client = getContextClient();
|
||||
const query = gql`query($cluster: String!, $nodes: [String!], $from: Time!, $to: Time!) {
|
||||
nodeMetrics(cluster: $cluster, nodes: $nodes, from: $from, to: $to) {
|
||||
host
|
||||
subCluster
|
||||
metrics {
|
||||
name
|
||||
scope
|
||||
metric {
|
||||
timestep
|
||||
unit { base, prefix }
|
||||
series {
|
||||
statistics { min, avg, max }
|
||||
data
|
||||
const nodeMetricsQuery = gql`
|
||||
query ($cluster: String!, $nodes: [String!], $from: Time!, $to: Time!) {
|
||||
nodeMetrics(
|
||||
cluster: $cluster
|
||||
nodes: $nodes
|
||||
from: $from
|
||||
to: $to
|
||||
) {
|
||||
host
|
||||
subCluster
|
||||
metrics {
|
||||
name
|
||||
scope
|
||||
metric {
|
||||
timestep
|
||||
unit {
|
||||
base
|
||||
prefix
|
||||
}
|
||||
series {
|
||||
statistics {
|
||||
min
|
||||
avg
|
||||
max
|
||||
}
|
||||
data
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`;
|
||||
`;
|
||||
|
||||
$: nodesQuery = queryStore({
|
||||
$: nodeMetricsData = queryStore({
|
||||
client: client,
|
||||
query: query,
|
||||
query: nodeMetricsQuery,
|
||||
variables: {
|
||||
cluster: cluster,
|
||||
nodes: [hostname],
|
||||
from: from.toISOString(),
|
||||
to: to.toISOString(),
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
let metricUnits = {}
|
||||
$: if ($nodesQuery.data) {
|
||||
let thisCluster = clusters.find(c => c.name == cluster)
|
||||
let itemsPerPage = ccconfig.plot_list_jobsPerPage;
|
||||
let page = 1;
|
||||
let paging = { itemsPerPage, page };
|
||||
let sorting = { field: "startTime", order: "DESC" };
|
||||
$: filter = [
|
||||
{ cluster: { eq: cluster } },
|
||||
{ node: { contains: hostname } },
|
||||
{ state: ["running"] },
|
||||
// {startTime: {
|
||||
// from: from.toISOString(),
|
||||
// to: to.toISOString()
|
||||
// }}
|
||||
];
|
||||
|
||||
const nodeJobsQuery = gql`
|
||||
query (
|
||||
$filter: [JobFilter!]!
|
||||
$sorting: OrderByInput!
|
||||
$paging: PageRequest!
|
||||
) {
|
||||
jobs(filter: $filter, order: $sorting, page: $paging) {
|
||||
# items {
|
||||
# id
|
||||
# jobId
|
||||
# }
|
||||
count
|
||||
}
|
||||
}
|
||||
`;
|
||||
|
||||
$: nodeJobsData = queryStore({
|
||||
client: client,
|
||||
query: nodeJobsQuery,
|
||||
variables: { paging, sorting, filter },
|
||||
});
|
||||
|
||||
let metricUnits = {};
|
||||
$: if ($nodeMetricsData.data) {
|
||||
let thisCluster = clusters.find((c) => c.name == cluster);
|
||||
if (thisCluster) {
|
||||
for (let metric of thisCluster.metricConfig) {
|
||||
if (metric.unit.prefix || metric.unit.base) {
|
||||
metricUnits[metric.name] = '(' + (metric.unit.prefix ? metric.unit.prefix : '') + (metric.unit.base ? metric.unit.base : '') + ')'
|
||||
} else { // If no unit defined: Omit Unit Display
|
||||
metricUnits[metric.name] = ''
|
||||
metricUnits[metric.name] =
|
||||
"(" +
|
||||
(metric.unit.prefix ? metric.unit.prefix : "") +
|
||||
(metric.unit.base ? metric.unit.base : "") +
|
||||
")";
|
||||
} else {
|
||||
// If no unit defined: Omit Unit Display
|
||||
metricUnits[metric.name] = "";
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// $: console.log($nodesQuery?.data?.nodeMetrics[0].metrics)
|
||||
const dateToUnixEpoch = (rfc3339) => Math.floor(Date.parse(rfc3339) / 1000);
|
||||
</script>
|
||||
|
||||
<Row>
|
||||
{#if $initq.error}
|
||||
<Card body color="danger">{$initq.error.message}</Card>
|
||||
{:else if $initq.fetching}
|
||||
<Spinner/>
|
||||
<Spinner />
|
||||
{:else}
|
||||
<Col>
|
||||
<InputGroup>
|
||||
<InputGroupText><Icon name="hdd"/></InputGroupText>
|
||||
<InputGroupText><Icon name="hdd" /></InputGroupText>
|
||||
<InputGroupText>{hostname} ({cluster})</InputGroupText>
|
||||
</InputGroup>
|
||||
</Col>
|
||||
<Col>
|
||||
<TimeSelection
|
||||
bind:from={from}
|
||||
bind:to={to} />
|
||||
{#if $nodeJobsData.fetching}
|
||||
<Spinner />
|
||||
{:else if $nodeJobsData.data}
|
||||
Currently running jobs on this node: {$nodeJobsData.data.jobs
|
||||
.count}
|
||||
[
|
||||
<a
|
||||
href="/monitoring/jobs/?cluster={cluster}&state=running&node={hostname}"
|
||||
target="_blank">View in Job List</a
|
||||
> ]
|
||||
{:else}
|
||||
No currently running jobs.
|
||||
{/if}
|
||||
</Col>
|
||||
<Col>
|
||||
<TimeSelection bind:from bind:to />
|
||||
</Col>
|
||||
{/if}
|
||||
</Row>
|
||||
<br/>
|
||||
<br />
|
||||
<Row>
|
||||
<Col>
|
||||
{#if $nodesQuery.error}
|
||||
<Card body color="danger">{$nodesQuery.error.message}</Card>
|
||||
{:else if $nodesQuery.fetching || $initq.fetching}
|
||||
<Spinner/>
|
||||
{#if $nodeMetricsData.error}
|
||||
<Card body color="danger">{$nodeMetricsData.error.message}</Card>
|
||||
{:else if $nodeMetricsData.fetching || $initq.fetching}
|
||||
<Spinner />
|
||||
{:else}
|
||||
<PlotTable
|
||||
let:item
|
||||
let:width
|
||||
renderFor="node"
|
||||
itemsPerRow={ccconfig.plot_view_plotsPerRow}
|
||||
items={$nodesQuery.data.nodeMetrics[0].metrics
|
||||
.map(m => ({ ...m, disabled: checkMetricDisabled(m.name, cluster, $nodesQuery.data.nodeMetrics[0].subCluster)}))
|
||||
.sort((a, b) => a.name.localeCompare(b.name))}>
|
||||
|
||||
<h4 style="text-align: center; padding-top:15px;">{item.name} {metricUnits[item.name]}</h4>
|
||||
items={$nodeMetricsData.data.nodeMetrics[0].metrics
|
||||
.map((m) => ({
|
||||
...m,
|
||||
disabled: checkMetricDisabled(
|
||||
m.name,
|
||||
cluster,
|
||||
$nodeMetricsData.data.nodeMetrics[0].subCluster
|
||||
),
|
||||
}))
|
||||
.sort((a, b) => a.name.localeCompare(b.name))}
|
||||
>
|
||||
<h4 style="text-align: center; padding-top:15px;">
|
||||
{item.name}
|
||||
{metricUnits[item.name]}
|
||||
</h4>
|
||||
{#if item.disabled === false && item.metric}
|
||||
<MetricPlot
|
||||
width={width} height={300} metric={item.name} timestep={item.metric.timestep}
|
||||
cluster={clusters.find(c => c.name == cluster)} subCluster={$nodesQuery.data.nodeMetrics[0].subCluster}
|
||||
series={item.metric.series} />
|
||||
{width}
|
||||
height={300}
|
||||
metric={item.name}
|
||||
timestep={item.metric.timestep}
|
||||
cluster={clusters.find((c) => c.name == cluster)}
|
||||
subCluster={$nodeMetricsData.data.nodeMetrics[0]
|
||||
.subCluster}
|
||||
series={item.metric.series}
|
||||
/>
|
||||
{:else if item.disabled === true && item.metric}
|
||||
<Card style="margin-left: 2rem;margin-right: 2rem;" body color="info">Metric disabled for subcluster <code>{item.name}:{$nodesQuery.data.nodeMetrics[0].subCluster}</code></Card>
|
||||
<Card
|
||||
style="margin-left: 2rem;margin-right: 2rem;"
|
||||
body
|
||||
color="info"
|
||||
>Metric disabled for subcluster <code
|
||||
>{item.name}:{$nodeMetricsData.data.nodeMetrics[0]
|
||||
.subCluster}</code
|
||||
></Card
|
||||
>
|
||||
{:else}
|
||||
<Card style="margin-left: 2rem;margin-right: 2rem;" body color="warning">No dataset returned for <code>{item.name}</code></Card>
|
||||
<Card
|
||||
style="margin-left: 2rem;margin-right: 2rem;"
|
||||
body
|
||||
color="warning"
|
||||
>No dataset returned for <code>{item.name}</code></Card
|
||||
>
|
||||
{/if}
|
||||
</PlotTable>
|
||||
{/if}
|
||||
|
@ -32,8 +32,9 @@
|
||||
export let startTimeQuickSelect = false
|
||||
|
||||
let filters = {
|
||||
projectMatch: filterPresets.projectMatch || 'contains',
|
||||
userMatch: filterPresets.userMatch || 'contains',
|
||||
projectMatch: filterPresets.projectMatch || 'contains',
|
||||
userMatch: filterPresets.userMatch || 'contains',
|
||||
jobIdMatch: filterPresets.jobIdMatch || 'eq',
|
||||
|
||||
cluster: filterPresets.cluster || null,
|
||||
partition: filterPresets.partition || null,
|
||||
@ -47,6 +48,7 @@
|
||||
project: filterPresets.project || '',
|
||||
jobName: filterPresets.jobName || '',
|
||||
|
||||
node: filterPresets.node || null,
|
||||
numNodes: filterPresets.numNodes || { from: null, to: null },
|
||||
numHWThreads: filterPresets.numHWThreads || { from: null, to: null },
|
||||
numAccelerators: filterPresets.numAccelerators || { from: null, to: null },
|
||||
@ -74,6 +76,8 @@
|
||||
let items = []
|
||||
if (filters.cluster)
|
||||
items.push({ cluster: { eq: filters.cluster } })
|
||||
if (filters.node)
|
||||
items.push({ node: { contains: filters.node } })
|
||||
if (filters.partition)
|
||||
items.push({ partition: { eq: filters.partition } })
|
||||
if (filters.states.length != allJobStates.length)
|
||||
@ -85,7 +89,7 @@
|
||||
if (filters.duration.from || filters.duration.to)
|
||||
items.push({ duration: { from: filters.duration.from, to: filters.duration.to } })
|
||||
if (filters.jobId)
|
||||
items.push({ jobId: { eq: filters.jobId } })
|
||||
items.push({ jobId: { [filters.jobIdMatch]: filters.jobId } })
|
||||
if (filters.arrayJobId != null)
|
||||
items.push({ arrayJobId: filters.arrayJobId })
|
||||
if (filters.numNodes.from != null || filters.numNodes.to != null)
|
||||
@ -114,6 +118,8 @@
|
||||
let opts = []
|
||||
if (filters.cluster)
|
||||
opts.push(`cluster=${filters.cluster}`)
|
||||
if (filters.node)
|
||||
opts.push(`node=${filters.node}`)
|
||||
if (filters.partition)
|
||||
opts.push(`partition=${filters.partition}`)
|
||||
if (filters.states.length != allJobStates.length)
|
||||
@ -125,6 +131,15 @@
|
||||
// } else {
|
||||
opts.push(`startTime=${dateToUnixEpoch(filters.startTime.from)}-${dateToUnixEpoch(filters.startTime.to)}`)
|
||||
// }
|
||||
if (filters.jobId.length != 0)
|
||||
if (filters.jobIdMatch != 'in') {
|
||||
opts.push(`jobId=${filters.jobId}`)
|
||||
} else {
|
||||
for (let singleJobId of filters.jobId)
|
||||
opts.push(`jobId=${singleJobId}`)
|
||||
}
|
||||
if (filters.jobIdMatch != 'eq')
|
||||
opts.push(`jobIdMatch=${filters.jobIdMatch}`)
|
||||
for (let tag of filters.tags)
|
||||
opts.push(`tag=${tag}`)
|
||||
if (filters.duration.from && filters.duration.to)
|
||||
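With the new branch above, a multi-ID filter serializes one jobId= parameter per ID plus the match mode, e.g. jobId=101&jobId=102&jobIdMatch=in (IDs invented for illustration); that is exactly the shape the buildFilterPresets change at the top of this diff parses back into an "in" preset.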
@ -272,6 +287,12 @@
|
||||
</Info>
|
||||
{/if}
|
||||
|
||||
{#if filters.node != null }
|
||||
<Info icon="hdd-stack" on:click={() => (isResourcesOpen = true)}>
|
||||
Node: {filters.node}
|
||||
</Info>
|
||||
{/if}
|
||||
|
||||
{#if filters.stats.length > 0}
|
||||
<Info icon="bar-chart" on:click={() => (isStatsOpen = true)}>
|
||||
{filters.stats.map(stat => `${stat.text}: ${stat.from} - ${stat.to}`).join(', ')}
|
||||
@ -318,6 +339,7 @@
|
||||
bind:numNodes={filters.numNodes}
|
||||
bind:numHWThreads={filters.numHWThreads}
|
||||
bind:numAccelerators={filters.numAccelerators}
|
||||
bind:namedNode={filters.node}
|
||||
bind:isNodesModified={isNodesModified}
|
||||
bind:isHwthreadsModified={isHwthreadsModified}
|
||||
bind:isAccsModified={isAccsModified}
|
||||
|
@ -16,8 +16,9 @@
|
||||
export let isNodesModified = false
|
||||
export let isHwthreadsModified = false
|
||||
export let isAccsModified = false
|
||||
export let namedNode = null
|
||||
|
||||
let pendingNumNodes = numNodes, pendingNumHWThreads = numHWThreads, pendingNumAccelerators = numAccelerators
|
||||
let pendingNumNodes = numNodes, pendingNumHWThreads = numHWThreads, pendingNumAccelerators = numAccelerators, pendingNamedNode = namedNode
|
||||
|
||||
const findMaxNumAccels = clusters => clusters.reduce((max, cluster) => Math.max(max,
|
||||
cluster.subClusters.reduce((max, sc) => Math.max(max, sc.topology.accelerators?.length || 0), 0)), 0)
|
||||
@ -76,7 +77,9 @@
|
||||
Select number of utilized Resources
|
||||
</ModalHeader>
|
||||
<ModalBody>
|
||||
<h6>Number of Nodes</h6>
|
||||
<h6>Named Node</h6>
|
||||
<input type="text" class="form-control" bind:value={pendingNamedNode}>
|
||||
<h6 style="margin-top: 1rem;">Number of Nodes</h6>
|
||||
<DoubleRangeSlider
|
||||
on:change={({ detail }) => {
|
||||
pendingNumNodes = { from: detail[0], to: detail[1] }
|
||||
@ -117,7 +120,8 @@
|
||||
numNodes ={ from: pendingNumNodes.from, to: pendingNumNodes.to }
|
||||
numHWThreads = { from: pendingNumHWThreads.from, to: pendingNumHWThreads.to }
|
||||
numAccelerators = { from: pendingNumAccelerators.from, to: pendingNumAccelerators.to }
|
||||
dispatch('update', { numNodes, numHWThreads, numAccelerators })
|
||||
namedNode = pendingNamedNode
|
||||
dispatch('update', { numNodes, numHWThreads, numAccelerators, namedNode })
|
||||
}}>
|
||||
Close & Apply
|
||||
</Button>
|
||||
@ -126,13 +130,15 @@
|
||||
pendingNumNodes = { from: null, to: null }
|
||||
pendingNumHWThreads = { from: null, to: null }
|
||||
pendingNumAccelerators = { from: null, to: null }
|
||||
pendingNamedNode = null
|
||||
numNodes = { from: pendingNumNodes.from, to: pendingNumNodes.to }
|
||||
numHWThreads = { from: pendingNumHWThreads.from, to: pendingNumHWThreads.to }
|
||||
numAccelerators = { from: pendingNumAccelerators.from, to: pendingNumAccelerators.to }
|
||||
isNodesModified = false
|
||||
isHwthreadsModified = false
|
||||
isAccsModified = false
|
||||
dispatch('update', { numNodes, numHWThreads, numAccelerators })
|
||||
namedNode = pendingNamedNode
|
||||
dispatch('update', { numNodes, numHWThreads, numAccelerators, namedNode})
|
||||
}}>Reset</Button>
|
||||
<Button on:click={() => (isOpen = false)}>Close</Button>
|
||||
</ModalFooter>
|
||||
|
@ -7,7 +7,10 @@
|
||||
-->
|
||||
<script context="module">
|
||||
export const scrambleNames = window.localStorage.getItem("cc-scramble-names")
|
||||
export const scramble = (str) => [...str].reduce((x, c, i) => x * 7 + c.charCodeAt(0) * i * 21, 5).toString(32)
|
||||
export const scramble = function(str) {
|
||||
if (str === '-') return str
|
||||
else return [...str].reduce((x, c, i) => x * 7 + c.charCodeAt(0) * i * 21, 5).toString(32).substr(0, 6)
|
||||
}
|
||||
</script>
|
||||
<script>
|
||||
import Tag from '../Tag.svelte';
|
||||
|
@ -270,6 +270,7 @@
|
||||
}
|
||||
|
||||
export function findThresholds(metricConfig, scope, subCluster) {
|
||||
// console.log('NAME ' + metricConfig.name + ' / SCOPE ' + scope + ' / SUBCLUSTER ' + subCluster.name)
|
||||
if (!metricConfig || !scope || !subCluster) {
|
||||
console.warn('Argument missing for findThresholds!')
|
||||
return null
|
||||
@ -280,8 +281,10 @@
|
||||
// console.log('subClusterConfigs array empty, use metricConfig defaults')
|
||||
return { normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert }
|
||||
} else if (metricConfig.subClusters && metricConfig.subClusters.length > 0) {
|
||||
// console.log('subClusterConfigs found, find and use subCluster Settings')
|
||||
return metricConfig.subClusters.find(sc => sc.name == subCluster.name)
|
||||
// console.log('subClusterConfigs found, use subCluster Settings if matching jobs subcluster:')
|
||||
let forSubCluster = metricConfig.subClusters.find(sc => sc.name == subCluster.name)
|
||||
if (forSubCluster && forSubCluster.normal && forSubCluster.caution && forSubCluster.alert) return forSubCluster
|
||||
else return { normal: metricConfig.normal, caution: metricConfig.caution, alert: metricConfig.alert }
|
||||
} else {
|
||||
console.warn('metricConfig.subClusters not found!')
|
||||
return null
|
||||
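The findThresholds change tightens the subcluster fallback: a per-subcluster entry now wins only if it defines all of normal, caution, and alert, while an incomplete override falls back to the metric-wide defaults instead of being returned as-is.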
|
@ -1,4 +1,20 @@
|
||||
{{define "content"}}
|
||||
{{if .Infos.message }}
|
||||
<div class="row justify-content-center">
|
||||
<div class="col-6">
|
||||
<div class="alert alert-info p-3" role="alert">
|
||||
<div class="row align-items-center">
|
||||
<div class="col-2">
|
||||
<h2><i class="bi-info-circle-fill m-3"></i></h2>
|
||||
</div>
|
||||
<div class="col-10">
|
||||
{{.Infos.message}}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{end}}
|
||||
<div class="row">
|
||||
<div class="col">
|
||||
<h2>Clusters</h2>
|
||||
|
@ -17,15 +17,9 @@
|
||||
<div class="container">
|
||||
<div class="row">
|
||||
<div class="col-4 mx-auto">
|
||||
{{if .Error}}
|
||||
<div class="alert alert-warning" role="alert">
|
||||
{{.Error}}
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
{{if .Info}}
|
||||
<div class="alert alert-success" role="alert">
|
||||
{{.Info}}
|
||||
{{if .MsgType}}
|
||||
<div class="alert {{.MsgType}}" role="alert">
|
||||
{{.Message}}
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
|
17
web/templates/message.tmpl
Normal file
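The new message.tmpl below is the render target of the web.RenderTemplate(rw, r, "message.tmpl", ...) calls introduced in the search-bar hunk above; the Bootstrap icon is selected from the page's MsgType and the body text comes from Message.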
@ -0,0 +1,17 @@
|
||||
|
||||
{{define "content"}}
|
||||
<div class="row justify-content-center">
|
||||
<div class="col-4">
|
||||
<div class="alert {{.MsgType}} p-3 text-center fs-3" role="alert">
|
||||
{{if eq .MsgType "alert-info"}}
|
||||
<i class="bi-info-circle-fill me-3"></i>
|
||||
{{else if eq .MsgType "alert-warning"}}
|
||||
<i class="bi-question-circle-fill me-3"></i>
|
||||
{{else if eq .MsgType "alert-danger"}}
|
||||
<i class="bi-exclamation-circle-fill me-3"></i>
|
||||
{{end}}
|
||||
{{.Message}}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{end}}
|
23
web/web.go
@ -9,11 +9,11 @@ import (
|
||||
"html/template"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ClusterCockpit/cc-backend/internal/auth"
|
||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||
"github.com/ClusterCockpit/cc-backend/internal/util"
|
||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
||||
)
|
||||
@ -47,8 +47,16 @@ func init() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if path == "templates/login.tmpl" {
|
||||
if util.CheckFileExists("./var/login.tmpl") {
|
||||
log.Info("overwrite login.tmpl with local file")
|
||||
templates[strings.TrimPrefix(path, "templates/")] =
|
||||
template.Must(template.Must(base.Clone()).ParseFiles("./var/login.tmpl"))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if path == "templates/imprint.tmpl" {
|
||||
if _, err := os.Stat("./var/imprint.tmpl"); err == nil {
|
||||
if util.CheckFileExists("./var/imprint.tmpl") {
|
||||
log.Info("overwrite imprint.tmpl with local file")
|
||||
templates[strings.TrimPrefix(path, "templates/")] =
|
||||
template.Must(template.Must(base.Clone()).ParseFiles("./var/imprint.tmpl"))
|
||||
@ -56,7 +64,7 @@ func init() {
|
||||
}
|
||||
}
|
||||
if path == "templates/privacy.tmpl" {
|
||||
if _, err := os.Stat("./var/privacy.tmpl"); err == nil {
|
||||
if util.CheckFileExists("./var/privacy.tmpl") {
|
||||
log.Info("overwrite privacy.tmpl with local file")
|
||||
templates[strings.TrimPrefix(path, "templates/")] =
|
||||
template.Must(template.Must(base.Clone()).ParseFiles("./var/privacy.tmpl"))
|
||||
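login.tmpl now gets the same ./var override treatment as imprint.tmpl and privacy.tmpl, which leaves three copies of one pattern in init(). A hypothetical consolidation, sketched against the surrounding code (the helper name is invented and not part of this commit; templates, util, and log are the package-level names visible in the hunk):

// overrideFromVar installs a local ./var copy of an embedded template,
// if one exists; it returns true when an override was installed.
func overrideFromVar(base *template.Template, name string) bool {
	local := "./var/" + name
	if !util.CheckFileExists(local) {
		return false
	}
	log.Infof("overwrite %s with local file", name)
	templates[name] = template.Must(template.Must(base.Clone()).ParseFiles(local))
	return true
}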
@ -82,8 +90,8 @@ type Build struct {
|
||||
|
||||
type Page struct {
|
||||
Title string // Page title
|
||||
Error string // For generic use (e.g. the exact error message on /login)
|
||||
Info string // For generic use (e.g. "Logout successfull" on /login)
|
||||
MsgType string // For generic use in message boxes
|
||||
Message string // For generic use in message boxes
|
||||
User auth.User // Information about the currently logged in user (Full User Info)
|
||||
Roles map[string]auth.Role // Available roles for frontend render checks
|
||||
Build Build // Latest information about the application
|
||||
@ -96,8 +104,7 @@ type Page struct {
|
||||
func RenderTemplate(rw http.ResponseWriter, r *http.Request, file string, page *Page) {
|
||||
t, ok := templates[file]
|
||||
if !ok {
|
||||
log.Fatalf("WEB/WEB > template '%s' not found", file)
|
||||
panic("template not found")
|
||||
log.Errorf("WEB/WEB > template '%s' not found", file)
|
||||
}
|
||||
|
||||
if page.Clusters == nil {
|
||||
@ -106,7 +113,7 @@ func RenderTemplate(rw http.ResponseWriter, r *http.Request, file string, page *
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Page config : %v\n", page.Config)
|
||||
log.Debugf("Page config : %v\n", page.Config)
|
||||
if err := t.Execute(rw, page); err != nil {
|
||||
log.Errorf("Template error: %s", err.Error())
|
||||
}
|
||||
|