commit f8ba79e9e7
Merge branch 'master' into import-data-sanitation
Makefile (19 lines changed):
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,6 @@
 TARGET = ./cc-backend
 VAR = ./var
+DB = ./var/job.db
 FRONTEND = ./web/frontend
 VERSION = 0.1
 GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
@@ -23,11 +24,11 @@ SVELTE_SRC = $(wildcard $(FRONTEND)/src/*.svelte) \
 	$(wildcard $(FRONTEND)/src/plots/*.svelte) \
 	$(wildcard $(FRONTEND)/src/joblist/*.svelte)
 
-.PHONY: clean test $(TARGET)
+.PHONY: clean test tags $(TARGET)
 
 .NOTPARALLEL:
 
-$(TARGET): $(VAR) $(SVELTE_TARGETS)
+$(TARGET): $(VAR) $(DB) $(SVELTE_TARGETS)
 	$(info ===> BUILD cc-backend)
 	@go build -ldflags=${LD_FLAGS} ./cmd/cc-backend
 
@@ -42,11 +43,17 @@ test:
 	@go vet ./...
 	@go test ./...
 
-$(SVELTE_TARGETS): $(SVELTE_SRC)
-	$(info ===> BUILD frontend)
-	cd web/frontend && yarn build
+tags:
+	$(info ===> TAGS)
+	@ctags -R
 
 $(VAR):
 	@mkdir $(VAR)
-	@touch ./var/job.db
 	cd web/frontend && yarn install
+
+$(DB):
+	./cc-backend --migrate-db
+
+$(SVELTE_TARGETS): $(SVELTE_SRC)
+	$(info ===> BUILD frontend)
+	cd web/frontend && yarn build
README.md (31 lines changed):
--- a/README.md
+++ b/README.md
@@ -4,6 +4,8 @@
 
 This is a Golang backend implementation for a REST and GraphQL API according to the [ClusterCockpit specifications](https://github.com/ClusterCockpit/cc-specifications).
 It also includes a web interface for ClusterCockpit.
+While there is a backend for the InfluxDB timeseries database, the only tested and supported setup is using cc-metric-store as a metric data backend.
+We will add documentation on how to integrate ClusterCockpit with other timeseries databases in the future.
 This implementation replaces the previous PHP Symfony based ClusterCockpit web-interface.
 [Here](https://github.com/ClusterCockpit/ClusterCockpit/wiki/Why-we-switched-from-PHP-Symfony-to-a-Golang-based-solution) is a discussion of the reasons why we switched from PHP Symfony to a Golang based solution.
 
@@ -24,6 +26,18 @@ You find more detailed information here:
 * `./configs/README.md`: Infos about configuration and setup of cc-backend.
 * `./init/README.md`: Infos on how to set up cc-backend as a systemd service on Linux.
 * `./tools/README.md`: Infos on the JWT authorization token workflows in ClusterCockpit.
+* `./docs`: You can find further documentation here. There is also a hands-on tutorial that is recommended to get familiar with the ClusterCockpit setup.
+
+**NOTICE**
+
+ClusterCockpit requires a recent version of the golang toolchain.
+You can check in `go.mod` which minimal golang version is currently required.
+Homebrew and Archlinux usually have up-to-date golang versions. For other Linux
+distros this often means you have to install the golang compiler yourself.
+Fortunately this is easy with golang. Since a lot of functionality is based on
+the go standard library, it is crucial for security and performance to use a
+recent golang version. Also, an old golang toolchain may restrict the supported
+versions of third-party packages.
 
 ## Demo Setup
 
@@ -31,7 +45,7 @@ We provide a shell script that downloads demo data and automatically builds and
 You need `wget`, `go`, and `yarn` in your path to start the demo. The demo will download 32MB of data (223MB on disk).
 
 ```sh
-git clone git@github.com:ClusterCockpit/cc-backend.git
+git clone https://github.com/ClusterCockpit/cc-backend.git
 cd ./cc-backend
 ./startDemo.sh
 ```
@@ -48,7 +62,7 @@ There is a Makefile to automate the build of cc-backend. The Makefile supports t
 
 A common workflow to set up cc-backend from scratch is:
 ```sh
-git clone git@github.com:ClusterCockpit/cc-backend.git
+git clone https://github.com/ClusterCockpit/cc-backend.git
 
 # Build binary
 cd ./cc-backend/
@@ -95,6 +109,15 @@ A config file in the JSON format has to be provided using `--config` to override
 By default, if there is a `config.json` file in the current directory of the `cc-backend` process, it will be loaded even without the `--config` flag.
 You find documentation of all supported configuration and command line options [here](./configs/README.md).
 
+## Database initialization and migration
+
+Every cc-backend version supports a specific database version.
+On startup the version of the sqlite database is validated and cc-backend will terminate if the version does not match.
+cc-backend can migrate the database schema up to the required version using the `--migrate-db` command line option.
+In case the database file does not yet exist, it is created and initialized by the `--migrate-db` command line option.
+In case you want to use a newer database version with an older version of cc-backend, you can downgrade the database using the external [migrate](https://github.com/golang-migrate/migrate) tool.
+In this case you have to provide the path to the migration files in a recent source tree: `./internal/repository/migrations/`.
+
 ## Development
 In case the REST or GraphQL API is changed, the corresponding code generators have to be used.
 
@@ -112,8 +135,10 @@ This project integrates [swagger ui](https://swagger.io/tools/swagger-ui/) to do
 The swagger doc files can be found in `./api/`.
 You can generate the configuration of swagger-ui by running `go run github.com/swaggo/swag/cmd/swag init -d ./internal/api,./pkg/schema -g rest.go -o ./api `.
 You need to move the generated `./api/doc.go` to `./internal/api/doc.go`.
-If you start cc-backend with flag `--dev` the Swagger UI is available at http://localhost:8080/swagger .
+If you start cc-backend with flag `--dev` the Swagger UI is available at http://localhost:8080/swagger/ .
+You have to enter a JWT key for a user with role API.
+
+**NOTICE** The user owning the JWT token must not be logged in in the same browser (i.e. must not have a running session), otherwise Swagger requests will not work. It is recommended to create a separate user that has just the API role.
 
 ## Project Structure
 
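The README section above points to the external migrate tool for downgrades. Besides its CLI, golang-migrate also offers a Go API; the following is a minimal sketch of a one-step downgrade, assuming the sqlite3 driver and using placeholder paths (the migration directory layout inside `./internal/repository/migrations/` should be checked against a recent source tree):

```go
// Sketch: roll the job database back by one schema version via the
// golang-migrate Go API. Driver choice and both paths are assumptions.
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/sqlite3" // registers the sqlite3:// scheme
	_ "github.com/golang-migrate/migrate/v4/source/file"      // registers the file:// scheme
)

func main() {
	m, err := migrate.New(
		"file://./internal/repository/migrations", // migration files from a recent source tree
		"sqlite3://./var/job.db",                  // the cc-backend job database
	)
	if err != nil {
		log.Fatal(err)
	}
	// Steps(-1) rolls back exactly one migration.
	if err := m.Steps(-1); err != nil {
		log.Fatal(err)
	}
}
```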
|
@ -10,6 +10,7 @@ type Job {
|
|||||||
jobId: Int!
|
jobId: Int!
|
||||||
user: String!
|
user: String!
|
||||||
project: String!
|
project: String!
|
||||||
|
jobName: String
|
||||||
cluster: String!
|
cluster: String!
|
||||||
subCluster: String!
|
subCluster: String!
|
||||||
startTime: Time!
|
startTime: Time!
|
||||||
@ -212,6 +213,7 @@ input JobFilter {
|
|||||||
arrayJobId: Int
|
arrayJobId: Int
|
||||||
user: StringInput
|
user: StringInput
|
||||||
project: StringInput
|
project: StringInput
|
||||||
|
jobName: StringInput
|
||||||
cluster: StringInput
|
cluster: StringInput
|
||||||
partition: StringInput
|
partition: StringInput
|
||||||
duration: IntRange
|
duration: IntRange
|
||||||
@ -245,6 +247,7 @@ input StringInput {
|
|||||||
contains: String
|
contains: String
|
||||||
startsWith: String
|
startsWith: String
|
||||||
endsWith: String
|
endsWith: String
|
||||||
|
in: [String!]
|
||||||
}
|
}
|
||||||
|
|
||||||
input IntRange { from: Int!, to: Int! }
|
input IntRange { from: Int!, to: Int! }
|
||||||
@ -265,6 +268,7 @@ type HistoPoint {
|
|||||||
|
|
||||||
type JobsStatistics {
|
type JobsStatistics {
|
||||||
id: ID! # If `groupBy` was used, ID of the user/project/cluster
|
id: ID! # If `groupBy` was used, ID of the user/project/cluster
|
||||||
|
name: String # if User-Statistics: Given Name of Account (ID) Owner
|
||||||
totalJobs: Int! # Number of jobs that matched
|
totalJobs: Int! # Number of jobs that matched
|
||||||
shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
|
shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
|
||||||
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
|
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
|
||||||
|
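The schema changes above add a `jobName` field and filter plus an `in` operator on `StringInput`. A minimal sketch of how a client might exercise the new filter against the `/query` GraphQL endpoint — the `jobs(filter: [JobFilter!])` query root, the `items` result field, and JWT auth via the `X-Auth-Token` header are assumptions to be checked against the full schema; the token is a placeholder:

```go
// Hypothetical client-side sketch: filter jobs by the new jobName field.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	query := `query($filter: [JobFilter!]) {
	  jobs(filter: $filter) {
	    items { jobId jobName user cluster }
	  }
	}`
	body, _ := json.Marshal(map[string]interface{}{
		"query": query,
		"variables": map[string]interface{}{
			"filter": []map[string]interface{}{
				{"jobName": map[string]interface{}{"contains": "bench"}},
			},
		},
	})
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/query", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Auth-Token", "<JWT>") // placeholder token
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```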
api/swagger.json (414 lines changed):
--- a/api/swagger.json
+++ b/api/swagger.json
@@ -1,9 +1,8 @@
 {
     "swagger": "2.0",
     "info": {
-        "description": "Defines a tag using name and type.",
+        "description": "API for batch job control.",
         "title": "ClusterCockpit REST API",
-        "termsOfService": "https://monitoring.nhr.fau.de/imprint",
         "contact": {
             "name": "ClusterCockpit Project",
             "url": "https://github.com/ClusterCockpit",
@@ -13,9 +12,9 @@
             "name": "MIT License",
             "url": "https://opensource.org/licenses/MIT"
         },
-        "version": "0.1.0"
+        "version": "0.2.0"
     },
-    "host": "clustercockpit.localhost:8082",
+    "host": "localhost:8080",
     "basePath": "/api",
     "paths": {
         "/jobs/": {
@@ -26,12 +25,12 @@
                     }
                 ],
                 "description": "Get a list of all jobs. Filters can be applied using query parameters.\nNumber of results can be limited by page. Results are sorted by descending startTime.",
-                "consumes": [
-                    "application/json"
-                ],
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "query"
+                ],
                 "summary": "Lists all jobs",
                 "parameters": [
                     {
@@ -62,13 +61,13 @@
                     },
                     {
                         "type": "integer",
-                        "description": "Items per page (If empty: No Limit)",
+                        "description": "Items per page (Default: 25)",
                         "name": "items-per-page",
                         "in": "query"
                     },
                     {
                         "type": "integer",
-                        "description": "Page Number (If empty: No Paging)",
+                        "description": "Page Number (Default: 1)",
                         "name": "page",
                         "in": "query"
                     },
@@ -81,12 +80,9 @@
                 ],
                 "responses": {
                     "200": {
-                        "description": "Array of matching jobs",
+                        "description": "Job array and page info",
                         "schema": {
-                            "type": "array",
-                            "items": {
-                                "$ref": "#/definitions/schema.Job"
-                            }
+                            "$ref": "#/definitions/api.GetJobsApiResponse"
                         }
                     },
                     "400": {
@@ -101,6 +97,227 @@
                             "$ref": "#/definitions/api.ErrorResponse"
                         }
                     },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    }
+                }
+            }
+        },
+        "/jobs/delete_job/": {
+            "delete": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "Job to delete is specified by request body. All fields are required in this case.",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "remove"
+                ],
+                "summary": "Remove a job from the sql database",
+                "parameters": [
+                    {
+                        "description": "All fields required",
+                        "name": "request",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/api.DeleteJobApiRequest"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "Success message",
+                        "schema": {
+                            "$ref": "#/definitions/api.DeleteJobApiResponse"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "404": {
+                        "description": "Resource not found",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "422": {
+                        "description": "Unprocessable Entity: finding job failed: sql: no rows in result set",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    }
+                }
+            }
+        },
+        "/jobs/delete_job/{id}": {
+            "delete": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "Job to remove is specified by database ID. This will not remove the job from the job archive.",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "remove"
+                ],
+                "summary": "Remove a job from the sql database",
+                "parameters": [
+                    {
+                        "type": "integer",
+                        "description": "Database ID of Job",
+                        "name": "id",
+                        "in": "path",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "Success message",
+                        "schema": {
+                            "$ref": "#/definitions/api.DeleteJobApiResponse"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "404": {
+                        "description": "Resource not found",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "422": {
+                        "description": "Unprocessable Entity: finding job failed: sql: no rows in result set",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "500": {
+                        "description": "Internal Server Error",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    }
+                }
+            }
+        },
+        "/jobs/delete_job_before/{ts}": {
+            "delete": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "Remove all jobs with start time before timestamp. The jobs will not be removed from the job archive.",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "remove"
+                ],
+                "summary": "Remove a job from the sql database",
+                "parameters": [
+                    {
+                        "type": "integer",
+                        "description": "Unix epoch timestamp",
+                        "name": "ts",
+                        "in": "path",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "Success message",
+                        "schema": {
+                            "$ref": "#/definitions/api.DeleteJobApiResponse"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "401": {
+                        "description": "Unauthorized",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "403": {
+                        "description": "Forbidden",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "404": {
+                        "description": "Resource not found",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
+                    "422": {
+                        "description": "Unprocessable Entity: finding job failed: sql: no rows in result set",
+                        "schema": {
+                            "$ref": "#/definitions/api.ErrorResponse"
+                        }
+                    },
                     "500": {
                         "description": "Internal Server Error",
                         "schema": {
@@ -124,6 +341,9 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "add and modify"
+                ],
                 "summary": "Adds a new job as \"running\"",
                 "parameters": [
                     {
@@ -187,6 +407,9 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "add and modify"
+                ],
                 "summary": "Marks job as completed and triggers archiving",
                 "parameters": [
                     {
@@ -201,7 +424,7 @@
                 ],
                 "responses": {
                     "200": {
-                        "description": "Job resource",
+                        "description": "Success message",
                         "schema": {
                             "$ref": "#/definitions/schema.JobMeta"
                         }
@@ -259,6 +482,9 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "add and modify"
+                ],
                 "summary": "Marks job as completed and triggers archiving",
                 "parameters": [
                     {
@@ -338,6 +564,9 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "add and modify"
+                ],
                 "summary": "Adds one or more tags to a job",
                 "parameters": [
                     {
@@ -355,7 +584,7 @@
                         "schema": {
                             "type": "array",
                             "items": {
-                                "$ref": "#/definitions/api.Tag"
+                                "$ref": "#/definitions/api.ApiTag"
                             }
                         }
                     }
@@ -396,8 +625,53 @@
        }
    },
    "definitions": {
+        "api.ApiTag": {
+            "type": "object",
+            "properties": {
+                "name": {
+                    "description": "Tag Name",
+                    "type": "string",
+                    "example": "Testjob"
+                },
+                "type": {
+                    "description": "Tag Type",
+                    "type": "string",
+                    "example": "Debug"
+                }
+            }
+        },
+        "api.DeleteJobApiRequest": {
+            "type": "object",
+            "required": [
+                "jobId"
+            ],
+            "properties": {
+                "cluster": {
+                    "description": "Cluster of job",
+                    "type": "string",
+                    "example": "fritz"
+                },
+                "jobId": {
+                    "description": "Cluster Job ID of job",
+                    "type": "integer",
+                    "example": 123000
+                },
+                "startTime": {
+                    "description": "Start Time of job as epoch",
+                    "type": "integer",
+                    "example": 1649723812
+                }
+            }
+        },
+        "api.DeleteJobApiResponse": {
+            "type": "object",
+            "properties": {
+                "msg": {
+                    "type": "string"
+                }
+            }
+        },
         "api.ErrorResponse": {
-            "description": "Error message as returned from backend.",
             "type": "object",
             "properties": {
                 "error": {
@@ -410,8 +684,27 @@
                }
            }
        },
+        "api.GetJobsApiResponse": {
+            "type": "object",
+            "properties": {
+                "items": {
+                    "description": "Number of jobs returned",
+                    "type": "integer"
+                },
+                "jobs": {
+                    "description": "Array of jobs",
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/schema.JobMeta"
+                    }
+                },
+                "page": {
+                    "description": "Page id returned",
+                    "type": "integer"
+                }
+            }
+        },
         "api.StartJobApiResponse": {
-            "description": "Successful job start response with database id of new job.",
             "type": "object",
             "properties": {
                 "id": {
@@ -421,7 +714,6 @@
             }
         },
         "api.StopJobApiRequest": {
-            "description": "Request to stop running job using stoptime and final state. They are only required if no database id was provided with endpoint.",
             "type": "object",
             "required": [
                 "jobState",
@@ -439,14 +731,11 @@
                    "example": 123000
                },
                "jobState": {
-                    "description": "Final state of job",
-                    "type": "string",
-                    "enum": [
-                        "completed",
-                        "failed",
-                        "cancelled",
-                        "stopped",
-                        "timeout"
+                    "description": "Final job state",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/schema.JobState"
+                        }
                    ],
                    "example": "completed"
                },
@@ -462,22 +751,6 @@
                }
            }
        },
-        "api.Tag": {
-            "description": "Defines a tag using name and type.",
-            "type": "object",
-            "properties": {
-                "name": {
-                    "description": "Tag Name",
-                    "type": "string",
-                    "example": "Testjob"
-                },
-                "type": {
-                    "description": "Tag Type",
-                    "type": "string",
-                    "example": "Debug"
-                }
-            }
-        },
        "schema.Job": {
            "description": "Information of a HPC job.",
            "type": "object",
@@ -516,14 +789,10 @@
                },
                "jobState": {
                    "description": "Final state of job",
-                    "type": "string",
-                    "enum": [
-                        "completed",
-                        "failed",
-                        "cancelled",
-                        "stopped",
-                        "timeout",
-                        "out_of_memory"
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/schema.JobState"
+                        }
                    ],
                    "example": "completed"
                },
@@ -648,14 +917,10 @@
                },
                "jobState": {
                    "description": "Final state of job",
-                    "type": "string",
-                    "enum": [
-                        "completed",
-                        "failed",
-                        "cancelled",
-                        "stopped",
-                        "timeout",
-                        "out_of_memory"
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/schema.JobState"
+                        }
                    ],
                    "example": "completed"
                },
@@ -751,6 +1016,29 @@
                }
            }
        },
+        "schema.JobState": {
+            "type": "string",
+            "enum": [
+                "running",
+                "completed",
+                "failed",
+                "cancelled",
+                "stopped",
+                "timeout",
+                "preempted",
+                "out_of_memory"
+            ],
+            "x-enum-varnames": [
+                "JobStateRunning",
+                "JobStateCompleted",
+                "JobStateFailed",
+                "JobStateCancelled",
+                "JobStateStopped",
+                "JobStateTimeout",
+                "JobStatePreempted",
+                "JobStateOutOfMemory"
+            ]
+        },
        "schema.JobStatistics": {
            "description": "Specification for job metric statistics.",
            "type": "object",
@@ -831,10 +1119,14 @@
    },
    "securityDefinitions": {
        "ApiKeyAuth": {
-            "description": "JWT based authentification for general API endpoint use.",
            "type": "apiKey",
            "name": "X-Auth-Token",
            "in": "header"
        }
-    }
+    },
+    "tags": [
+        {
+            "name": "Job API"
+        }
+    ]
 }
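The reworked `GET /jobs/` endpoint now wraps its result in `api.GetJobsApiResponse` and authenticates via the `X-Auth-Token` header from the security definition above. A minimal client sketch against that spec — the host and JWT value are placeholders, and the response struct mirrors only the fields named in the definitions:

```go
// Sketch of a GET /jobs/ client for the swagger spec above.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Mirrors api.GetJobsApiResponse; jobs stay raw JSON since schema.JobMeta is large.
type GetJobsApiResponse struct {
	Items int               `json:"items"` // number of jobs returned
	Jobs  []json.RawMessage `json:"jobs"`  // array of schema.JobMeta
	Page  int               `json:"page"`  // page id returned
}

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:8080/api/jobs/?items-per-page=25&page=1", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Auth-Token", "<JWT>") // ApiKeyAuth security definition

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var jobs GetJobsApiResponse
	if err := json.NewDecoder(resp.Body).Decode(&jobs); err != nil {
		panic(err)
	}
	fmt.Printf("page %d: %d jobs\n", jobs.Page, jobs.Items)
}
```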
api/swagger.yaml (298 lines changed):
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -1,7 +1,39 @@
 basePath: /api
 definitions:
+  api.ApiTag:
+    properties:
+      name:
+        description: Tag Name
+        example: Testjob
+        type: string
+      type:
+        description: Tag Type
+        example: Debug
+        type: string
+    type: object
+  api.DeleteJobApiRequest:
+    properties:
+      cluster:
+        description: Cluster of job
+        example: fritz
+        type: string
+      jobId:
+        description: Cluster Job ID of job
+        example: 123000
+        type: integer
+      startTime:
+        description: Start Time of job as epoch
+        example: 1649723812
+        type: integer
+    required:
+    - jobId
+    type: object
+  api.DeleteJobApiResponse:
+    properties:
+      msg:
+        type: string
+    type: object
   api.ErrorResponse:
-    description: Error message as returned from backend.
     properties:
       error:
         description: Error Message
@@ -10,16 +42,27 @@ definitions:
         description: Statustext of Errorcode
         type: string
     type: object
+  api.GetJobsApiResponse:
+    properties:
+      items:
+        description: Number of jobs returned
+        type: integer
+      jobs:
+        description: Array of jobs
+        items:
+          $ref: '#/definitions/schema.JobMeta'
+        type: array
+      page:
+        description: Page id returned
+        type: integer
+    type: object
   api.StartJobApiResponse:
-    description: Successful job start response with database id of new job.
     properties:
       id:
         description: Database ID of new job
         type: integer
     type: object
   api.StopJobApiRequest:
-    description: Request to stop running job using stoptime and final state. They
-      are only required if no database id was provided with endpoint.
     properties:
       cluster:
         description: Cluster of job
@@ -30,15 +73,10 @@ definitions:
         example: 123000
         type: integer
       jobState:
-        description: Final state of job
-        enum:
-        - completed
-        - failed
-        - cancelled
-        - stopped
-        - timeout
+        allOf:
+        - $ref: '#/definitions/schema.JobState'
+        description: Final job state
         example: completed
-        type: string
       startTime:
         description: Start Time of job as epoch
         example: 1649723812
@@ -51,18 +89,6 @@ definitions:
     - jobState
     - stopTime
     type: object
-  api.Tag:
-    description: Defines a tag using name and type.
-    properties:
-      name:
-        description: Tag Name
-        example: Testjob
-        type: string
-      type:
-        description: Tag Type
-        example: Debug
-        type: string
-    type: object
   schema.Job:
     description: Information of a HPC job.
     properties:
@@ -95,16 +121,10 @@ definitions:
         example: 123000
         type: integer
       jobState:
+        allOf:
+        - $ref: '#/definitions/schema.JobState'
         description: Final state of job
-        enum:
-        - completed
-        - failed
-        - cancelled
-        - stopped
-        - timeout
-        - out_of_memory
         example: completed
-        type: string
       metaData:
         additionalProperties:
           type: string
@@ -203,16 +223,10 @@ definitions:
         example: 123000
         type: integer
       jobState:
+        allOf:
+        - $ref: '#/definitions/schema.JobState'
         description: Final state of job
-        enum:
-        - completed
-        - failed
-        - cancelled
-        - stopped
-        - timeout
-        - out_of_memory
         example: completed
-        type: string
       metaData:
         additionalProperties:
           type: string
@@ -286,6 +300,26 @@ definitions:
         minimum: 1
         type: integer
     type: object
+  schema.JobState:
+    enum:
+    - running
+    - completed
+    - failed
+    - cancelled
+    - stopped
+    - timeout
+    - preempted
+    - out_of_memory
+    type: string
+    x-enum-varnames:
+    - JobStateRunning
+    - JobStateCompleted
+    - JobStateFailed
+    - JobStateCancelled
+    - JobStateStopped
+    - JobStateTimeout
+    - JobStatePreempted
+    - JobStateOutOfMemory
   schema.JobStatistics:
     description: Specification for job metric statistics.
     properties:
@@ -344,24 +378,21 @@ definitions:
         example: Debug
         type: string
     type: object
-host: clustercockpit.localhost:8082
+host: localhost:8080
 info:
   contact:
     email: support@clustercockpit.org
     name: ClusterCockpit Project
     url: https://github.com/ClusterCockpit
-  description: Defines a tag using name and type.
+  description: API for batch job control.
   license:
     name: MIT License
     url: https://opensource.org/licenses/MIT
-  termsOfService: https://monitoring.nhr.fau.de/imprint
   title: ClusterCockpit REST API
-  version: 0.1.0
+  version: 0.2.0
 paths:
   /jobs/:
     get:
-      consumes:
-      - application/json
       description: |-
         Get a list of all jobs. Filters can be applied using query parameters.
         Number of results can be limited by page. Results are sorted by descending startTime.
@@ -385,11 +416,11 @@ paths:
         in: query
         name: start-time
         type: string
-      - description: 'Items per page (If empty: No Limit)'
+      - description: 'Items per page (Default: 25)'
        in: query
        name: items-per-page
        type: integer
-      - description: 'Page Number (If empty: No Paging)'
+      - description: 'Page Number (Default: 1)'
        in: query
        name: page
        type: integer
@@ -401,11 +432,9 @@ paths:
      - application/json
      responses:
        "200":
-          description: Array of matching jobs
+          description: Job array and page info
          schema:
-            items:
-              $ref: '#/definitions/schema.Job'
-            type: array
+            $ref: '#/definitions/api.GetJobsApiResponse'
        "400":
          description: Bad Request
          schema:
@@ -414,6 +443,10 @@ paths:
          description: Unauthorized
          schema:
            $ref: '#/definitions/api.ErrorResponse'
+        "403":
+          description: Forbidden
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
        "500":
          description: Internal Server Error
          schema:
@@ -421,6 +454,152 @@ paths:
      security:
      - ApiKeyAuth: []
      summary: Lists all jobs
+      tags:
+      - query
+  /jobs/delete_job/:
+    delete:
+      consumes:
+      - application/json
+      description: Job to delete is specified by request body. All fields are required
+        in this case.
+      parameters:
+      - description: All fields required
+        in: body
+        name: request
+        required: true
+        schema:
+          $ref: '#/definitions/api.DeleteJobApiRequest'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: Success message
+          schema:
+            $ref: '#/definitions/api.DeleteJobApiResponse'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "401":
+          description: Unauthorized
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "403":
+          description: Forbidden
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "404":
+          description: Resource not found
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "422":
+          description: 'Unprocessable Entity: finding job failed: sql: no rows in
+            result set'
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "500":
+          description: Internal Server Error
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+      security:
+      - ApiKeyAuth: []
+      summary: Remove a job from the sql database
+      tags:
+      - remove
+  /jobs/delete_job/{id}:
+    delete:
+      description: Job to remove is specified by database ID. This will not remove
+        the job from the job archive.
+      parameters:
+      - description: Database ID of Job
+        in: path
+        name: id
+        required: true
+        type: integer
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: Success message
+          schema:
+            $ref: '#/definitions/api.DeleteJobApiResponse'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "401":
+          description: Unauthorized
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "403":
+          description: Forbidden
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "404":
+          description: Resource not found
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "422":
+          description: 'Unprocessable Entity: finding job failed: sql: no rows in
+            result set'
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "500":
+          description: Internal Server Error
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+      security:
+      - ApiKeyAuth: []
+      summary: Remove a job from the sql database
+      tags:
+      - remove
+  /jobs/delete_job_before/{ts}:
+    delete:
+      description: Remove all jobs with start time before timestamp. The jobs will
+        not be removed from the job archive.
+      parameters:
+      - description: Unix epoch timestamp
+        in: path
+        name: ts
+        required: true
+        type: integer
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: Success message
+          schema:
+            $ref: '#/definitions/api.DeleteJobApiResponse'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "401":
+          description: Unauthorized
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "403":
+          description: Forbidden
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "404":
+          description: Resource not found
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "422":
+          description: 'Unprocessable Entity: finding job failed: sql: no rows in
+            result set'
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+        "500":
+          description: Internal Server Error
+          schema:
+            $ref: '#/definitions/api.ErrorResponse'
+      security:
+      - ApiKeyAuth: []
+      summary: Remove a job from the sql database
+      tags:
+      - remove
   /jobs/start_job/:
     post:
       consumes:
@@ -466,6 +645,8 @@ paths:
      security:
      - ApiKeyAuth: []
      summary: Adds a new job as "running"
+      tags:
+      - add and modify
   /jobs/stop_job/:
     post:
       description: |-
@@ -482,7 +663,7 @@ paths:
      - application/json
      responses:
        "200":
-          description: Job resource
+          description: Success message
          schema:
            $ref: '#/definitions/schema.JobMeta'
        "400":
@@ -513,6 +694,8 @@ paths:
      security:
      - ApiKeyAuth: []
      summary: Marks job as completed and triggers archiving
+      tags:
+      - add and modify
   /jobs/stop_job/{id}:
     post:
       consumes:
@@ -567,6 +750,8 @@ paths:
      security:
      - ApiKeyAuth: []
      summary: Marks job as completed and triggers archiving
+      tags:
+      - add and modify
   /jobs/tag_job/{id}:
     post:
       consumes:
@@ -586,7 +771,7 @@ paths:
        required: true
        schema:
          items:
-            $ref: '#/definitions/api.Tag'
+            $ref: '#/definitions/api.ApiTag'
          type: array
      produces:
      - application/json
@@ -614,10 +799,13 @@ paths:
      security:
      - ApiKeyAuth: []
      summary: Adds one or more tags to a job
+      tags:
+      - add and modify
 securityDefinitions:
   ApiKeyAuth:
-    description: JWT based authentification for general API endpoint use.
     in: header
     name: X-Auth-Token
     type: apiKey
 swagger: "2.0"
+tags:
+- name: Job API
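The new `/jobs/delete_job/` route takes an `api.DeleteJobApiRequest` body, with all fields required on this variant. A minimal sketch of a matching client call — host, token, and the concrete job values are placeholders taken from the spec's examples:

```go
// Sketch of DELETE /jobs/delete_job/ using the api.DeleteJobApiRequest shape above.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type DeleteJobApiRequest struct {
	JobID     int64  `json:"jobId"`     // cluster job id, e.g. 123000
	Cluster   string `json:"cluster"`   // e.g. "fritz"
	StartTime int64  `json:"startTime"` // job start time as unix epoch
}

func main() {
	body, _ := json.Marshal(DeleteJobApiRequest{
		JobID: 123000, Cluster: "fritz", StartTime: 1649723812,
	})
	req, err := http.NewRequest(http.MethodDelete,
		"http://localhost:8080/api/jobs/delete_job/", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Auth-Token", "<JWT>") // placeholder token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // 200 carries api.DeleteJobApiResponse on success
}
```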
@ -13,7 +13,6 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
"runtime"
|
||||||
@ -62,19 +61,22 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var flagReinitDB, flagServer, flagSyncLDAP, flagGops, flagDev, flagVersion bool
|
var flagReinitDB, flagServer, flagSyncLDAP, flagGops, flagMigrateDB, flagDev, flagVersion, flagLogDateTime bool
|
||||||
var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob string
|
var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string
|
||||||
flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
|
flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)")
|
||||||
flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap")
|
flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap")
|
||||||
flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling")
|
flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling")
|
||||||
flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)")
|
flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)")
|
||||||
flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI")
|
flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI")
|
||||||
flag.BoolVar(&flagVersion, "version", false, "Show version information and exit")
|
flag.BoolVar(&flagVersion, "version", false, "Show version information and exit")
|
||||||
|
flag.BoolVar(&flagMigrateDB, "migrate-db", false, "Migrate database to supported version and exit")
|
||||||
|
flag.BoolVar(&flagLogDateTime, "logdate", false, "Set this flag to add date and time to log messages")
|
||||||
flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
|
flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`")
|
||||||
flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `<username>:[admin,support,api,user]:<password>`")
|
flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `<username>:[admin,support,manager,api,user]:<password>`")
|
||||||
flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
|
flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`")
|
||||||
flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
|
flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`")
|
||||||
flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `<path-to-meta.json>:<path-to-data.json>,...`")
|
flag.StringVar(&flagImportJob, "import-job", "", "Import a job. Argument format: `<path-to-meta.json>:<path-to-data.json>,...`")
|
||||||
|
flag.StringVar(&flagLogLevel, "loglevel", "warn", "Sets the logging level: `[debug,info,warn (default),err,fatal,crit]`")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if flagVersion {
|
if flagVersion {
|
||||||
@ -85,6 +87,9 @@ func main() {
|
|||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Apply config flags for pkg/log
|
||||||
|
log.Init(flagLogLevel, flagLogDateTime)
|
||||||
|
|
||||||
// See https://github.com/google/gops (Runtime overhead is almost zero)
|
// See https://github.com/google/gops (Runtime overhead is almost zero)
|
||||||
if flagGops {
|
if flagGops {
|
||||||
if err := agent.Listen(agent.Options{}); err != nil {
|
if err := agent.Listen(agent.Options{}); err != nil {
|
||||||
@ -108,6 +113,11 @@ func main() {
|
|||||||
config.Keys.DB = os.Getenv(envvar)
|
config.Keys.DB = os.Getenv(envvar)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if flagMigrateDB {
|
||||||
|
repository.MigrateDB(config.Keys.DBDriver, config.Keys.DB)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
repository.Connect(config.Keys.DBDriver, config.Keys.DB)
|
repository.Connect(config.Keys.DBDriver, config.Keys.DB)
|
||||||
db := repository.GetConnection()
|
db := repository.GetConnection()
|
||||||
|
|
||||||
@ -118,7 +128,7 @@ func main() {
|
|||||||
"ldap": config.Keys.LdapConfig,
|
"ldap": config.Keys.LdapConfig,
|
||||||
"jwt": config.Keys.JwtConfig,
|
"jwt": config.Keys.JwtConfig,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("auth initialization failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if d, err := time.ParseDuration(config.Keys.SessionMaxAge); err != nil {
|
if d, err := time.ParseDuration(config.Keys.SessionMaxAge); err != nil {
|
||||||
@ -132,14 +142,14 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := authentication.AddUser(&auth.User{
|
if err := authentication.AddUser(&auth.User{
|
||||||
Username: parts[0], Password: parts[2], Roles: strings.Split(parts[1], ","),
|
Username: parts[0], Projects: make([]string, 0), Password: parts[2], Roles: strings.Split(parts[1], ","),
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("adding '%s' user authentication failed: %v", parts[0], err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if flagDelUser != "" {
|
if flagDelUser != "" {
|
||||||
if err := authentication.DelUser(flagDelUser); err != nil {
|
if err := authentication.DelUser(flagDelUser); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("deleting user failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -149,7 +159,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := authentication.LdapAuth.Sync(); err != nil {
|
if err := authentication.LdapAuth.Sync(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("LDAP sync failed: %v", err)
|
||||||
}
|
}
|
||||||
log.Info("LDAP sync successfull")
|
log.Info("LDAP sync successfull")
|
||||||
}
|
}
|
||||||
@ -157,41 +167,41 @@ func main() {
|
|||||||
if flagGenJWT != "" {
|
if flagGenJWT != "" {
|
||||||
user, err := authentication.GetUser(flagGenJWT)
|
user, err := authentication.GetUser(flagGenJWT)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("could not get user from JWT: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !user.HasRole(auth.RoleApi) {
|
if !user.HasRole(auth.RoleApi) {
|
||||||
log.Warn("that user does not have the API role")
|
log.Warnf("user '%s' does not have the API role", user.Username)
|
||||||
}
|
}
|
||||||
|
|
||||||
jwt, err := authentication.JwtAuth.ProvideJWT(user)
|
jwt, err := authentication.JwtAuth.ProvideJWT(user)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("failed to provide JWT to user '%s': %v", user.Username, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("JWT for '%s': %s\n", user.Username, jwt)
|
fmt.Printf("MAIN > JWT for '%s': %s\n", user.Username, jwt)
|
||||||
}
|
}
|
||||||
} else if flagNewUser != "" || flagDelUser != "" {
|
} else if flagNewUser != "" || flagDelUser != "" {
|
||||||
log.Fatal("arguments --add-user and --del-user can only be used if authentication is enabled")
|
log.Fatal("arguments --add-user and --del-user can only be used if authentication is enabled")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := archive.Init(config.Keys.Archive); err != nil {
|
if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("failed to initialize archive: %s", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := metricdata.Init(config.Keys.DisableArchive); err != nil {
|
if err := metricdata.Init(config.Keys.DisableArchive); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("failed to initialize metricdata repository: %s", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if flagReinitDB {
|
if flagReinitDB {
|
||||||
if err := repository.InitDB(); err != nil {
|
if err := repository.InitDB(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("failed to re-initialize repository DB: %s", err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if flagImportJob != "" {
|
if flagImportJob != "" {
|
||||||
if err := repository.HandleImportFlag(flagImportJob); err != nil {
|
if err := repository.HandleImportFlag(flagImportJob); err != nil {
|
||||||
log.Fatalf("import failed: %s", err.Error())
|
log.Fatalf("job import failed: %s", err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -209,12 +219,12 @@ func main() {
|
|||||||
graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error {
|
graphQLEndpoint.SetRecoverFunc(func(ctx context.Context, err interface{}) error {
|
||||||
switch e := err.(type) {
|
switch e := err.(type) {
|
||||||
case string:
|
case string:
|
||||||
return fmt.Errorf("panic: %s", e)
|
return fmt.Errorf("MAIN > Panic: %s", e)
|
||||||
case error:
|
case error:
|
||||||
return fmt.Errorf("panic caused by: %w", e)
|
return fmt.Errorf("MAIN > Panic caused by: %w", e)
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.New("internal server error (panic)")
|
return errors.New("MAIN > Internal server error (panic)")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -291,32 +301,13 @@ func main() {
|
|||||||
if flagDev {
|
if flagDev {
|
||||||
r.Handle("/playground", playground.Handler("GraphQL playground", "/query"))
|
r.Handle("/playground", playground.Handler("GraphQL playground", "/query"))
|
||||||
r.PathPrefix("/swagger/").Handler(httpSwagger.Handler(
|
r.PathPrefix("/swagger/").Handler(httpSwagger.Handler(
|
||||||
httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet)
|
httpSwagger.URL("http://" + config.Keys.Addr + "/swagger/doc.json"))).Methods(http.MethodGet)
|
||||||
}
|
}
|
||||||
secured.Handle("/query", graphQLEndpoint)
|
secured.Handle("/query", graphQLEndpoint)
|
||||||
|
|
||||||
// Send a searchId and then reply with a redirect to a user or job.
|
// Send a searchId and then reply with a redirect to a user, or directly send query to job table for jobid and project.
|
||||||
secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) {
|
secured.HandleFunc("/search", func(rw http.ResponseWriter, r *http.Request) {
|
||||||
if search := r.URL.Query().Get("searchId"); search != "" {
|
routerConfig.HandleSearchBar(rw, r, api)
|
||||||
job, username, err := api.JobRepository.FindJobOrUser(r.Context(), search)
|
|
||||||
if err == repository.ErrNotFound {
|
|
||||||
http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(search), http.StatusTemporaryRedirect)
|
|
||||||
return
|
|
||||||
} else if err != nil {
|
|
||||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if username != "" {
|
|
||||||
http.Redirect(rw, r, "/monitoring/user/"+username, http.StatusTemporaryRedirect)
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
http.Redirect(rw, r, fmt.Sprintf("/monitoring/job/%d", job), http.StatusTemporaryRedirect)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
http.Error(rw, "'searchId' query parameter missing", http.StatusBadRequest)
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Mount all /monitoring/... and /api/... routes.
|
// Mount all /monitoring/... and /api/... routes.
|
||||||
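The inline search logic removed above now lives in `routerConfig.HandleSearchBar`, which interprets the `type:<query>` tags described in docs/searchbar.md further below. As a rough orientation, a minimal sketch of such a dispatcher could look like the following; the function name, routes and fallback order are illustrative assumptions, not the repository's exact implementation:

```go
package routerconfig

import (
	"net/http"
	"net/url"
	"strings"
)

// handleSearch is a sketch of a searchbar dispatcher: it splits the raw
// input into an optional search tag and a query string, then redirects
// to the matching view, loosely following docs/searchbar.md.
func handleSearch(rw http.ResponseWriter, r *http.Request) {
	raw := strings.TrimSpace(r.URL.Query().Get("searchId"))
	tag, query := "", raw
	if before, after, found := strings.Cut(raw, ":"); found {
		tag, query = strings.TrimSpace(before), strings.TrimSpace(after)
	}
	switch tag {
	case "jobId", "jobName", "projectId":
		// Job-table queries may yield multiple matches, so use the list view.
		http.Redirect(rw, r, "/monitoring/jobs/?"+tag+"="+url.QueryEscape(query), http.StatusTemporaryRedirect)
	case "username", "name":
		// User lookups resolve via the users table.
		http.Redirect(rw, r, "/monitoring/users/?user="+url.QueryEscape(query), http.StatusTemporaryRedirect)
	default:
		// No tag given: best-guess search, falling back to the job list.
		http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(query), http.StatusTemporaryRedirect)
	}
}
```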
@@ -361,7 +352,7 @@ func main() {
|
|||||||
// Start http or https server
|
// Start http or https server
|
||||||
listener, err := net.Listen("tcp", config.Keys.Addr)
|
listener, err := net.Listen("tcp", config.Keys.Addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("starting http listener failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
|
if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" {
|
||||||
@@ -373,7 +364,7 @@ func main() {
|
|||||||
if config.Keys.HttpsCertFile != "" && config.Keys.HttpsKeyFile != "" {
|
if config.Keys.HttpsCertFile != "" && config.Keys.HttpsKeyFile != "" {
|
||||||
cert, err := tls.LoadX509KeyPair(config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
|
cert, err := tls.LoadX509KeyPair(config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatalf("loading X509 keypair failed: %v", err)
|
||||||
}
|
}
|
||||||
listener = tls.NewListener(listener, &tls.Config{
|
listener = tls.NewListener(listener, &tls.Config{
|
||||||
Certificates: []tls.Certificate{cert},
|
Certificates: []tls.Certificate{cert},
|
||||||
@@ -384,23 +375,23 @@ func main() {
|
|||||||
MinVersion: tls.VersionTLS12,
|
MinVersion: tls.VersionTLS12,
|
||||||
PreferServerCipherSuites: true,
|
PreferServerCipherSuites: true,
|
||||||
})
|
})
|
||||||
log.Printf("HTTPS server listening at %s...", config.Keys.Addr)
|
fmt.Printf("HTTPS server listening at %s...", config.Keys.Addr)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("HTTP server listening at %s...", config.Keys.Addr)
|
fmt.Printf("HTTP server listening at %s...", config.Keys.Addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Because this program will want to bind to a privileged port (like 80), the listener must
|
// Because this program will want to bind to a privileged port (like 80), the listener must
|
||||||
// be established first, then the user can be changed, and after that,
|
// be established first, then the user can be changed, and after that,
|
||||||
// the actuall http server can be started.
|
// the actual http server can be started.
|
||||||
if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
|
if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil {
|
||||||
log.Fatalf("error while changing user: %s", err.Error())
|
log.Fatalf("error while preparing server start: %s", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
|
if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
|
||||||
log.Fatal(err)
|
log.Fatalf("starting server failed: %v", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
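The comment in this hunk states an ordering constraint worth isolating: bind the socket while the process still has the privileges needed for a low port, drop privileges next, and only then start serving. Condensed from the code in this hunk into one sketch, with `dropPrivileges` standing in for `runtimeEnv.DropPrivileges` and error handling trimmed to the essentials:

```go
package serverutil

import (
	"crypto/tls"
	"log"
	"net"
	"net/http"
)

// dropPrivileges is a placeholder for runtimeEnv.DropPrivileges
// (setgid/setuid to an unprivileged group and user).
func dropPrivileges(group, user string) error { return nil }

// serveDroppingPrivileges shows the required ordering only.
func serveDroppingPrivileges(addr, certFile, keyFile, group, user string) {
	// 1. Bind while still privileged so a port like :80 or :443 works.
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("starting http listener failed: %v", err)
	}
	// 2. Optionally wrap the listener in TLS before serving.
	if certFile != "" && keyFile != "" {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			log.Fatalf("loading X509 keypair failed: %v", err)
		}
		listener = tls.NewListener(listener, &tls.Config{
			Certificates: []tls.Certificate{cert},
			MinVersion:   tls.VersionTLS12,
		})
	}
	// 3. Drop privileges after binding but before accepting requests.
	if err := dropPrivileges(group, user); err != nil {
		log.Fatalf("error while preparing server start: %s", err.Error())
	}
	// 4. Only now start the actual HTTP server.
	if err := http.Serve(listener, nil); err != nil && err != http.ErrServerClosed {
		log.Fatalf("starting server failed: %v", err)
	}
}
```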
@@ -410,13 +401,13 @@ func main() {
|
|||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
<-sigs
|
<-sigs
|
||||||
runtimeEnv.SystemdNotifiy(false, "shutting down")
|
runtimeEnv.SystemdNotifiy(false, "Shutting down ...")
|
||||||
|
|
||||||
// First shut down the server gracefully (waiting for all ongoing requests)
|
// First shut down the server gracefully (waiting for all ongoing requests)
|
||||||
server.Shutdown(context.Background())
|
server.Shutdown(context.Background())
|
||||||
|
|
||||||
// Then, wait for any async archivings still pending...
|
// Then, wait for any async archivings still pending...
|
||||||
api.OngoingArchivings.Wait()
|
api.JobRepository.WaitForArchiving()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if config.Keys.StopJobsExceedingWalltime > 0 {
|
if config.Keys.StopJobsExceedingWalltime > 0 {
|
||||||
@@ -424,7 +415,7 @@ func main() {
|
|||||||
for range time.Tick(30 * time.Minute) {
|
for range time.Tick(30 * time.Minute) {
|
||||||
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
|
err := jobRepo.StopJobsExceedingWalltimeBy(config.Keys.StopJobsExceedingWalltime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error while looking for jobs exceeding theire walltime: %s", err.Error())
|
log.Warnf("Error while looking for jobs exceeding their walltime: %s", err.Error())
|
||||||
}
|
}
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
}
|
}
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
## Intro
|
## Intro
|
||||||
|
|
||||||
cc-backend requires a configuration file speciyfing the cluster systems to be used. Still many default
|
cc-backend requires a configuration file specifying the cluster systems to be used. Even so, many of the default
|
||||||
options documented below are used. By default, cc-backend tries to load a config.json from the working directory.
|
options documented below are used. By default, cc-backend tries to load a config.json from the working directory.
|
||||||
To overwrite the default, specify a JSON config file location using the command line option `--config <filepath>`.
|
To overwrite the default, specify a JSON config file location using the command line option `--config <filepath>`.
|
||||||
All security-relevant configuration, e.g., keys and passwords, is set using environment variables.
|
All security-relevant configuration, e.g., keys and passwords, is set using environment variables.
|
||||||
@@ -24,7 +24,8 @@ It is supported to specify these by means of an `.env` file located in the proje
|
|||||||
* `https-cert-file` and `https-key-file`: Type string. If both of those options are not empty, use HTTPS with those certificates.
|
* `https-cert-file` and `https-key-file`: Type string. If both of those options are not empty, use HTTPS with those certificates.
|
||||||
* `redirect-http-to`: Type string. If not the empty string and `addr` does not end in ":80", redirect every request incoming at port 80 to that URL.
|
* `redirect-http-to`: Type string. If not the empty string and `addr` does not end in ":80", redirect every request incoming at port 80 to that URL.
|
||||||
* `machine-state-dir`: Type string. Where to store MachineState files. TODO: Explain in more detail!
|
* `machine-state-dir`: Type string. Where to store MachineState files. TODO: Explain in more detail!
|
||||||
* `"stop-jobs-exceeding-walltime`: Type int. If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job. Default `0`;
|
* `"stop-jobs-exceeding-walltime`: Type int. If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job. Default `0`.
|
||||||
|
* `short-running-jobs-duration`: Type int. Do not show running jobs shorter than X seconds. Default `300`.
|
||||||
* `ldap`: Type object. For LDAP Authentication and user synchronisation. Default `nil`.
|
* `ldap`: Type object. For LDAP Authentication and user synchronisation. Default `nil`.
|
||||||
- `url`: Type string. URL of LDAP directory server.
|
- `url`: Type string. URL of LDAP directory server.
|
||||||
- `user_base`: Type string. Base DN of user tree root.
|
- `user_base`: Type string. Base DN of user tree root.
|
||||||
@@ -44,16 +45,16 @@ It is supported to specify these by means of an `.env` file located in the proje
|
|||||||
"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
|
"startTime": { "from": "2022-01-01T00:00:00Z", "to": null }
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
* `ui-defaults`: Type object. Default configuration for ui views. If overwriten, all options must be provided! Most options can be overwritten by the user via the web interface.
|
* `ui-defaults`: Type object. Default configuration for ui views. If overwritten, all options must be provided! Most options can be overwritten by the user via the web interface.
|
||||||
- `analysis_view_histogramMetrics`: Type string array. Metrics to show as job count histograms in analysis view. Default `["flops_any", "mem_bw", "mem_used"]`.
|
- `analysis_view_histogramMetrics`: Type string array. Metrics to show as job count histograms in analysis view. Default `["flops_any", "mem_bw", "mem_used"]`.
|
||||||
- `analysis_view_scatterPlotMetrics`: Type array of string array. Initial scatter plto configuration in analysis view. Default `[["flops_any", "mem_bw"], ["flops_any", "cpu_load"], ["cpu_load", "mem_bw"]]`.
|
- `analysis_view_scatterPlotMetrics`: Type array of string array. Initial
|
||||||
|
scatter plot configuration in analysis view. Default `[["flops_any", "mem_bw"], ["flops_any", "cpu_load"], ["cpu_load", "mem_bw"]]`.
|
||||||
- `job_view_nodestats_selectedMetrics`: Type string array. Initial metrics shown in node statistics table of single job view. Default `["flops_any", "mem_bw", "mem_used"]`.
|
- `job_view_nodestats_selectedMetrics`: Type string array. Initial metrics shown in node statistics table of single job view. Default `["flops_any", "mem_bw", "mem_used"]`.
|
||||||
- `job_view_polarPlotMetrics`: Type string array. Metrics shown in polar plot of single job view. Default `["flops_any", "mem_bw", "mem_used", "net_bw", "file_bw"]`.
|
- `job_view_polarPlotMetrics`: Type string array. Metrics shown in polar plot of single job view. Default `["flops_any", "mem_bw", "mem_used", "net_bw", "file_bw"]`.
|
||||||
- `job_view_selectedMetrics`: Type string array. Default `["flops_any", "mem_bw", "mem_used"]`.
|
- `job_view_selectedMetrics`: Type string array. Default `["flops_any", "mem_bw", "mem_used"]`.
|
||||||
- `plot_general_colorBackground`: Type bool. Color plot background according to job average threshold limits. Default `true`.
|
- `plot_general_colorBackground`: Type bool. Color plot background according to job average threshold limits. Default `true`.
|
||||||
- `plot_general_colorscheme`: Type string array. Initial color scheme. Default `"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"`.
|
- `plot_general_colorscheme`: Type string array. Initial color scheme. Default `"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"`.
|
||||||
- `plot_general_lineWidth`: Type int. Initial linewidth. Default `3`.
|
- `plot_general_lineWidth`: Type int. Initial linewidth. Default `3`.
|
||||||
- `plot_list_hideShortRunningJobs`: Type int. Do not show running jobs shorter than X seconds. Default `300`.
|
|
||||||
- `plot_list_jobsPerPage`: Type int. Jobs shown per page in job lists. Default `50`.
|
- `plot_list_jobsPerPage`: Type int. Jobs shown per page in job lists. Default `50`.
|
||||||
- `plot_list_selectedMetrics`: Type string array. Initial metric plots shown in jobs lists. Default `"cpu_load", "ipc", "mem_used", "flops_any", "mem_bw"`.
|
- `plot_list_selectedMetrics`: Type string array. Initial metric plots shown in jobs lists. Default `"cpu_load", "ipc", "mem_used", "flops_any", "mem_bw"`.
|
||||||
- `plot_view_plotsPerRow`: Type int. Number of plots per row in single job view. Default `3`.
|
- `plot_view_plotsPerRow`: Type int. Number of plots per row in single job view. Default `3`.
|
||||||
|
@@ -39,5 +39,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
],
|
||||||
|
"jwts": {
|
||||||
|
"cookieName": "",
|
||||||
|
"forceJWTValidationViaDatabase": false,
|
||||||
|
"max-age": 0,
|
||||||
|
"trustedExternalIssuer": ""
|
||||||
|
},
|
||||||
|
"short-running-jobs-duration": 300
|
||||||
}
|
}
|
||||||
|
@@ -3,6 +3,10 @@
|
|||||||
JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0="
|
JWT_PUBLIC_KEY="kzfYrYy+TzpanWZHJ5qSdMj5uKUWgq74BWhQG6copP0="
|
||||||
JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q=="
|
JWT_PRIVATE_KEY="dtPC/6dWJFKZK7KZ78CvWuynylOmjBFyMsUWArwmodOTN9itjL5POlqdZkcnmpJ0yPm4pRaCrvgFaFAbpyik/Q=="
|
||||||
|
|
||||||
|
# Base64 encoded Ed25519 public key for accepting externally generated JWTs
|
||||||
|
# Keys in PEM format can be converted, see `tools/convert-pem-pubkey-for-cc/Readme.md`
|
||||||
|
CROSS_LOGIN_JWT_PUBLIC_KEY=""
|
||||||
|
|
||||||
# Some random bytes used as secret for cookie-based sessions (DO NOT USE THIS ONE IN PRODUCTION)
|
# Some random bytes used as secret for cookie-based sessions (DO NOT USE THIS ONE IN PRODUCTION)
|
||||||
SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629"
|
SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629"
|
||||||
|
|
||||||
|
@@ -33,7 +33,6 @@ Start by creating a base folder for all of the following steps.
|
|||||||
* Clone Repository
|
* Clone Repository
|
||||||
- `git clone https://github.com/ClusterCockpit/cc-backend.git`
|
- `git clone https://github.com/ClusterCockpit/cc-backend.git`
|
||||||
- `cd cc-backend`
|
- `cd cc-backend`
|
||||||
- `git checkout dev-job-archive-module` Will be merged soon into master
|
|
||||||
* Setup Frontend
|
* Setup Frontend
|
||||||
- `cd ./web/frontend`
|
- `cd ./web/frontend`
|
||||||
- `yarn install`
|
- `yarn install`
|
||||||
@@ -41,15 +40,15 @@ Start by creating a base folder for all of the following steps.
|
|||||||
- `cd ../..`
|
- `cd ../..`
|
||||||
* Build Go Executable
|
* Build Go Executable
|
||||||
- `go build ./cmd/cc-backend/`
|
- `go build ./cmd/cc-backend/`
|
||||||
* Prepare Datafolder and Database file
|
|
||||||
- `mkdir var`
|
|
||||||
- `touch var/job.db`
|
|
||||||
* Activate & Config environment for cc-backend
|
* Activate & Config environment for cc-backend
|
||||||
- `cp configs/env-template.txt .env`
|
- `cp configs/env-template.txt .env`
|
||||||
- Optional: Have a look via `vim ./.env`
|
- Optional: Have a look via `vim ./.env`
|
||||||
- Copy the `config.json` file included in this tarball into the root directory of cc-backend: `cp ../../config.json ./`
|
- Copy the `config.json` file included in this tarball into the root directory of cc-backend: `cp ../../config.json ./`
|
||||||
* Back to toplevel `clustercockpit`
|
* Back to toplevel `clustercockpit`
|
||||||
- `cd ..`
|
- `cd ..`
|
||||||
|
* Prepare Datafolder and Database file
|
||||||
|
- `mkdir var`
|
||||||
|
- `./cc-backend --migrate-db`
|
||||||
|
|
||||||
### Setup cc-metric-store
|
### Setup cc-metric-store
|
||||||
* Clone Repository
|
* Clone Repository
|
||||||
|
@@ -44,3 +44,39 @@ $ ./cc-backend -jwt <username> -no-server
|
|||||||
```
|
```
|
||||||
$ curl -X GET "<API ENDPOINT>" -H "accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer <JWT TOKEN>"
|
$ curl -X GET "<API ENDPOINT>" -H "accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer <JWT TOKEN>"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Accept externally generated JWTs provided via cookie
|
||||||
|
If there is an external service like an AuthAPI that can generate JWTs and hand them over to ClusterCockpit via cookies, CC can be configured to accept them:
|
||||||
|
|
||||||
|
1. `.env`: CC needs a public ed25519 key to verify foreign JWT signatures. Public keys in PEM format can be converted with the instructions in [/tools/convert-pem-pubkey-for-cc](../tools/convert-pem-pubkey-for-cc/Readme.md).
|
||||||
|
|
||||||
|
```
|
||||||
|
CROSS_LOGIN_JWT_PUBLIC_KEY="+51iXX8BdLFocrppRxIw52xCOf8xFSH/eNilN5IHVGc="
|
||||||
|
```
|
||||||
|
|
||||||
|
2. `config.json`: Insert a name for the cookie (set by the external service) containing the JWT so that CC knows where to look. Define a trusted issuer (JWT claim 'iss'); otherwise the JWT will be rejected.
|
||||||
|
If you want usernames and user roles from JWTs ('sub' and 'roles' claim) to be validated against CC's internal database, you need to enable it here. Unknown users will then be rejected and roles set via JWT will be ignored.
|
||||||
|
|
||||||
|
```json
|
||||||
|
"jwts": {
|
||||||
|
"cookieName": "access_cc",
|
||||||
|
"forceJWTValidationViaDatabase": true,
|
||||||
|
"trustedExternalIssuer": "auth.example.com"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Make sure your external service includes the same issuer (`iss`) in its JWTs. Example JWT payload:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"iat": 1668161471,
|
||||||
|
"nbf": 1668161471,
|
||||||
|
"exp": 1668161531,
|
||||||
|
"sub": "alice",
|
||||||
|
"roles": [
|
||||||
|
"user"
|
||||||
|
],
|
||||||
|
"jti": "a1b2c3d4-1234-5678-abcd-a1b2c3d4e5f6",
|
||||||
|
"iss": "auth.example.com"
|
||||||
|
}
|
||||||
|
```
|
||||||
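If an external issuer is configured as above, verifying the cookie-delivered token with `github.com/golang-jwt/jwt/v4` (already a dependency in go.mod) might look like the following sketch. The cookie name, issuer and environment variable mirror the examples above; the function itself and its error handling are illustrative, not CC's actual implementation:

```go
package auth

import (
	"crypto/ed25519"
	"encoding/base64"
	"fmt"
	"net/http"
	"os"

	"github.com/golang-jwt/jwt/v4"
)

// verifyExternalJWT checks a JWT taken from the configured cookie
// ("access_cc" above) against the key in CROSS_LOGIN_JWT_PUBLIC_KEY.
func verifyExternalJWT(r *http.Request) (*jwt.RegisteredClaims, error) {
	cookie, err := r.Cookie("access_cc")
	if err != nil {
		return nil, fmt.Errorf("no JWT cookie present: %w", err)
	}
	rawKey, err := base64.StdEncoding.DecodeString(os.Getenv("CROSS_LOGIN_JWT_PUBLIC_KEY"))
	if err != nil {
		return nil, err
	}
	pubKey := ed25519.PublicKey(rawKey)

	claims := &jwt.RegisteredClaims{}
	token, err := jwt.ParseWithClaims(cookie.Value, claims, func(t *jwt.Token) (interface{}, error) {
		// Accept only Ed25519 ("EdDSA") signatures.
		if _, ok := t.Method.(*jwt.SigningMethodEd25519); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return pubKey, nil
	})
	if err != nil || !token.Valid {
		return nil, fmt.Errorf("token rejected: %w", err)
	}
	// Enforce the trusted issuer from config.json ("auth.example.com" above).
	if claims.Issuer != "auth.example.com" {
		return nil, fmt.Errorf("untrusted issuer: %s", claims.Issuer)
	}
	return claims, nil
}
```

A real implementation would additionally map the `sub` and `roles` claims onto CC's user model, subject to `forceJWTValidationViaDatabase`.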
|
@@ -1,6 +1,5 @@
|
|||||||
{
|
{
|
||||||
"addr": "0.0.0.0:8080",
|
"addr": "127.0.0.1:8080",
|
||||||
"validate": true,
|
|
||||||
"archive": {
|
"archive": {
|
||||||
"kind": "file",
|
"kind": "file",
|
||||||
"path": "./var/job-archive"
|
"path": "./var/job-archive"
|
||||||
|
35
docs/searchbar.md
Normal file
35
docs/searchbar.md
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
# Docs for ClusterCockpit Searchbar
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
* Search tags are implemented as a `type:<query>` search string
|
||||||
|
* Types `jobId, jobName, projectId, username, name` for roles `admin` and `support`
|
||||||
|
* `jobName` is the job name as persisted in the `job.meta_data` table column
|
||||||
|
* `username` is the actual account identifier as persisted in the `job.user` table column
|
||||||
|
* `name` is the account owner's name as persisted in the `user.name` table column
|
||||||
|
* Types `jobId, jobName, projectId` for role `user`
|
||||||
|
* Examples:
|
||||||
|
* `jobName:myJob12`
|
||||||
|
* `jobId:123456`
|
||||||
|
* `username:abcd100`
|
||||||
|
* `name:Paul`
|
||||||
|
* If no searchTag is used: best-guess search with the following hierarchy
|
||||||
|
* `jobId -> username -> name -> projectId -> jobName`
|
||||||
|
* Destinations:
|
||||||
|
* JobId: Job-Table (Allows multiple identical matches, e.g. JobIds from different clusters)
|
||||||
|
* JobName: Job-Table (Allows multiple identical matches, e.g. JobNames from different clusters)
|
||||||
|
* ProjectId: Job-Table
|
||||||
|
* Username: Users-Table
|
||||||
|
* **Please note**: Only users with jobs will be shown in the table, i.e., users without jobs will be missing.
|
||||||
|
* Name: Users-Table
|
||||||
|
* **Please note**: Only users with jobs will be shown in the table, i.e., users without jobs will be missing.
|
||||||
|
* Best guess search always redirects to Job-Table or `/monitoring/user/$USER` (first username match)
|
||||||
|
* Unprocessable queries will redirect to `/monitoring/jobs/?`
|
||||||
|
* Spaces trimmed (both for searchTag and queryString)
|
||||||
|
* ` job12` == `job12`
|
||||||
|
* `projectID : abcd ` == `projectId:abcd`
|
||||||
|
* `jobName` and `name` queries match on a part of the target string
|
||||||
|
* `jobName:myjob` for jobName "myjob_cluster1"
|
||||||
|
* `name:Paul` for name "Paul Atreides"
|
||||||
|
|
||||||
|
* The jobName GQL query is resolved by matching the query string against a part of the whole metaData JSON in the SQL DB.
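The trimming rules above boil down to a small normalization step before tag dispatch. A sketch, under the assumption that tag comparison is also case-insensitive (suggested by the `projectID`/`projectId` example):

```go
package searchbar

import "strings"

// normalizeSearch applies the trimming rules listed above: whitespace
// around both the search tag and the query string is ignored.
func normalizeSearch(raw string) (tag, query string) {
	if before, after, found := strings.Cut(raw, ":"); found {
		return strings.TrimSpace(before), strings.TrimSpace(after)
	}
	return "", strings.TrimSpace(raw) // no tag: input for best-guess search
}
```

With this, `normalizeSearch(" projectID : abcd ")` yields `("projectID", "abcd")`, which a case-insensitive comparison such as `strings.EqualFold` can then map to the `projectId` type.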
|
72
go.mod
72
go.mod
@@ -1,60 +1,84 @@
|
|||||||
module github.com/ClusterCockpit/cc-backend
|
module github.com/ClusterCockpit/cc-backend
|
||||||
|
|
||||||
go 1.17
|
go 1.18
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/99designs/gqlgen v0.17.16
|
github.com/99designs/gqlgen v0.17.24
|
||||||
github.com/Masterminds/squirrel v1.5.3
|
github.com/Masterminds/squirrel v1.5.3
|
||||||
github.com/go-ldap/ldap/v3 v3.4.4
|
github.com/go-ldap/ldap/v3 v3.4.4
|
||||||
github.com/go-sql-driver/mysql v1.6.0
|
github.com/go-sql-driver/mysql v1.7.0
|
||||||
github.com/golang-jwt/jwt/v4 v4.4.2
|
github.com/golang-jwt/jwt/v4 v4.5.0
|
||||||
github.com/google/gops v0.3.25
|
github.com/golang-migrate/migrate/v4 v4.15.2
|
||||||
|
github.com/google/gops v0.3.27
|
||||||
github.com/gorilla/handlers v1.5.1
|
github.com/gorilla/handlers v1.5.1
|
||||||
github.com/gorilla/mux v1.8.0
|
github.com/gorilla/mux v1.8.0
|
||||||
github.com/gorilla/sessions v1.2.1
|
github.com/gorilla/sessions v1.2.1
|
||||||
github.com/influxdata/influxdb-client-go/v2 v2.10.0
|
github.com/influxdata/influxdb-client-go/v2 v2.12.2
|
||||||
github.com/jmoiron/sqlx v1.3.5
|
github.com/jmoiron/sqlx v1.3.5
|
||||||
github.com/mattn/go-sqlite3 v1.14.15
|
github.com/mattn/go-sqlite3 v1.14.16
|
||||||
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0
|
github.com/prometheus/client_golang v1.14.0
|
||||||
|
github.com/prometheus/common v0.40.0
|
||||||
|
github.com/qustavo/sqlhooks/v2 v2.1.0
|
||||||
|
github.com/santhosh-tekuri/jsonschema/v5 v5.2.0
|
||||||
github.com/swaggo/http-swagger v1.3.3
|
github.com/swaggo/http-swagger v1.3.3
|
||||||
github.com/swaggo/swag v1.8.5
|
github.com/swaggo/swag v1.8.10
|
||||||
github.com/vektah/gqlparser/v2 v2.5.0
|
github.com/vektah/gqlparser/v2 v2.5.1
|
||||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90
|
golang.org/x/crypto v0.6.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e // indirect
|
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
|
||||||
github.com/deepmap/oapi-codegen v1.11.0 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
|
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||||
|
github.com/containerd/containerd v1.6.18 // indirect
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||||
|
github.com/deepmap/oapi-codegen v1.12.4 // indirect
|
||||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||||
github.com/ghodss/yaml v1.0.0 // indirect
|
github.com/ghodss/yaml v1.0.0 // indirect
|
||||||
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
|
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
|
||||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||||
github.com/go-openapi/spec v0.20.7 // indirect
|
github.com/go-openapi/spec v0.20.8 // indirect
|
||||||
github.com/go-openapi/swag v0.22.3 // indirect
|
github.com/go-openapi/swag v0.22.3 // indirect
|
||||||
|
github.com/golang/protobuf v1.5.2 // indirect
|
||||||
github.com/google/uuid v1.3.0 // indirect
|
github.com/google/uuid v1.3.0 // indirect
|
||||||
github.com/gorilla/securecookie v1.1.1 // indirect
|
github.com/gorilla/securecookie v1.1.1 // indirect
|
||||||
github.com/gorilla/websocket v1.5.0 // indirect
|
github.com/gorilla/websocket v1.5.0 // indirect
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||||
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
|
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
github.com/josharian/intern v1.0.0 // indirect
|
||||||
|
github.com/jpillora/backoff v1.0.0 // indirect
|
||||||
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||||
github.com/mailru/easyjson v0.7.7 // indirect
|
github.com/mailru/easyjson v0.7.7 // indirect
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||||
|
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
|
github.com/prometheus/client_model v0.3.0 // indirect
|
||||||
|
github.com/prometheus/procfs v0.9.0 // indirect
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||||
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect
|
github.com/swaggo/files v1.0.0 // indirect
|
||||||
github.com/urfave/cli/v2 v2.8.1 // indirect
|
github.com/urfave/cli/v2 v2.24.4 // indirect
|
||||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
go.uber.org/atomic v1.10.0 // indirect
|
||||||
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
|
golang.org/x/mod v0.8.0 // indirect
|
||||||
golang.org/x/sys v0.0.0-20220913175220-63ea55921009 // indirect
|
golang.org/x/net v0.7.0 // indirect
|
||||||
golang.org/x/text v0.3.7 // indirect
|
golang.org/x/oauth2 v0.5.0 // indirect
|
||||||
golang.org/x/tools v0.1.12 // indirect
|
golang.org/x/sys v0.5.0 // indirect
|
||||||
|
golang.org/x/text v0.7.0 // indirect
|
||||||
|
golang.org/x/tools v0.6.0 // indirect
|
||||||
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
|
google.golang.org/protobuf v1.28.1 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
@@ -1,6 +1,4 @@
|
|||||||
// Package api GENERATED BY SWAG; DO NOT EDIT
|
// Code generated by swaggo/swag. DO NOT EDIT
|
||||||
// This file was generated by swaggo/swag at
|
|
||||||
// 2022-09-22 13:31:53.353204065 +0200 CEST m=+0.139444562
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import "github.com/swaggo/swag"
|
import "github.com/swaggo/swag"
|
||||||
@@ -11,7 +9,6 @@ const docTemplate = `{
|
|||||||
"info": {
|
"info": {
|
||||||
"description": "{{escape .Description}}",
|
"description": "{{escape .Description}}",
|
||||||
"title": "{{.Title}}",
|
"title": "{{.Title}}",
|
||||||
"termsOfService": "https://monitoring.nhr.fau.de/imprint",
|
|
||||||
"contact": {
|
"contact": {
|
||||||
"name": "ClusterCockpit Project",
|
"name": "ClusterCockpit Project",
|
||||||
"url": "https://github.com/ClusterCockpit",
|
"url": "https://github.com/ClusterCockpit",
|
||||||
@@ -34,12 +31,12 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"description": "Get a list of all jobs. Filters can be applied using query parameters.\nNumber of results can be limited by page. Results are sorted by descending startTime.",
|
"description": "Get a list of all jobs. Filters can be applied using query parameters.\nNumber of results can be limited by page. Results are sorted by descending startTime.",
|
||||||
"consumes": [
|
|
||||||
"application/json"
|
|
||||||
],
|
|
||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
],
|
],
|
||||||
|
"tags": [
|
||||||
|
"query"
|
||||||
|
],
|
||||||
"summary": "Lists all jobs",
|
"summary": "Lists all jobs",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
@@ -70,13 +67,13 @@ const docTemplate = `{
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"type": "integer",
|
"type": "integer",
|
||||||
"description": "Items per page (If empty: No Limit)",
|
"description": "Items per page (Default: 25)",
|
||||||
"name": "items-per-page",
|
"name": "items-per-page",
|
||||||
"in": "query"
|
"in": "query"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"type": "integer",
|
"type": "integer",
|
||||||
"description": "Page Number (If empty: No Paging)",
|
"description": "Page Number (Default: 1)",
|
||||||
"name": "page",
|
"name": "page",
|
||||||
"in": "query"
|
"in": "query"
|
||||||
},
|
},
|
||||||
@@ -89,12 +86,9 @@ const docTemplate = `{
|
|||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
"200": {
|
"200": {
|
||||||
"description": "Array of matching jobs",
|
"description": "Job array and page info",
|
||||||
"schema": {
|
"schema": {
|
||||||
"type": "array",
|
"$ref": "#/definitions/api.GetJobsApiResponse"
|
||||||
"items": {
|
|
||||||
"$ref": "#/definitions/schema.Job"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"400": {
|
"400": {
|
||||||
@@ -109,6 +103,227 @@ const docTemplate = `{
|
|||||||
"$ref": "#/definitions/api.ErrorResponse"
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"403": {
|
||||||
|
"description": "Forbidden",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"500": {
|
||||||
|
"description": "Internal Server Error",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/jobs/delete_job/": {
|
||||||
|
"delete": {
|
||||||
|
"security": [
|
||||||
|
{
|
||||||
|
"ApiKeyAuth": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"description": "Job to delete is specified by request body. All fields are required in this case.",
|
||||||
|
"consumes": [
|
||||||
|
"application/json"
|
||||||
|
],
|
||||||
|
"produces": [
|
||||||
|
"application/json"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"remove"
|
||||||
|
],
|
||||||
|
"summary": "Remove a job from the sql database",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"description": "All fields required",
|
||||||
|
"name": "request",
|
||||||
|
"in": "body",
|
||||||
|
"required": true,
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.DeleteJobApiRequest"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "Success message",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.DeleteJobApiResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"400": {
|
||||||
|
"description": "Bad Request",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"403": {
|
||||||
|
"description": "Forbidden",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"404": {
|
||||||
|
"description": "Resource not found",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"422": {
|
||||||
|
"description": "Unprocessable Entity: finding job failed: sql: no rows in result set",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"500": {
|
||||||
|
"description": "Internal Server Error",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/jobs/delete_job/{id}": {
|
||||||
|
"delete": {
|
||||||
|
"security": [
|
||||||
|
{
|
||||||
|
"ApiKeyAuth": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"description": "Job to remove is specified by database ID. This will not remove the job from the job archive.",
|
||||||
|
"produces": [
|
||||||
|
"application/json"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"remove"
|
||||||
|
],
|
||||||
|
"summary": "Remove a job from the sql database",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"type": "integer",
|
||||||
|
"description": "Database ID of Job",
|
||||||
|
"name": "id",
|
||||||
|
"in": "path",
|
||||||
|
"required": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "Success message",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.DeleteJobApiResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"400": {
|
||||||
|
"description": "Bad Request",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"403": {
|
||||||
|
"description": "Forbidden",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"404": {
|
||||||
|
"description": "Resource not found",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"422": {
|
||||||
|
"description": "Unprocessable Entity: finding job failed: sql: no rows in result set",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"500": {
|
||||||
|
"description": "Internal Server Error",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"/jobs/delete_job_before/{ts}": {
|
||||||
|
"delete": {
|
||||||
|
"security": [
|
||||||
|
{
|
||||||
|
"ApiKeyAuth": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"description": "Remove all jobs with start time before timestamp. The jobs will not be removed from the job archive.",
|
||||||
|
"produces": [
|
||||||
|
"application/json"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"remove"
|
||||||
|
],
|
||||||
|
"summary": "Remove a job from the sql database",
|
||||||
|
"parameters": [
|
||||||
|
{
|
||||||
|
"type": "integer",
|
||||||
|
"description": "Unix epoch timestamp",
|
||||||
|
"name": "ts",
|
||||||
|
"in": "path",
|
||||||
|
"required": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"responses": {
|
||||||
|
"200": {
|
||||||
|
"description": "Success message",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.DeleteJobApiResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"400": {
|
||||||
|
"description": "Bad Request",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"401": {
|
||||||
|
"description": "Unauthorized",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"403": {
|
||||||
|
"description": "Forbidden",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"404": {
|
||||||
|
"description": "Resource not found",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"422": {
|
||||||
|
"description": "Unprocessable Entity: finding job failed: sql: no rows in result set",
|
||||||
|
"schema": {
|
||||||
|
"$ref": "#/definitions/api.ErrorResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
"500": {
|
"500": {
|
||||||
"description": "Internal Server Error",
|
"description": "Internal Server Error",
|
||||||
"schema": {
|
"schema": {
|
||||||
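The three delete routes documented in this hunk accept, respectively, a JSON body, a database ID in the path, and a timestamp in the path. For orientation, a hedged client sketch for the by-ID variant; host, database ID and token are placeholders to adapt:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Placeholders: adjust the host, job database ID and JWT for your setup.
	req, err := http.NewRequest(http.MethodDelete,
		"http://localhost:8080/api/jobs/delete_job/1234", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Authentication uses the X-Auth-Token header, per the security definitions.
	req.Header.Set("X-Auth-Token", "<JWT TOKEN>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// On success the server answers 200 with a DeleteJobApiResponse, e.g. {"msg": "..."}.
	fmt.Println(resp.Status, string(body))
}
```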
@@ -132,6 +347,9 @@ const docTemplate = `{
|
|||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
],
|
],
|
||||||
|
"tags": [
|
||||||
|
"add and modify"
|
||||||
|
],
|
||||||
"summary": "Adds a new job as \"running\"",
|
"summary": "Adds a new job as \"running\"",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
@@ -195,6 +413,9 @@ const docTemplate = `{
|
|||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
],
|
],
|
||||||
|
"tags": [
|
||||||
|
"add and modify"
|
||||||
|
],
|
||||||
"summary": "Marks job as completed and triggers archiving",
|
"summary": "Marks job as completed and triggers archiving",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
@@ -209,7 +430,7 @@ const docTemplate = `{
|
|||||||
],
|
],
|
||||||
"responses": {
|
"responses": {
|
||||||
"200": {
|
"200": {
|
||||||
"description": "Job resource",
|
"description": "Success message",
|
||||||
"schema": {
|
"schema": {
|
||||||
"$ref": "#/definitions/schema.JobMeta"
|
"$ref": "#/definitions/schema.JobMeta"
|
||||||
}
|
}
|
||||||
@@ -267,6 +488,9 @@ const docTemplate = `{
|
|||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
],
|
],
|
||||||
|
"tags": [
|
||||||
|
"add and modify"
|
||||||
|
],
|
||||||
"summary": "Marks job as completed and triggers archiving",
|
"summary": "Marks job as completed and triggers archiving",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
@@ -346,6 +570,9 @@ const docTemplate = `{
|
|||||||
"produces": [
|
"produces": [
|
||||||
"application/json"
|
"application/json"
|
||||||
],
|
],
|
||||||
|
"tags": [
|
||||||
|
"add and modify"
|
||||||
|
],
|
||||||
"summary": "Adds one or more tags to a job",
|
"summary": "Adds one or more tags to a job",
|
||||||
"parameters": [
|
"parameters": [
|
||||||
{
|
{
|
||||||
@@ -363,7 +590,7 @@ const docTemplate = `{
|
|||||||
"schema": {
|
"schema": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"$ref": "#/definitions/api.Tag"
|
"$ref": "#/definitions/api.ApiTag"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -404,8 +631,53 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"definitions": {
|
"definitions": {
|
||||||
|
"api.ApiTag": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"description": "Tag Name",
|
||||||
|
"type": "string",
|
||||||
|
"example": "Testjob"
|
||||||
|
},
|
||||||
|
"type": {
|
||||||
|
"description": "Tag Type",
|
||||||
|
"type": "string",
|
||||||
|
"example": "Debug"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"api.DeleteJobApiRequest": {
|
||||||
|
"type": "object",
|
||||||
|
"required": [
|
||||||
|
"jobId"
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"cluster": {
|
||||||
|
"description": "Cluster of job",
|
||||||
|
"type": "string",
|
||||||
|
"example": "fritz"
|
||||||
|
},
|
||||||
|
"jobId": {
|
||||||
|
"description": "Cluster Job ID of job",
|
||||||
|
"type": "integer",
|
||||||
|
"example": 123000
|
||||||
|
},
|
||||||
|
"startTime": {
|
||||||
|
"description": "Start Time of job as epoch",
|
||||||
|
"type": "integer",
|
||||||
|
"example": 1649723812
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"api.DeleteJobApiResponse": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"msg": {
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"api.ErrorResponse": {
|
"api.ErrorResponse": {
|
||||||
"description": "Error message as returned from backend.",
|
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"error": {
|
"error": {
|
||||||
@@ -418,8 +690,27 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"api.GetJobsApiResponse": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"items": {
|
||||||
|
"description": "Number of jobs returned",
|
||||||
|
"type": "integer"
|
||||||
|
},
|
||||||
|
"jobs": {
|
||||||
|
"description": "Array of jobs",
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"$ref": "#/definitions/schema.JobMeta"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"page": {
|
||||||
|
"description": "Page id returned",
|
||||||
|
"type": "integer"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"api.StartJobApiResponse": {
|
"api.StartJobApiResponse": {
|
||||||
"description": "Successful job start response with database id of new job.",
|
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"id": {
|
"id": {
|
||||||
@@ -429,7 +720,6 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.StopJobApiRequest": {
|
"api.StopJobApiRequest": {
|
||||||
"description": "Request to stop running job using stoptime and final state. They are only required if no database id was provided with endpoint.",
|
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"required": [
|
"required": [
|
||||||
"jobState",
|
"jobState",
|
||||||
@@ -447,14 +737,11 @@ const docTemplate = `{
|
|||||||
"example": 123000
|
"example": 123000
|
||||||
},
|
},
|
||||||
"jobState": {
|
"jobState": {
|
||||||
"description": "Final state of job",
|
"description": "Final job state",
|
||||||
"type": "string",
|
"allOf": [
|
||||||
"enum": [
|
{
|
||||||
"completed",
|
"$ref": "#/definitions/schema.JobState"
|
||||||
"failed",
|
}
|
||||||
"cancelled",
|
|
||||||
"stopped",
|
|
||||||
"timeout"
|
|
||||||
],
|
],
|
||||||
"example": "completed"
|
"example": "completed"
|
||||||
},
|
},
|
||||||
@@ -470,22 +757,6 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"api.Tag": {
|
|
||||||
"description": "Defines a tag using name and type.",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"name": {
|
|
||||||
"description": "Tag Name",
|
|
||||||
"type": "string",
|
|
||||||
"example": "Testjob"
|
|
||||||
},
|
|
||||||
"type": {
|
|
||||||
"description": "Tag Type",
|
|
||||||
"type": "string",
|
|
||||||
"example": "Debug"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"schema.Job": {
|
"schema.Job": {
|
||||||
"description": "Information of a HPC job.",
|
"description": "Information of a HPC job.",
|
||||||
"type": "object",
|
"type": "object",
|
||||||
@@ -524,14 +795,10 @@ const docTemplate = `{
|
|||||||
},
|
},
|
||||||
"jobState": {
|
"jobState": {
|
||||||
"description": "Final state of job",
|
"description": "Final state of job",
|
||||||
"type": "string",
|
"allOf": [
|
||||||
"enum": [
|
{
|
||||||
"completed",
|
"$ref": "#/definitions/schema.JobState"
|
||||||
"failed",
|
}
|
||||||
"cancelled",
|
|
||||||
"stopped",
|
|
||||||
"timeout",
|
|
||||||
"out_of_memory"
|
|
||||||
],
|
],
|
||||||
"example": "completed"
|
"example": "completed"
|
||||||
},
|
},
|
||||||
@@ -656,14 +923,10 @@ const docTemplate = `{
|
|||||||
},
|
},
|
||||||
"jobState": {
|
"jobState": {
|
||||||
"description": "Final state of job",
|
"description": "Final state of job",
|
||||||
"type": "string",
|
"allOf": [
|
||||||
"enum": [
|
{
|
||||||
"completed",
|
"$ref": "#/definitions/schema.JobState"
|
||||||
"failed",
|
}
|
||||||
"cancelled",
|
|
||||||
"stopped",
|
|
||||||
"timeout",
|
|
||||||
"out_of_memory"
|
|
||||||
],
|
],
|
||||||
"example": "completed"
|
"example": "completed"
|
||||||
},
|
},
|
||||||
@@ -759,6 +1022,29 @@ const docTemplate = `{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"schema.JobState": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": [
|
||||||
|
"running",
|
||||||
|
"completed",
|
||||||
|
"failed",
|
||||||
|
"cancelled",
|
||||||
|
"stopped",
|
||||||
|
"timeout",
|
||||||
|
"preempted",
|
||||||
|
"out_of_memory"
|
||||||
|
],
|
||||||
|
"x-enum-varnames": [
|
||||||
|
"JobStateRunning",
|
||||||
|
"JobStateCompleted",
|
||||||
|
"JobStateFailed",
|
||||||
|
"JobStateCancelled",
|
||||||
|
"JobStateStopped",
|
||||||
|
"JobStateTimeout",
|
||||||
|
"JobStatePreempted",
|
||||||
|
"JobStateOutOfMemory"
|
||||||
|
]
|
||||||
|
},
|
||||||
"schema.JobStatistics": {
|
"schema.JobStatistics": {
|
||||||
"description": "Specification for job metric statistics.",
|
"description": "Specification for job metric statistics.",
|
||||||
"type": "object",
|
"type": "object",
|
||||||
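The `schema.JobState` references above replace the previously inlined enums. From the `enum` values and `x-enum-varnames` in this definition, the underlying Go type plausibly looks like the sketch below; the `Valid` method is an assumption inferred from the `state.Valid()` call in the getJobs handler further down:

```go
package schema

// JobState is the set of running and final job states exposed by the API.
type JobState string

const (
	JobStateRunning     JobState = "running"
	JobStateCompleted   JobState = "completed"
	JobStateFailed      JobState = "failed"
	JobStateCancelled   JobState = "cancelled"
	JobStateStopped     JobState = "stopped"
	JobStateTimeout     JobState = "timeout"
	JobStatePreempted   JobState = "preempted"
	JobStateOutOfMemory JobState = "out_of_memory"
)

// Valid reports whether s is one of the known job states.
func (s JobState) Valid() bool {
	switch s {
	case JobStateRunning, JobStateCompleted, JobStateFailed,
		JobStateCancelled, JobStateStopped, JobStateTimeout,
		JobStatePreempted, JobStateOutOfMemory:
		return true
	}
	return false
}
```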
@@ -839,22 +1125,26 @@ const docTemplate = `{
|
|||||||
},
|
},
|
||||||
"securityDefinitions": {
|
"securityDefinitions": {
|
||||||
"ApiKeyAuth": {
|
"ApiKeyAuth": {
|
||||||
"description": "JWT based authentification for general API endpoint use.",
|
|
||||||
"type": "apiKey",
|
"type": "apiKey",
|
||||||
"name": "X-Auth-Token",
|
"name": "X-Auth-Token",
|
||||||
"in": "header"
|
"in": "header"
|
||||||
}
|
}
|
||||||
}
|
},
|
||||||
|
"tags": [
|
||||||
|
{
|
||||||
|
"name": "Job API"
|
||||||
|
}
|
||||||
|
]
|
||||||
}`
|
}`
|
||||||
|
|
||||||
// SwaggerInfo holds exported Swagger Info so clients can modify it
|
// SwaggerInfo holds exported Swagger Info so clients can modify it
|
||||||
var SwaggerInfo = &swag.Spec{
|
var SwaggerInfo = &swag.Spec{
|
||||||
Version: "0.1.0",
|
Version: "0.2.0",
|
||||||
Host: "clustercockpit.localhost:8082",
|
Host: "localhost:8080",
|
||||||
BasePath: "/api",
|
BasePath: "/api",
|
||||||
Schemes: []string{},
|
Schemes: []string{},
|
||||||
Title: "ClusterCockpit REST API",
|
Title: "ClusterCockpit REST API",
|
||||||
Description: "Defines a tag using name and type.",
|
Description: "API for batch job control.",
|
||||||
InfoInstanceName: "swagger",
|
InfoInstanceName: "swagger",
|
||||||
SwaggerTemplate: docTemplate,
|
SwaggerTemplate: docTemplate,
|
||||||
}
|
}
|
||||||
|
@@ -6,7 +6,6 @@ package api
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"context"
|
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
@@ -23,7 +22,6 @@ import (
|
|||||||
"github.com/ClusterCockpit/cc-backend/internal/auth"
|
"github.com/ClusterCockpit/cc-backend/internal/auth"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/graph"
|
"github.com/ClusterCockpit/cc-backend/internal/graph"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
"github.com/ClusterCockpit/cc-backend/internal/repository"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
||||||
@@ -32,9 +30,10 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// @title ClusterCockpit REST API
|
// @title ClusterCockpit REST API
|
||||||
// @version 0.1.0
|
// @version 0.2.0
|
||||||
// @description API for batch job control.
|
// @description API for batch job control.
|
||||||
// @termsOfService https://monitoring.nhr.fau.de/imprint
|
|
||||||
|
// @tag.name Job API
|
||||||
|
|
||||||
// @contact.name ClusterCockpit Project
|
// @contact.name ClusterCockpit Project
|
||||||
// @contact.url https://github.com/ClusterCockpit
|
// @contact.url https://github.com/ClusterCockpit
|
||||||
@@ -43,20 +42,19 @@ import (
|
|||||||
// @license.name MIT License
|
// @license.name MIT License
|
||||||
// @license.url https://opensource.org/licenses/MIT
|
// @license.url https://opensource.org/licenses/MIT
|
||||||
|
|
||||||
// @host clustercockpit.localhost:8082
|
// @host localhost:8080
|
||||||
// @BasePath /api
|
// @basePath /api
|
||||||
|
|
||||||
// @securityDefinitions.apikey ApiKeyAuth
|
// @securityDefinitions.apikey ApiKeyAuth
|
||||||
// @in header
|
// @in header
|
||||||
// @name X-Auth-Token
|
// @name X-Auth-Token
|
||||||
// @description JWT based authentification for general API endpoint use.
|
|
||||||
|
|
||||||
type RestApi struct {
|
type RestApi struct {
|
||||||
JobRepository *repository.JobRepository
|
JobRepository *repository.JobRepository
|
||||||
Resolver *graph.Resolver
|
Resolver *graph.Resolver
|
||||||
Authentication *auth.Authentication
|
Authentication *auth.Authentication
|
||||||
MachineStateDir string
|
MachineStateDir string
|
||||||
OngoingArchivings sync.WaitGroup
|
RepositoryMutex sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (api *RestApi) MountRoutes(r *mux.Router) {
|
func (api *RestApi) MountRoutes(r *mux.Router) {
|
||||||
@@ -72,9 +70,13 @@ func (api *RestApi) MountRoutes(r *mux.Router) {
|
|||||||
// r.HandleFunc("/jobs/{id}", api.getJob).Methods(http.MethodGet)
|
// r.HandleFunc("/jobs/{id}", api.getJob).Methods(http.MethodGet)
|
||||||
r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch)
|
r.HandleFunc("/jobs/tag_job/{id}", api.tagJob).Methods(http.MethodPost, http.MethodPatch)
|
||||||
r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet)
|
r.HandleFunc("/jobs/metrics/{id}", api.getJobMetrics).Methods(http.MethodGet)
|
||||||
|
r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete)
|
||||||
|
r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete)
|
||||||
|
r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete)
|
||||||
|
|
||||||
if api.Authentication != nil {
|
if api.Authentication != nil {
|
||||||
r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet)
|
r.HandleFunc("/jwt/", api.getJWT).Methods(http.MethodGet)
|
||||||
|
r.HandleFunc("/roles/", api.getRoles).Methods(http.MethodGet)
|
||||||
r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut)
|
r.HandleFunc("/users/", api.createUser).Methods(http.MethodPost, http.MethodPut)
|
||||||
r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet)
|
r.HandleFunc("/users/", api.getUsers).Methods(http.MethodGet)
|
||||||
r.HandleFunc("/users/", api.deleteUser).Methods(http.MethodDelete)
|
r.HandleFunc("/users/", api.deleteUser).Methods(http.MethodDelete)
|
||||||
@@ -89,44 +91,58 @@ func (api *RestApi) MountRoutes(r *mux.Router) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StartJobApiResponse model
|
// StartJobApiResponse model
|
||||||
// @Description Successful job start response with database id of new job.
|
|
||||||
type StartJobApiResponse struct {
|
type StartJobApiResponse struct {
|
||||||
// Database ID of new job
|
// Database ID of new job
|
||||||
DBID int64 `json:"id"`
|
DBID int64 `json:"id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteJobApiResponse model
|
||||||
|
type DeleteJobApiResponse struct {
|
||||||
|
Message string `json:"msg"`
|
||||||
|
}
|
||||||
|
|
||||||
// StopJobApiRequest model
|
// StopJobApiRequest model
|
||||||
// @Description Request to stop running job using stoptime and final state.
|
|
||||||
// @Description They are only required if no database id was provided with endpoint.
|
|
||||||
type StopJobApiRequest struct {
|
type StopJobApiRequest struct {
|
||||||
// Stop Time of job as epoch
|
// Stop Time of job as epoch
|
||||||
StopTime int64 `json:"stopTime" validate:"required" example:"1649763839"`
|
StopTime int64 `json:"stopTime" validate:"required" example:"1649763839"`
|
||||||
State schema.JobState `json:"jobState" validate:"required" example:"completed" enums:"completed,failed,cancelled,stopped,timeout"` // Final job state
|
State schema.JobState `json:"jobState" validate:"required" example:"completed"` // Final job state
|
||||||
JobId *int64 `json:"jobId" example:"123000"` // Cluster Job ID of job
|
JobId *int64 `json:"jobId" example:"123000"` // Cluster Job ID of job
|
||||||
Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
|
Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
|
||||||
StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
|
StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteJobApiRequest model
|
||||||
|
type DeleteJobApiRequest struct {
|
||||||
|
JobId *int64 `json:"jobId" validate:"required" example:"123000"` // Cluster Job ID of job
|
||||||
|
Cluster *string `json:"cluster" example:"fritz"` // Cluster of job
|
||||||
|
StartTime *int64 `json:"startTime" example:"1649723812"` // Start Time of job as epoch
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetJobsApiResponse model
|
||||||
|
type GetJobsApiResponse struct {
|
||||||
|
Jobs []*schema.JobMeta `json:"jobs"` // Array of jobs
|
||||||
|
Items int `json:"items"` // Number of jobs returned
|
||||||
|
Page int `json:"page"` // Page id returned
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorResponse model
|
// ErrorResponse model
|
||||||
// @Description Error message as returned from backend.
|
|
||||||
type ErrorResponse struct {
|
type ErrorResponse struct {
|
||||||
// Statustext of Errorcode
|
// Statustext of Errorcode
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
Error string `json:"error"` // Error Message
|
Error string `json:"error"` // Error Message
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tag model
|
// ApiTag model
|
||||||
// @Description Defines a tag using name and type.
|
type ApiTag struct {
|
||||||
type Tag struct {
|
|
||||||
// Tag Type
|
// Tag Type
|
||||||
Type string `json:"type" example:"Debug"`
|
Type string `json:"type" example:"Debug"`
|
||||||
Name string `json:"name" example:"Testjob"` // Tag Name
|
Name string `json:"name" example:"Testjob"` // Tag Name
|
||||||
}
|
}
|
||||||
|
|
||||||
type TagJobApiRequest []*Tag
|
type TagJobApiRequest []*ApiTag
|
||||||
|
|
||||||
func handleError(err error, statusCode int, rw http.ResponseWriter) {
|
func handleError(err error, statusCode int, rw http.ResponseWriter) {
|
||||||
log.Warnf("REST API: %s", err.Error())
|
log.Warnf("REST ERROR : %s", err.Error())
|
||||||
rw.Header().Add("Content-Type", "application/json")
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
rw.WriteHeader(statusCode)
|
rw.WriteHeader(statusCode)
|
||||||
json.NewEncoder(rw).Encode(ErrorResponse{
|
json.NewEncoder(rw).Encode(ErrorResponse{
|
||||||
@@ -142,35 +158,43 @@ func decode(r io.Reader, val interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getJobs godoc
|
// getJobs godoc
|
||||||
// @Summary Lists all jobs
|
// @summary Lists all jobs
|
||||||
// @Description Get a list of all jobs. Filters can be applied using query parameters.
|
// @tags query
|
||||||
// @Description Number of results can be limited by page. Results are sorted by descending startTime.
|
// @description Get a list of all jobs. Filters can be applied using query parameters.
|
||||||
// @Accept json
|
// @description Number of results can be limited by page. Results are sorted by descending startTime.
|
||||||
// @Produce json
|
// @produce json
|
||||||
// @Param state query string false "Job State" Enums(running, completed, failed, cancelled, stopped, timeout)
|
// @param state query string false "Job State" Enums(running, completed, failed, cancelled, stopped, timeout)
|
||||||
// @Param cluster query string false "Job Cluster"
|
// @param cluster query string false "Job Cluster"
|
||||||
// @Param start-time query string false "Syntax: '$from-$to', as unix epoch timestamps in seconds"
|
// @param start-time query string false "Syntax: '$from-$to', as unix epoch timestamps in seconds"
|
||||||
// @Param items-per-page query int false "Items per page (If empty: No Limit)"
|
// @param items-per-page query int false "Items per page (Default: 25)"
|
||||||
// @Param page query int false "Page Number (If empty: No Paging)"
|
// @param page query int false "Page Number (Default: 1)"
|
||||||
// @Param with-metadata query bool false "Include metadata (e.g. jobScript) in response"
|
// @param with-metadata query bool false "Include metadata (e.g. jobScript) in response"
|
||||||
// @Success 200 {array} schema.Job "Array of matching jobs"
|
// @success 200 {object} api.GetJobsApiResponse "Job array and page info"
|
||||||
// @Failure 400 {object} api.ErrorResponse "Bad Request"
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
// @Failure 401 {object} api.ErrorResponse "Unauthorized"
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
// @Failure 500 {object} api.ErrorResponse "Internal Server Error"
|
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
||||||
// @Security ApiKeyAuth
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
// @Router /jobs/ [get]
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/ [get]
|
||||||
func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
withMetadata := false
|
withMetadata := false
|
||||||
filter := &model.JobFilter{}
|
filter := &model.JobFilter{}
|
||||||
page := &model.PageRequest{ItemsPerPage: -1, Page: 1}
|
page := &model.PageRequest{ItemsPerPage: 25, Page: 1}
|
||||||
order := &model.OrderByInput{Field: "startTime", Order: model.SortDirectionEnumDesc}
|
order := &model.OrderByInput{Field: "startTime", Order: model.SortDirectionEnumDesc}
|
||||||
|
|
||||||
for key, vals := range r.URL.Query() {
|
for key, vals := range r.URL.Query() {
|
||||||
switch key {
|
switch key {
|
||||||
case "state":
|
case "state":
|
||||||
for _, s := range vals {
|
for _, s := range vals {
|
||||||
state := schema.JobState(s)
|
state := schema.JobState(s)
|
||||||
if !state.Valid() {
|
if !state.Valid() {
|
||||||
http.Error(rw, "invalid query parameter value: state", http.StatusBadRequest)
|
handleError(fmt.Errorf("invalid query parameter value: state"),
|
||||||
|
http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
filter.State = append(filter.State, state)
|
filter.State = append(filter.State, state)
|
||||||
@ -180,17 +204,18 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
|||||||
case "start-time":
|
case "start-time":
|
||||||
st := strings.Split(vals[0], "-")
|
st := strings.Split(vals[0], "-")
|
||||||
if len(st) != 2 {
|
if len(st) != 2 {
|
||||||
http.Error(rw, "invalid query parameter value: startTime", http.StatusBadRequest)
|
handleError(fmt.Errorf("invalid query parameter value: startTime"),
|
||||||
|
http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
from, err := strconv.ParseInt(st[0], 10, 64)
|
from, err := strconv.ParseInt(st[0], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
to, err := strconv.ParseInt(st[1], 10, 64)
|
to, err := strconv.ParseInt(st[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ufrom, uto := time.Unix(from, 0), time.Unix(to, 0)
|
ufrom, uto := time.Unix(from, 0), time.Unix(to, 0)
|
||||||
@ -198,28 +223,29 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
|||||||
case "page":
|
case "page":
|
||||||
x, err := strconv.Atoi(vals[0])
|
x, err := strconv.Atoi(vals[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
page.Page = x
|
page.Page = x
|
||||||
case "items-per-page":
|
case "items-per-page":
|
||||||
x, err := strconv.Atoi(vals[0])
|
x, err := strconv.Atoi(vals[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
handleError(err, http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
page.ItemsPerPage = x
|
page.ItemsPerPage = x
|
||||||
case "with-metadata":
|
case "with-metadata":
|
||||||
withMetadata = true
|
withMetadata = true
|
||||||
default:
|
default:
|
||||||
http.Error(rw, "invalid query parameter: "+key, http.StatusBadRequest)
|
handleError(fmt.Errorf("invalid query parameter: %s", key),
|
||||||
|
http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
jobs, err := api.JobRepository.QueryJobs(r.Context(), []*model.JobFilter{filter}, page, order)
|
jobs, err := api.JobRepository.QueryJobs(r.Context(), []*model.JobFilter{filter}, page, order)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
handleError(err, http.StatusInternalServerError, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -227,7 +253,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
|||||||
for _, job := range jobs {
|
for _, job := range jobs {
|
||||||
if withMetadata {
|
if withMetadata {
|
||||||
if _, err := api.JobRepository.FetchMetadata(job); err != nil {
|
if _, err := api.JobRepository.FetchMetadata(job); err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
handleError(err, http.StatusInternalServerError, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -240,7 +266,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
res.Tags, err = api.JobRepository.GetTags(&job.ID)
|
res.Tags, err = api.JobRepository.GetTags(&job.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
handleError(err, http.StatusInternalServerError, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -248,7 +274,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
|||||||
res.Statistics, err = archive.GetStatistics(job)
|
res.Statistics, err = archive.GetStatistics(job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
handleError(err, http.StatusInternalServerError, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -258,32 +284,44 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("/api/jobs: %d jobs returned", len(results))
|
log.Debugf("/api/jobs: %d jobs returned", len(results))
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
bw := bufio.NewWriter(rw)
|
bw := bufio.NewWriter(rw)
|
||||||
defer bw.Flush()
|
defer bw.Flush()
|
||||||
if err := json.NewEncoder(bw).Encode(map[string]interface{}{
|
|
||||||
"jobs": results,
|
payload := GetJobsApiResponse{
|
||||||
}); err != nil {
|
Jobs: results,
|
||||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
Items: page.ItemsPerPage,
|
||||||
|
Page: page.Page,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewEncoder(bw).Encode(payload); err != nil {
|
||||||
|
handleError(err, http.StatusInternalServerError, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
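With this change the endpoint no longer returns a bare job array but a wrapper object that echoes the paging parameters. A client-side sketch of consuming the new shape; the JSON tags `jobs`, `items` and `page` are inferred from the Go field names and not confirmed by this hunk, and the base URL is hypothetical:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Assumed JSON shape of api.GetJobsApiResponse; only the fields used
// here are modelled.
type jobsPage struct {
	Jobs  []json.RawMessage `json:"jobs"`  // decode lazily per job
	Items int               `json:"items"` // items per page (default 25)
	Page  int               `json:"page"`  // page number (default 1)
}

func main() {
	req, _ := http.NewRequest("GET",
		"http://localhost:8080/api/jobs/?state=running&items-per-page=25&page=1", nil)
	req.Header.Set("Authorization", "Bearer <JWT>") // ApiKeyAuth

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var p jobsPage
	if err := json.NewDecoder(resp.Body).Decode(&p); err != nil {
		panic(err)
	}
	fmt.Printf("page %d: %d jobs\n", p.Page, len(p.Jobs))
}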
|
|
||||||
// tagJob godoc
|
// tagJob godoc
|
||||||
// @Summary Adds one or more tags to a job
|
// @summary Adds one or more tags to a job
|
||||||
// @Description Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely.
|
// @tags add and modify
|
||||||
// @Description If tagged job is already finished: Tag will be written directly to respective archive files.
|
// @description Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely.
|
||||||
// @Accept json
|
// @description If tagged job is already finished: Tag will be written directly to respective archive files.
|
||||||
// @Produce json
|
// @accept json
|
||||||
// @Param id path int true "Job Database ID"
|
// @produce json
|
||||||
// @Param request body api.TagJobApiRequest true "Array of tag-objects to add"
|
// @param id path int true "Job Database ID"
|
||||||
// @Success 200 {object} schema.Job "Updated job resource"
|
// @param request body api.TagJobApiRequest true "Array of tag-objects to add"
|
||||||
// @Failure 400 {object} api.ErrorResponse "Bad Request"
|
// @success 200 {object} schema.Job "Updated job resource"
|
||||||
// @Failure 401 {object} api.ErrorResponse "Unauthorized"
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
// @Failure 404 {object} api.ErrorResponse "Job or tag does not exist"
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
// @Failure 500 {object} api.ErrorResponse "Internal Server Error"
|
// @failure 404 {object} api.ErrorResponse "Job or tag does not exist"
|
||||||
// @Security ApiKeyAuth
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
// @Router /jobs/tag_job/{id} [post]
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/tag_job/{id} [post]
|
||||||
func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
iid, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
|
iid, err := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
http.Error(rw, err.Error(), http.StatusBadRequest)
|
||||||
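TagJobApiRequest is now an array of ApiTag objects, so a tagging call posts a plain JSON array. A hedged client sketch; the `type`/`name` JSON tags follow the ApiTag fields shown earlier in the diff, while the base URL and the job ID are placeholders:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type apiTag struct {
	Type string `json:"type"` // e.g. "testing"
	Name string `json:"name"` // e.g. "Testjob"
}

func main() {
	tags := []apiTag{{Type: "testing", Name: "Testjob"}}
	body, _ := json.Marshal(tags) // TagJobApiRequest is a bare array

	req, _ := http.NewRequest("POST",
		"http://localhost:8080/api/jobs/tag_job/123", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}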
@ -328,23 +366,24 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// startJob godoc
|
// startJob godoc
|
||||||
// @Summary Adds a new job as "running"
|
// @summary Adds a new job as "running"
|
||||||
// @Description Job specified in request body will be saved to database as "running" with new DB ID.
|
// @tags add and modify
|
||||||
// @Description Job specifications follow the 'JobMeta' scheme, API will fail to execute if requirements are not met.
|
// @description Job specified in request body will be saved to database as "running" with new DB ID.
|
||||||
// @Accept json
|
// @description Job specifications follow the 'JobMeta' scheme, API will fail to execute if requirements are not met.
|
||||||
// @Produce json
|
// @accept json
|
||||||
// @Param request body schema.JobMeta true "Job to add"
|
// @produce json
|
||||||
// @Success 201 {object} api.StartJobApiResponse "Job added successfully"
|
// @param request body schema.JobMeta true "Job to add"
|
||||||
// @Failure 400 {object} api.ErrorResponse "Bad Request"
|
// @success 201 {object} api.StartJobApiResponse "Job added successfully"
|
||||||
// @Failure 401 {object} api.ErrorResponse "Unauthorized"
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
// @Failure 403 {object} api.ErrorResponse "Forbidden"
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
// @Failure 422 {object} api.ErrorResponse "Unprocessable Entity: The combination of jobId, clusterId and startTime does already exist"
|
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
||||||
// @Failure 500 {object} api.ErrorResponse "Internal Server Error"
|
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: The combination of jobId, clusterId and startTime does already exist"
|
||||||
// @Security ApiKeyAuth
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
// @Router /jobs/start_job/ [post]
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/start_job/ [post]
|
||||||
func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
|
||||||
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -362,15 +401,22 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// acquire lock to avoid race condition between API calls
|
||||||
|
var unlockOnce sync.Once
|
||||||
|
api.RepositoryMutex.Lock()
|
||||||
|
defer unlockOnce.Do(api.RepositoryMutex.Unlock)
|
||||||
|
|
||||||
// Check if combination of (job_id, cluster_id, start_time) already exists:
|
// Check if combination of (job_id, cluster_id, start_time) already exists:
|
||||||
job, err := api.JobRepository.Find(&req.JobID, &req.Cluster, nil)
|
jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil)
|
||||||
if err != nil && err != sql.ErrNoRows {
|
if err != nil && err != sql.ErrNoRows {
|
||||||
handleError(fmt.Errorf("checking for duplicate failed: %w", err), http.StatusInternalServerError, rw)
|
handleError(fmt.Errorf("checking for duplicate failed: %w", err), http.StatusInternalServerError, rw)
|
||||||
return
|
return
|
||||||
} else if err == nil {
|
} else if err == nil {
|
||||||
if (req.StartTime - job.StartTimeUnix) < 86400 {
|
for _, job := range jobs {
|
||||||
handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw)
|
if (req.StartTime - job.StartTimeUnix) < 86400 {
|
||||||
return
|
handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -379,6 +425,8 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
|
|||||||
handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw)
|
handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// unlock here, adding Tags can be async
|
||||||
|
unlockOnce.Do(api.RepositoryMutex.Unlock)
|
||||||
|
|
||||||
for _, tag := range req.Tags {
|
for _, tag := range req.Tags {
|
||||||
if _, err := api.JobRepository.AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
|
if _, err := api.JobRepository.AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
|
||||||
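The sync.Once around the mutex unlock is the interesting detail here: the lock must be released early on the success path, since tag creation can proceed without it, but the deferred unlock must not fire a second time on the other return paths. sync.Once makes both the early and the deferred unlock idempotent. A standalone sketch of the pattern:

package main

import (
	"fmt"
	"sync"
)

var repositoryMutex sync.Mutex

func insertJob() error {
	// Serialize the duplicate check + insert against concurrent API calls.
	var unlockOnce sync.Once
	repositoryMutex.Lock()
	defer unlockOnce.Do(repositoryMutex.Unlock) // covers every early return

	// ... duplicate check and INSERT would happen here ...

	// Unlock as soon as the critical section is done; follow-up work
	// (adding tags) does not need the lock. The deferred call then
	// becomes a no-op because the Once has already fired.
	unlockOnce.Do(repositoryMutex.Unlock)

	// ... tag handling ...
	return nil
}

func main() {
	if err := insertJob(); err != nil {
		fmt.Println(err)
	}
}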
@ -397,25 +445,26 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// stopJobById godoc
|
// stopJobById godoc
|
||||||
// @Summary Marks job as completed and triggers archiving
|
// @summary Marks job as completed and triggers archiving
|
||||||
// @Description Job to stop is specified by database ID. Only stopTime and final state are required in request body.
|
// @tags add and modify
|
||||||
// @Description Returns full job resource information according to 'JobMeta' scheme.
|
// @description Job to stop is specified by database ID. Only stopTime and final state are required in request body.
|
||||||
// @Accept json
|
// @description Returns full job resource information according to 'JobMeta' scheme.
|
||||||
// @Produce json
|
// @accept json
|
||||||
// @Param id path int true "Database ID of Job"
|
// @produce json
|
||||||
// @Param request body api.StopJobApiRequest true "stopTime and final state in request body"
|
// @param id path int true "Database ID of Job"
|
||||||
// @Success 200 {object} schema.JobMeta "Job resource"
|
// @param request body api.StopJobApiRequest true "stopTime and final state in request body"
|
||||||
// @Failure 400 {object} api.ErrorResponse "Bad Request"
|
// @success 200 {object} schema.JobMeta "Job resource"
|
||||||
// @Failure 401 {object} api.ErrorResponse "Unauthorized"
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
// @Failure 403 {object} api.ErrorResponse "Forbidden"
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
// @Failure 404 {object} api.ErrorResponse "Resource not found"
|
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
||||||
// @Failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
|
// @failure 404 {object} api.ErrorResponse "Resource not found"
|
||||||
// @Failure 500 {object} api.ErrorResponse "Internal Server Error"
|
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
|
||||||
// @Security ApiKeyAuth
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
// @Router /jobs/stop_job/{id} [post]
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/stop_job/{id} [post]
|
||||||
func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
|
||||||
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -451,23 +500,24 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// stopJobByRequest godoc
|
// stopJobByRequest godoc
|
||||||
// @Summary Marks job as completed and triggers archiving
|
// @summary Marks job as completed and triggers archiving
|
||||||
// @Description Job to stop is specified by request body. All fields are required in this case.
|
// @tags add and modify
|
||||||
// @Description Returns full job resource information according to 'JobMeta' scheme.
|
// @description Job to stop is specified by request body. All fields are required in this case.
|
||||||
// @Produce json
|
// @description Returns full job resource information according to 'JobMeta' scheme.
|
||||||
// @Param request body api.StopJobApiRequest true "All fields required"
|
// @produce json
|
||||||
// @Success 200 {object} schema.JobMeta "Job resource"
|
// @param request body api.StopJobApiRequest true "All fields required"
|
||||||
// @Failure 400 {object} api.ErrorResponse "Bad Request"
|
// @success 200 {object} schema.JobMeta "Success message"
|
||||||
// @Failure 401 {object} api.ErrorResponse "Unauthorized"
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
// @Failure 403 {object} api.ErrorResponse "Forbidden"
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
// @Failure 404 {object} api.ErrorResponse "Resource not found"
|
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
||||||
// @Failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
|
// @failure 404 {object} api.ErrorResponse "Resource not found"
|
||||||
// @Failure 500 {object} api.ErrorResponse "Internal Server Error"
|
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
|
||||||
// @Security ApiKeyAuth
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
// @Router /jobs/stop_job/ [post]
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/stop_job/ [post]
|
||||||
func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
|
||||||
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -496,6 +546,159 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
|
|||||||
api.checkAndHandleStopJob(rw, job, req)
|
api.checkAndHandleStopJob(rw, job, req)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// deleteJobById godoc
|
||||||
|
// @summary Remove a job from the sql database
|
||||||
|
// @tags remove
|
||||||
|
// @description Job to remove is specified by database ID. This will not remove the job from the job archive.
|
||||||
|
// @produce json
|
||||||
|
// @param id path int true "Database ID of Job"
|
||||||
|
// @success 200 {object} api.DeleteJobApiResponse "Success message"
|
||||||
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
||||||
|
// @failure 404 {object} api.ErrorResponse "Resource not found"
|
||||||
|
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
|
||||||
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/delete_job/{id} [delete]
|
||||||
|
func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch job (that will be deleted) from db
|
||||||
|
id, ok := mux.Vars(r)["id"]
|
||||||
|
var err error
|
||||||
|
if ok {
|
||||||
|
id, e := strconv.ParseInt(id, 10, 64)
|
||||||
|
if e != nil {
|
||||||
|
handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = api.JobRepository.DeleteJobById(id)
|
||||||
|
} else {
|
||||||
|
handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
|
rw.WriteHeader(http.StatusOK)
|
||||||
|
json.NewEncoder(rw).Encode(DeleteJobApiResponse{
|
||||||
|
Message: fmt.Sprintf("Successfully deleted job %s", id),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteJobByRequest godoc
|
||||||
|
// @summary Remove a job from the sql database
|
||||||
|
// @tags remove
|
||||||
|
// @description Job to delete is specified by request body. All fields are required in this case.
|
||||||
|
// @accept json
|
||||||
|
// @produce json
|
||||||
|
// @param request body api.DeleteJobApiRequest true "All fields required"
|
||||||
|
// @success 200 {object} api.DeleteJobApiResponse "Success message"
|
||||||
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
||||||
|
// @failure 404 {object} api.ErrorResponse "Resource not found"
|
||||||
|
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
|
||||||
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/delete_job/ [delete]
|
||||||
|
func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse request body
|
||||||
|
req := DeleteJobApiRequest{}
|
||||||
|
if err := decode(r.Body, &req); err != nil {
|
||||||
|
handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch job (that will be deleted) from db
|
||||||
|
var job *schema.Job
|
||||||
|
var err error
|
||||||
|
if req.JobId == nil {
|
||||||
|
handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = api.JobRepository.DeleteJobById(job.ID)
|
||||||
|
if err != nil {
|
||||||
|
handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
|
rw.WriteHeader(http.StatusOK)
|
||||||
|
json.NewEncoder(rw).Encode(DeleteJobApiResponse{
|
||||||
|
Message: fmt.Sprintf("Successfully deleted job %d", job.ID),
|
||||||
|
})
|
||||||
|
}
|
||||||
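The body-based delete mirrors stop_job: the job is identified by jobId plus optional cluster and startTime. A client sketch; the JSON tags are assumed to follow the usual jobId/cluster/startTime naming, which this hunk does not confirm:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Assumed wire format of api.DeleteJobApiRequest.
type deleteJobRequest struct {
	JobID     int64  `json:"jobId"`
	Cluster   string `json:"cluster,omitempty"`
	StartTime int64  `json:"startTime,omitempty"`
}

func main() {
	body, _ := json.Marshal(deleteJobRequest{
		JobID:     12345,
		Cluster:   "testcluster",
		StartTime: 1660000000,
	})
	req, _ := http.NewRequest(http.MethodDelete,
		"http://localhost:8080/api/jobs/delete_job/", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <JWT>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}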
|
|
||||||
|
// deleteJobBefore godoc
|
||||||
|
// @summary Remove a job from the sql database
|
||||||
|
// @tags remove
|
||||||
|
// @description Remove all jobs with start time before timestamp. The jobs will not be removed from the job archive.
|
||||||
|
// @produce json
|
||||||
|
// @param ts path int true "Unix epoch timestamp"
|
||||||
|
// @success 200 {object} api.DeleteJobApiResponse "Success message"
|
||||||
|
// @failure 400 {object} api.ErrorResponse "Bad Request"
|
||||||
|
// @failure 401 {object} api.ErrorResponse "Unauthorized"
|
||||||
|
// @failure 403 {object} api.ErrorResponse "Forbidden"
|
||||||
|
// @failure 404 {object} api.ErrorResponse "Resource not found"
|
||||||
|
// @failure 422 {object} api.ErrorResponse "Unprocessable Entity: finding job failed: sql: no rows in result set"
|
||||||
|
// @failure 500 {object} api.ErrorResponse "Internal Server Error"
|
||||||
|
// @security ApiKeyAuth
|
||||||
|
// @router /jobs/delete_job_before/{ts} [delete]
|
||||||
|
func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
||||||
|
handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var cnt int
|
||||||
|
// Parse cutoff timestamp from the URL path
|
||||||
|
id, ok := mux.Vars(r)["ts"]
|
||||||
|
var err error
|
||||||
|
if ok {
|
||||||
|
ts, e := strconv.ParseInt(id, 10, 64)
|
||||||
|
if e != nil {
|
||||||
|
handleError(fmt.Errorf("integer expected in path for ts: %w", e), http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cnt, err = api.JobRepository.DeleteJobsBefore(ts)
|
||||||
|
} else {
|
||||||
|
handleError(errors.New("the parameter 'ts' is required"), http.StatusBadRequest, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
handleError(fmt.Errorf("deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rw.Header().Add("Content-Type", "application/json")
|
||||||
|
rw.WriteHeader(http.StatusOK)
|
||||||
|
json.NewEncoder(rw).Encode(DeleteJobApiResponse{
|
||||||
|
Message: fmt.Sprintf("Successfully deleted %d jobs", cnt),
|
||||||
|
})
|
||||||
|
}
|
||||||
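delete_job_before takes a unix timestamp as path parameter and removes every job that started earlier; the job archive stays untouched. A sketch of a retention-style call that prunes jobs older than 90 days, with a hypothetical server URL:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	cutoff := time.Now().AddDate(0, 0, -90).Unix() // 90-day retention

	url := fmt.Sprintf("http://localhost:8080/api/jobs/delete_job_before/%d", cutoff)
	req, _ := http.NewRequest(http.MethodDelete, url, nil)
	req.Header.Set("Authorization", "Bearer <JWT>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // success message reports the count
}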
|
|
||||||
func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Job, req StopJobApiRequest) {
|
func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Job, req StopJobApiRequest) {
|
||||||
|
|
||||||
// Sanity checks
|
// Sanity checks
|
||||||
@ -507,7 +710,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
|
|||||||
if req.State != "" && !req.State.Valid() {
|
if req.State != "" && !req.State.Valid() {
|
||||||
handleError(fmt.Errorf("invalid job state: %#v", req.State), http.StatusBadRequest, rw)
|
handleError(fmt.Errorf("invalid job state: %#v", req.State), http.StatusBadRequest, rw)
|
||||||
return
|
return
|
||||||
} else {
|
} else if req.State == "" {
|
||||||
req.State = schema.JobStateCompleted
|
req.State = schema.JobStateCompleted
|
||||||
}
|
}
|
||||||
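The switch from a plain else to else if req.State == "" is a real bug fix: before, every request carrying a valid, non-empty state fell through to the default branch and was silently rewritten to completed. The guard keeps the default strictly for empty input. A condensed, self-contained sketch of the corrected logic, with the schema types elided:

package main

import "fmt"

// normalizeState distills the corrected default handling; validity is
// injected so the snippet stays self-contained.
func normalizeState(state string, valid func(string) bool) (string, error) {
	if state != "" && !valid(state) {
		return "", fmt.Errorf("invalid job state: %#v", state)
	} else if state == "" {
		state = "completed" // only applied when the caller sent nothing
	}
	return state, nil
}

func main() {
	valid := func(s string) bool { return s == "completed" || s == "failed" }
	s, _ := normalizeState("failed", valid)
	fmt.Println(s) // "failed" survives; the old plain else forced "completed"
}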
|
|
||||||
@ -533,59 +736,10 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// We need to start a new goroutine as this function needs to return
|
// Trigger async archiving
|
||||||
// for the response to be flushed to the client.
|
api.JobRepository.TriggerArchiving(job)
|
||||||
api.OngoingArchivings.Add(1) // So that a shutdown does not interrupt this goroutine.
|
|
||||||
go func() {
|
|
||||||
defer api.OngoingArchivings.Done()
|
|
||||||
|
|
||||||
if _, err := api.JobRepository.FetchMetadata(job); err != nil {
|
|
||||||
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
|
|
||||||
api.JobRepository.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// metricdata.ArchiveJob will fetch all the data from a MetricDataRepository and create meta.json/data.json files
|
|
||||||
jobMeta, err := metricdata.ArchiveJob(job, context.Background())
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
|
|
||||||
api.JobRepository.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the jobs database entry one last time:
|
|
||||||
if err := api.JobRepository.Archive(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil {
|
|
||||||
log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("archiving job (dbid: %d) successful", job.ID)
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
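The inline goroutine is replaced by JobRepository.TriggerArchiving(job), which hides the WaitGroup bookkeeping and the archive pipeline behind the repository. Its internals are not part of this diff; a plausible sketch under that assumption, keeping the WaitGroup so a shutdown can drain in-flight archivings:

package main

import (
	"fmt"
	"sync"
)

type job struct{ ID int64 }

type jobRepository struct {
	archivings sync.WaitGroup // lets a shutdown wait for in-flight work
}

// TriggerArchiving sketches the assumed repository method: archive
// asynchronously so the HTTP response can be flushed immediately.
func (r *jobRepository) TriggerArchiving(j *job) {
	r.archivings.Add(1)
	go func() {
		defer r.archivings.Done()
		// ... fetch metadata, write meta.json/data.json, update DB ...
		fmt.Printf("archiving job (dbid: %d) successful\n", j.ID)
	}()
}

func (r *jobRepository) Shutdown() { r.archivings.Wait() }

func main() {
	repo := &jobRepository{}
	repo.TriggerArchiving(&job{ID: 42})
	repo.Shutdown() // blocks until the goroutine above finishes
}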
|
|
||||||
// func (api *RestApi) importJob(rw http.ResponseWriter, r *http.Request) {
|
|
||||||
// if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
|
|
||||||
// handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw)
|
|
||||||
// return
|
|
||||||
// }
|
|
||||||
|
|
||||||
// var body struct {
|
|
||||||
// Meta *schema.JobMeta `json:"meta"`
|
|
||||||
// Data *schema.JobData `json:"data"`
|
|
||||||
// }
|
|
||||||
// if err := decode(r.Body, &body); err != nil {
|
|
||||||
// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusBadRequest, rw)
|
|
||||||
// return
|
|
||||||
// }
|
|
||||||
|
|
||||||
// if err := api.JobRepository.ImportJob(body.Meta, body.Data); err != nil {
|
|
||||||
// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusUnprocessableEntity, rw)
|
|
||||||
// return
|
|
||||||
// }
|
|
||||||
|
|
||||||
// rw.Write([]byte(`{ "status": "OK" }`))
|
|
||||||
// }
|
|
||||||
|
|
||||||
func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
|
||||||
id := mux.Vars(r)["id"]
|
id := mux.Vars(r)["id"]
|
||||||
metrics := r.URL.Query()["metric"]
|
metrics := r.URL.Query()["metric"]
|
||||||
@ -634,7 +788,8 @@ func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
|
|||||||
me := auth.GetUser(r.Context())
|
me := auth.GetUser(r.Context())
|
||||||
if !me.HasRole(auth.RoleAdmin) {
|
if !me.HasRole(auth.RoleAdmin) {
|
||||||
if username != me.Username {
|
if username != me.Username {
|
||||||
http.Error(rw, "only admins are allowed to sign JWTs not for themselves", http.StatusForbidden)
|
http.Error(rw, "Only admins are allowed to sign JWTs not for themselves",
|
||||||
|
http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -659,13 +814,21 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
|
|||||||
rw.Header().Set("Content-Type", "text/plain")
|
rw.Header().Set("Content-Type", "text/plain")
|
||||||
me := auth.GetUser(r.Context())
|
me := auth.GetUser(r.Context())
|
||||||
if !me.HasRole(auth.RoleAdmin) {
|
if !me.HasRole(auth.RoleAdmin) {
|
||||||
http.Error(rw, "only admins are allowed to create new users", http.StatusForbidden)
|
http.Error(rw, "Only admins are allowed to create new users", http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
username, password, role, name, email := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email")
|
username, password, role, name, email, project := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email"), r.FormValue("project")
|
||||||
if len(password) == 0 && role != auth.RoleApi {
|
if len(password) == 0 && role != auth.GetRoleString(auth.RoleApi) {
|
||||||
http.Error(rw, "only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
|
http.Error(rw, "Only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(project) != 0 && role != auth.GetRoleString(auth.RoleManager) {
|
||||||
|
http.Error(rw, "only managers require a project (can be changed later)", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
} else if len(project) == 0 && role == auth.GetRoleString(auth.RoleManager) {
|
||||||
|
http.Error(rw, "managers require a project to manage (can be changed later)", http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -674,17 +837,18 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
|
|||||||
Name: name,
|
Name: name,
|
||||||
Password: password,
|
Password: password,
|
||||||
Email: email,
|
Email: email,
|
||||||
|
Projects: []string{project},
|
||||||
Roles: []string{role}}); err != nil {
|
Roles: []string{role}}); err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
|
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rw.Write([]byte(fmt.Sprintf("User %#v successfully created!\n", username)))
|
rw.Write([]byte(fmt.Sprintf("User %v successfully created!\n", username)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
|
||||||
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
|
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
|
||||||
http.Error(rw, "only admins are allowed to delete a user", http.StatusForbidden)
|
http.Error(rw, "Only admins are allowed to delete a user", http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -699,7 +863,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
|
||||||
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
|
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
|
||||||
http.Error(rw, "only admins are allowed to fetch a list of users", http.StatusForbidden)
|
http.Error(rw, "Only admins are allowed to fetch a list of users", http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -712,15 +876,33 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
|
|||||||
json.NewEncoder(rw).Encode(users)
|
json.NewEncoder(rw).Encode(users)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (api *RestApi) getRoles(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
user := auth.GetUser(r.Context())
|
||||||
|
if !user.HasRole(auth.RoleAdmin) {
|
||||||
|
http.Error(rw, "only admins are allowed to fetch a list of roles", http.StatusForbidden)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
roles, err := auth.GetValidRoles(user)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
json.NewEncoder(rw).Encode(roles)
|
||||||
|
}
|
||||||
|
|
||||||
func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
|
||||||
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
|
if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) {
|
||||||
http.Error(rw, "only admins are allowed to update a user", http.StatusForbidden)
|
http.Error(rw, "Only admins are allowed to update a user", http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get Values
|
// Get Values
|
||||||
newrole := r.FormValue("add-role")
|
newrole := r.FormValue("add-role")
|
||||||
delrole := r.FormValue("remove-role")
|
delrole := r.FormValue("remove-role")
|
||||||
|
newproj := r.FormValue("add-project")
|
||||||
|
delproj := r.FormValue("remove-project")
|
||||||
|
|
||||||
// TODO: Handle anything but roles...
|
// TODO: Handle anything but roles...
|
||||||
if newrole != "" {
|
if newrole != "" {
|
||||||
@ -735,8 +917,20 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
rw.Write([]byte("Remove Role Success"))
|
rw.Write([]byte("Remove Role Success"))
|
||||||
|
} else if newproj != "" {
|
||||||
|
if err := api.Authentication.AddProject(r.Context(), mux.Vars(r)["id"], newproj); err != nil {
|
||||||
|
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rw.Write([]byte("Add Project Success"))
|
||||||
|
} else if delproj != "" {
|
||||||
|
if err := api.Authentication.RemoveProject(r.Context(), mux.Vars(r)["id"], delproj); err != nil {
|
||||||
|
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rw.Write([]byte("Remove Project Success"))
|
||||||
} else {
|
} else {
|
||||||
http.Error(rw, "Not Add or Del?", http.StatusInternalServerError)
|
http.Error(rw, "Not Add or Del [role|project]?", http.StatusInternalServerError)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
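updateUser gains add-project/remove-project alongside the existing add-role/remove-role form fields, and the handler expects exactly one of the four values per request. A hedged form-post sketch; the route path is not visible in this diff and is assumed:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// updateUserForm posts a single mutation to the (assumed) user update
// route; set exactly one of add-role, remove-role, add-project or
// remove-project.
func updateUserForm(username, field, value string) error {
	form := url.Values{field: {value}}
	req, err := http.NewRequest("POST",
		"http://localhost:8080/api/user/"+username, // hypothetical path
		strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Authorization", "Bearer <admin JWT>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
	return nil
}

func main() {
	_ = updateUserForm("mgr1", "add-project", "projB")
	_ = updateUserForm("mgr1", "remove-project", "projA")
}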
|
|
||||||
@ -744,7 +938,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
|
|||||||
rw.Header().Set("Content-Type", "text/plain")
|
rw.Header().Set("Content-Type", "text/plain")
|
||||||
key, value := r.FormValue("key"), r.FormValue("value")
|
key, value := r.FormValue("key"), r.FormValue("value")
|
||||||
|
|
||||||
fmt.Printf("KEY: %#v\nVALUE: %#v\n", key, value)
|
fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value)
|
||||||
|
|
||||||
if err := repository.GetUserCfgRepo().UpdateConfig(key, value, auth.GetUser(r.Context())); err != nil {
|
if err := repository.GetUserCfgRepo().UpdateConfig(key, value, auth.GetUser(r.Context())); err != nil {
|
||||||
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
|
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
|
||||||
@ -756,7 +950,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request)
|
|||||||
|
|
||||||
func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
|
||||||
if api.MachineStateDir == "" {
|
if api.MachineStateDir == "" {
|
||||||
http.Error(rw, "not enabled", http.StatusNotFound)
|
http.Error(rw, "REST > machine state not enabled", http.StatusNotFound)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -787,7 +981,7 @@ func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
func (api *RestApi) getMachineState(rw http.ResponseWriter, r *http.Request) {
|
func (api *RestApi) getMachineState(rw http.ResponseWriter, r *http.Request) {
|
||||||
if api.MachineStateDir == "" {
|
if api.MachineStateDir == "" {
|
||||||
http.Error(rw, "not enabled", http.StatusNotFound)
|
http.Error(rw, "REST > machine state not enabled", http.StatusNotFound)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9,8 +9,10 @@ import (
|
|||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
||||||
@ -18,32 +20,186 @@ import (
|
|||||||
"github.com/jmoiron/sqlx"
|
"github.com/jmoiron/sqlx"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
type AuthSource int
|
||||||
RoleAdmin string = "admin"
|
|
||||||
RoleSupport string = "support"
|
|
||||||
RoleApi string = "api"
|
|
||||||
RoleUser string = "user"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
AuthViaLocalPassword int8 = 0
|
AuthViaLocalPassword AuthSource = iota
|
||||||
AuthViaLDAP int8 = 1
|
AuthViaLDAP
|
||||||
AuthViaToken int8 = 2
|
AuthViaToken
|
||||||
)
|
)
|
||||||
|
|
||||||
type User struct {
|
type User struct {
|
||||||
Username string `json:"username"`
|
Username string `json:"username"`
|
||||||
Password string `json:"-"`
|
Password string `json:"-"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Roles []string `json:"roles"`
|
Roles []string `json:"roles"`
|
||||||
AuthSource int8 `json:"via"`
|
AuthSource AuthSource `json:"via"`
|
||||||
Email string `json:"email"`
|
Email string `json:"email"`
|
||||||
|
Projects []string `json:"projects"`
|
||||||
Expiration time.Time
|
Expiration time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *User) HasRole(role string) bool {
|
type Role int
|
||||||
|
|
||||||
|
const (
|
||||||
|
RoleAnonymous Role = iota
|
||||||
|
RoleApi
|
||||||
|
RoleUser
|
||||||
|
RoleManager
|
||||||
|
RoleSupport
|
||||||
|
RoleAdmin
|
||||||
|
RoleError
|
||||||
|
)
|
||||||
|
|
||||||
|
func GetRoleString(roleInt Role) string {
|
||||||
|
return [6]string{"anonymous", "api", "user", "manager", "support", "admin"}[roleInt]
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRoleEnum(roleStr string) Role {
|
||||||
|
switch strings.ToLower(roleStr) {
|
||||||
|
case "admin":
|
||||||
|
return RoleAdmin
|
||||||
|
case "support":
|
||||||
|
return RoleSupport
|
||||||
|
case "manager":
|
||||||
|
return RoleManager
|
||||||
|
case "user":
|
||||||
|
return RoleUser
|
||||||
|
case "api":
|
||||||
|
return RoleApi
|
||||||
|
case "anonymous":
|
||||||
|
return RoleAnonymous
|
||||||
|
default:
|
||||||
|
return RoleError
|
||||||
|
}
|
||||||
|
}
|
||||||
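GetRoleString indexes a fixed [6]string with the Role value, so the array literal must stay in lockstep with the iota order, and passing RoleError (index 6) would panic. A defensive variant, shown as a sketch rather than a proposed change to the commit:

package main

import "fmt"

type Role int

const (
	RoleAnonymous Role = iota
	RoleApi
	RoleUser
	RoleManager
	RoleSupport
	RoleAdmin
	RoleError
)

var roleNames = [...]string{"anonymous", "api", "user", "manager", "support", "admin"}

// String returns the role name, or "invalid" for out-of-range values
// such as RoleError, instead of panicking on the array index.
func (r Role) String() string {
	if r < RoleAnonymous || r >= RoleError {
		return "invalid"
	}
	return roleNames[r]
}

func main() {
	fmt.Println(RoleManager) // "manager"
	fmt.Println(RoleError)   // "invalid" instead of an index panic
}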
|
|
||||||
|
func isValidRole(role string) bool {
|
||||||
|
if getRoleEnum(role) == RoleError {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *User) HasValidRole(role string) (hasRole bool, isValid bool) {
|
||||||
|
if isValidRole(role) {
|
||||||
|
for _, r := range u.Roles {
|
||||||
|
if r == role {
|
||||||
|
return true, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, true
|
||||||
|
}
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *User) HasRole(role Role) bool {
|
||||||
for _, r := range u.Roles {
|
for _, r := range u.Roles {
|
||||||
if r == role {
|
if r == GetRoleString(role) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Role arrays are short, so the nested loop does not hurt performance
|
||||||
|
func (u *User) HasAnyRole(queryroles []Role) bool {
|
||||||
|
for _, ur := range u.Roles {
|
||||||
|
for _, qr := range queryroles {
|
||||||
|
if ur == GetRoleString(qr) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Role arrays are short, so the nested loop does not hurt performance
|
||||||
|
func (u *User) HasAllRoles(queryroles []Role) bool {
|
||||||
|
target := len(queryroles)
|
||||||
|
matches := 0
|
||||||
|
for _, ur := range u.Roles {
|
||||||
|
for _, qr := range queryroles {
|
||||||
|
if ur == GetRoleString(qr) {
|
||||||
|
matches += 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if matches == target {
|
||||||
|
return true
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Role arrays are short, so the nested loop does not hurt performance
|
||||||
|
func (u *User) HasNotRoles(queryroles []Role) bool {
|
||||||
|
matches := 0
|
||||||
|
for _, ur := range u.Roles {
|
||||||
|
for _, qr := range queryroles {
|
||||||
|
if ur == GetRoleString(qr) {
|
||||||
|
matches += 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if matches == 0 {
|
||||||
|
return true
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Called by API endpoint '/roles/' from the frontend: only used for the admin configuration view, so check for the admin role
|
||||||
|
func GetValidRoles(user *User) ([]string, error) {
|
||||||
|
var vals []string
|
||||||
|
if user.HasRole(RoleAdmin) {
|
||||||
|
for i := RoleApi; i < RoleError; i++ {
|
||||||
|
vals = append(vals, GetRoleString(i))
|
||||||
|
}
|
||||||
|
return vals, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return vals, fmt.Errorf("%s: only admins are allowed to fetch a list of roles", user.Username)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Called by routerConfig web.page setup in the backend: only requires a known user that is not an API user
|
||||||
|
func GetValidRolesMap(user *User) (map[string]Role, error) {
|
||||||
|
named := make(map[string]Role)
|
||||||
|
if user.HasNotRoles([]Role{RoleApi, RoleAnonymous}) {
|
||||||
|
for i := RoleApi; i < RoleError; i++ {
|
||||||
|
named[GetRoleString(i)] = i
|
||||||
|
}
|
||||||
|
return named, nil
|
||||||
|
}
|
||||||
|
return named, fmt.Errorf("Only known users are allowed to fetch a list of roles")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find highest role
|
||||||
|
func (u *User) GetAuthLevel() Role {
|
||||||
|
if u.HasRole(RoleAdmin) {
|
||||||
|
return RoleAdmin
|
||||||
|
} else if u.HasRole(RoleSupport) {
|
||||||
|
return RoleSupport
|
||||||
|
} else if u.HasRole(RoleManager) {
|
||||||
|
return RoleManager
|
||||||
|
} else if u.HasRole(RoleUser) {
|
||||||
|
return RoleUser
|
||||||
|
} else if u.HasRole(RoleApi) {
|
||||||
|
return RoleApi
|
||||||
|
} else if u.HasRole(RoleAnonymous) {
|
||||||
|
return RoleAnonymous
|
||||||
|
} else {
|
||||||
|
return RoleError
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *User) HasProject(project string) bool {
|
||||||
|
for _, p := range u.Projects {
|
||||||
|
if p == project {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -85,29 +241,20 @@ func Init(db *sqlx.DB,
|
|||||||
configs map[string]interface{}) (*Authentication, error) {
|
configs map[string]interface{}) (*Authentication, error) {
|
||||||
auth := &Authentication{}
|
auth := &Authentication{}
|
||||||
auth.db = db
|
auth.db = db
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE TABLE IF NOT EXISTS user (
|
|
||||||
username varchar(255) PRIMARY KEY NOT NULL,
|
|
||||||
password varchar(255) DEFAULT NULL,
|
|
||||||
ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reasons, fills the "AuthSource" */
|
|
||||||
name varchar(255) DEFAULT NULL,
|
|
||||||
roles varchar(255) NOT NULL DEFAULT "[]",
|
|
||||||
email varchar(255) DEFAULT NULL);`)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sessKey := os.Getenv("SESSION_KEY")
|
sessKey := os.Getenv("SESSION_KEY")
|
||||||
if sessKey == "" {
|
if sessKey == "" {
|
||||||
log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
|
log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)")
|
||||||
bytes := make([]byte, 32)
|
bytes := make([]byte, 32)
|
||||||
if _, err := rand.Read(bytes); err != nil {
|
if _, err := rand.Read(bytes); err != nil {
|
||||||
|
log.Error("Error while initializing authentication -> failed to generate random bytes for session key")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
auth.sessionStore = sessions.NewCookieStore(bytes)
|
auth.sessionStore = sessions.NewCookieStore(bytes)
|
||||||
} else {
|
} else {
|
||||||
bytes, err := base64.StdEncoding.DecodeString(sessKey)
|
bytes, err := base64.StdEncoding.DecodeString(sessKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Error("Error while initializing authentication -> decoding session key failed")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
auth.sessionStore = sessions.NewCookieStore(bytes)
|
auth.sessionStore = sessions.NewCookieStore(bytes)
|
||||||
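When SESSION_KEY is unset the server falls back to a random, non-persistent key, which invalidates all session cookies on every restart. Generating a stable key once and exporting it avoids that; a sketch matching the base64 encoding Init decodes:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// 32 random bytes, base64-encoded, matching what Init expects in
	// the SESSION_KEY environment variable.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Printf("export SESSION_KEY=%s\n", base64.StdEncoding.EncodeToString(key))
}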
@ -115,12 +262,14 @@ func Init(db *sqlx.DB,
|
|||||||
|
|
||||||
auth.LocalAuth = &LocalAuthenticator{}
|
auth.LocalAuth = &LocalAuthenticator{}
|
||||||
if err := auth.LocalAuth.Init(auth, nil); err != nil {
|
if err := auth.LocalAuth.Init(auth, nil); err != nil {
|
||||||
|
log.Error("Error while initializing authentication -> localAuth init failed")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
auth.authenticators = append(auth.authenticators, auth.LocalAuth)
|
auth.authenticators = append(auth.authenticators, auth.LocalAuth)
|
||||||
|
|
||||||
auth.JwtAuth = &JWTAuthenticator{}
|
auth.JwtAuth = &JWTAuthenticator{}
|
||||||
if err := auth.JwtAuth.Init(auth, configs["jwt"]); err != nil {
|
if err := auth.JwtAuth.Init(auth, configs["jwt"]); err != nil {
|
||||||
|
log.Error("Error while initializing authentication -> jwtAuth init failed")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
auth.authenticators = append(auth.authenticators, auth.JwtAuth)
|
auth.authenticators = append(auth.authenticators, auth.JwtAuth)
|
||||||
@ -128,6 +277,7 @@ func Init(db *sqlx.DB,
|
|||||||
if config, ok := configs["ldap"]; ok {
|
if config, ok := configs["ldap"]; ok {
|
||||||
auth.LdapAuth = &LdapAuthenticator{}
|
auth.LdapAuth = &LdapAuthenticator{}
|
||||||
if err := auth.LdapAuth.Init(auth, config); err != nil {
|
if err := auth.LdapAuth.Init(auth, config); err != nil {
|
||||||
|
log.Error("Error while initializing authentication -> ldapAuth init failed")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
auth.authenticators = append(auth.authenticators, auth.LdapAuth)
|
auth.authenticators = append(auth.authenticators, auth.LdapAuth)
|
||||||
@ -142,6 +292,7 @@ func (auth *Authentication) AuthViaSession(
|
|||||||
|
|
||||||
session, err := auth.sessionStore.Get(r, "session")
|
session, err := auth.sessionStore.Get(r, "session")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Error("Error while getting session store")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -150,9 +301,11 @@ func (auth *Authentication) AuthViaSession(
|
|||||||
}
|
}
|
||||||
|
|
||||||
username, _ := session.Values["username"].(string)
|
username, _ := session.Values["username"].(string)
|
||||||
|
projects, _ := session.Values["projects"].([]string)
|
||||||
roles, _ := session.Values["roles"].([]string)
|
roles, _ := session.Values["roles"].([]string)
|
||||||
return &User{
|
return &User{
|
||||||
Username: username,
|
Username: username,
|
||||||
|
Projects: projects,
|
||||||
Roles: roles,
|
Roles: roles,
|
||||||
AuthSource: -1,
|
AuthSource: -1,
|
||||||
}, nil
|
}, nil
|
||||||
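AuthViaSession reconstructs the user, now including projects, from the cookie session and returns it with AuthSource -1, i.e. restored rather than freshly authenticated. A hedged sketch of wiring such a session check into middleware; the Authentication API surface beyond this hunk, and the context key, are assumptions:

package main

import (
	"context"
	"fmt"
	"net/http"
)

type user struct {
	Username string
	Projects []string
	Roles    []string
}

type ctxKey struct{}

// sessionUser stands in for auth.AuthViaSession; the real method reads
// username, projects and roles from the gorilla session store.
func sessionUser(r *http.Request) (*user, error) {
	// ... session lookup elided ...
	return &user{Username: "demo", Roles: []string{"user"}}, nil
}

func withSessionAuth(next http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		u, err := sessionUser(r)
		if err != nil || u == nil {
			http.Error(rw, "unauthorized", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(rw, r.WithContext(context.WithValue(r.Context(), ctxKey{}, u)))
	})
}

func main() {
	h := withSessionAuth(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		u := r.Context().Value(ctxKey{}).(*user)
		fmt.Fprintf(rw, "hello %s\n", u.Username)
	}))
	http.Handle("/", h)
	// http.ListenAndServe(":8080", nil) // left commented for the sketch
}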
@ -169,7 +322,7 @@ func (auth *Authentication) Login(
|
|||||||
user := (*User)(nil)
|
user := (*User)(nil)
|
||||||
if username != "" {
|
if username != "" {
|
||||||
if user, _ = auth.GetUser(username); err != nil {
|
if user, _ = auth.GetUser(username); err != nil {
|
||||||
// log.Warnf("login of unkown user %#v", username)
|
// log.Warnf("login of unkown user %v", username)
|
||||||
_ = err
|
_ = err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -181,7 +334,7 @@ func (auth *Authentication) Login(
|
|||||||
|
|
||||||
user, err = authenticator.Login(user, rw, r)
|
user, err = authenticator.Login(user, rw, r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("login failed: %s", err.Error())
|
log.Warnf("user '%s' login failed: %s", user.Username, err.Error())
|
||||||
onfailure(rw, r, err)
|
onfailure(rw, r, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -197,14 +350,15 @@ func (auth *Authentication) Login(
|
|||||||
session.Options.MaxAge = int(auth.SessionMaxAge.Seconds())
|
session.Options.MaxAge = int(auth.SessionMaxAge.Seconds())
|
||||||
}
|
}
|
||||||
session.Values["username"] = user.Username
|
session.Values["username"] = user.Username
|
||||||
|
session.Values["projects"] = user.Projects
|
||||||
session.Values["roles"] = user.Roles
|
session.Values["roles"] = user.Roles
|
||||||
if err := auth.sessionStore.Save(r, rw, session); err != nil {
|
if err := auth.sessionStore.Save(r, rw, session); err != nil {
|
||||||
log.Errorf("session save failed: %s", err.Error())
|
log.Warnf("session save failed: %s", err.Error())
|
||||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("login successfull: user: %#v (roles: %v)", user.Username, user.Roles)
|
log.Infof("login successfull: user: %#v (roles: %v, projects: %v)", user.Username, user.Roles, user.Projects)
|
||||||
ctx := context.WithValue(r.Context(), ContextUserKey, user)
|
ctx := context.WithValue(r.Context(), ContextUserKey, user)
|
||||||
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
|
onsuccess.ServeHTTP(rw, r.WithContext(ctx))
|
||||||
return
|
return
|
||||||
|
@ -1 +1,129 @@
|
|||||||
|
// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
|
||||||
|
// All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
package auth
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHasValidRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user"}}
|
||||||
|
|
||||||
|
exists, _ := u.HasValidRole("user")
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("user"): EXISTS = %v, expected 'true'.`, exists)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasNotValidRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user"}}
|
||||||
|
|
||||||
|
exists, _ := u.HasValidRole("manager")
|
||||||
|
|
||||||
|
if exists {
|
||||||
|
t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("manager"): EXISTS = %v, expected 'false'.`, exists)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasInvalidRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user"}}
|
||||||
|
|
||||||
|
_, valid := u.HasValidRole("invalid")
|
||||||
|
|
||||||
|
if valid {
|
||||||
|
t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("invalid"): VALID = %v, expected 'false'.`, valid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasNotInvalidRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user"}}
|
||||||
|
|
||||||
|
_, valid := u.HasValidRole("user")
|
||||||
|
|
||||||
|
if !valid {
|
||||||
|
t.Fatalf(`User{Roles: ["user"]} -> HasValidRole("user"): VALID = %v, expected 'true'.`, valid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user"}}
|
||||||
|
|
||||||
|
exists := u.HasRole(RoleUser)
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
t.Fatalf(`User{Roles: ["user"]} -> HasRole(RoleUser): EXISTS = %v, expected 'true'.`, exists)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasNotRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user"}}
|
||||||
|
|
||||||
|
exists := u.HasRole(RoleManager)
|
||||||
|
|
||||||
|
if exists {
|
||||||
|
t.Fatalf(`User{Roles: ["user"]} -> HasRole(RoleManager): EXISTS = %v, expected 'false'.`, exists)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasAnyRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user", "manager"}}
|
||||||
|
|
||||||
|
result := u.HasAnyRole([]Role{RoleManager, RoleSupport, RoleAdmin})
|
||||||
|
|
||||||
|
if !result {
|
||||||
|
t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAnyRole([]Role{RoleManager, RoleSupport, RoleAdmin}): RESULT = %v, expected 'true'.`, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasNotAnyRole(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user", "manager"}}
|
||||||
|
|
||||||
|
result := u.HasAnyRole([]Role{RoleSupport, RoleAdmin})
|
||||||
|
|
||||||
|
if result {
|
||||||
|
t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAllRoles([]Role{RoleSupport, RoleAdmin}): RESULT = %v, expected 'false'.`, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasAllRoles(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user", "manager", "support"}}
|
||||||
|
|
||||||
|
result := u.HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport})
|
||||||
|
|
||||||
|
if !result {
|
||||||
|
t.Fatalf(`User{Roles: ["user", "manager", "support"]} -> HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}): RESULT = %v, expected 'true'.`, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasNotAllRoles(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user", "manager"}}
|
||||||
|
|
||||||
|
result := u.HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport})
|
||||||
|
|
||||||
|
if result {
|
||||||
|
t.Fatalf(`User{Roles: ["user", "manager"]} -> HasAllRoles([]Role{RoleUser, RoleManager, RoleSupport}): RESULT = %v, expected 'false'.`, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasNotRoles(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user", "manager"}}
|
||||||
|
|
||||||
|
result := u.HasNotRoles([]Role{RoleSupport, RoleAdmin})
|
||||||
|
|
||||||
|
if !result {
|
||||||
|
t.Fatalf(`User{Roles: ["user", "manager"]} -> HasNotRoles([]Role{RoleSupport, RoleAdmin}): RESULT = %v, expected 'true'.`, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasAllNotRoles(t *testing.T) {
|
||||||
|
u := User{Username: "testuser", Roles: []string{"user", "manager"}}
|
||||||
|
|
||||||
|
result := u.HasNotRoles([]Role{RoleUser, RoleManager})
|
||||||
|
|
||||||
|
if result {
|
||||||
|
t.Fatalf(`User{Roles: ["user", "manager"]} -> HasNotRoles([]Role{RoleUser, RoleManager}): RESULT = %v, expected 'false'.`, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
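Note: the new tests pin down the HasValidRole contract: it returns (exists, valid), where exists reports whether the user currently holds the role and valid reports whether the role name is a known option at all. The implementation is not part of this diff; below is a minimal sketch consistent with the tests, with the set of known role names being an assumption rather than something taken from this commit.

    // Sketch only: (exists, valid) semantics inferred from the tests above;
    // the list of known role names is an assumption.
    func (u *User) HasValidRole(role string) (exists bool, valid bool) {
        known := map[string]bool{
            "user": true, "manager": true, "support": true, "admin": true, "api": true,
        }
        if !known[role] {
            return false, false
        }
        for _, r := range u.Roles {
            if r == role {
                return true, true
            }
        }
        return false, true
    }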
@@ -23,8 +23,9 @@ import (
 type JWTAuthenticator struct {
 	auth *Authentication
 
 	publicKey           ed25519.PublicKey
 	privateKey          ed25519.PrivateKey
+	publicKeyCrossLogin ed25519.PublicKey // For accepting externally generated JWTs
 
 	loginTokenKey []byte // HS256 key
 
@@ -44,11 +45,13 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
 	} else {
 		bytes, err := base64.StdEncoding.DecodeString(pubKey)
 		if err != nil {
+			log.Warn("Could not decode JWT public key")
 			return err
 		}
 		ja.publicKey = ed25519.PublicKey(bytes)
 		bytes, err = base64.StdEncoding.DecodeString(privKey)
 		if err != nil {
+			log.Warn("Could not decode JWT private key")
 			return err
 		}
 		ja.privateKey = ed25519.PrivateKey(bytes)
@@ -57,11 +60,41 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error {
 	if pubKey = os.Getenv("CROSS_LOGIN_JWT_HS512_KEY"); pubKey != "" {
 		bytes, err := base64.StdEncoding.DecodeString(pubKey)
 		if err != nil {
+			log.Warn("Could not decode cross login JWT HS512 key")
 			return err
 		}
 		ja.loginTokenKey = bytes
 	}
+
+	// Look for external public keys
+	pubKeyCrossLogin, keyFound := os.LookupEnv("CROSS_LOGIN_JWT_PUBLIC_KEY")
+	if keyFound && pubKeyCrossLogin != "" {
+		bytes, err := base64.StdEncoding.DecodeString(pubKeyCrossLogin)
+		if err != nil {
+			log.Warn("Could not decode cross login JWT public key")
+			return err
+		}
+		ja.publicKeyCrossLogin = ed25519.PublicKey(bytes)
+
+		// Warn if other necessary settings are not configured
+		if ja.config != nil {
+			if ja.config.CookieName == "" {
+				log.Warn("cookieName for JWTs not configured (cross login via JWT cookie will fail)")
+			}
+			if !ja.config.ForceJWTValidationViaDatabase {
+				log.Warn("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!")
+			}
+			if ja.config.TrustedExternalIssuer == "" {
+				log.Warn("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
+			}
+		} else {
+			log.Warn("cookieName and trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)")
+		}
+	} else {
+		ja.publicKeyCrossLogin = nil
+		log.Warn("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)")
+	}
+
 	return nil
 }
 
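Note: Init expects all keys as base64-encoded raw Ed25519 key bytes. A small generation sketch follows; only CROSS_LOGIN_JWT_PUBLIC_KEY is named in this diff, and how the internal key pair reaches Init is not shown in these hunks.

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "encoding/base64"
        "fmt"
    )

    func main() {
        // Generate an Ed25519 key pair and print it the way the decode
        // calls above expect: base64.StdEncoding over the raw key bytes.
        pub, priv, err := ed25519.GenerateKey(rand.Reader)
        if err != nil {
            panic(err)
        }
        fmt.Println("public: ", base64.StdEncoding.EncodeToString(pub))
        fmt.Println("private:", base64.StdEncoding.EncodeToString(priv))
    }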
@@ -94,13 +127,15 @@ func (ja *JWTAuthenticator) Login(
 		if t.Method == jwt.SigningMethodHS256 || t.Method == jwt.SigningMethodHS512 {
 			return ja.loginTokenKey, nil
 		}
-		return nil, fmt.Errorf("unkown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
+		return nil, fmt.Errorf("AUTH/JWT > unknown signing method for login token: %s (known: HS256, HS512, EdDSA)", t.Method.Alg())
 	})
 	if err != nil {
+		log.Warn("Error while parsing jwt token")
 		return nil, err
 	}
 
 	if err := token.Claims.Valid(); err != nil {
+		log.Warn("jwt token claims are not valid")
 		return nil, err
 	}
 
@@ -111,17 +146,22 @@ func (ja *JWTAuthenticator) Login(
 	if rawroles, ok := claims["roles"].([]interface{}); ok {
 		for _, rr := range rawroles {
 			if r, ok := rr.(string); ok {
-				roles = append(roles, r)
+				if isValidRole(r) {
+					roles = append(roles, r)
+				}
 			}
 		}
 	}
 	if rawrole, ok := claims["roles"].(string); ok {
-		roles = append(roles, rawrole)
+		if isValidRole(rawrole) {
+			roles = append(roles, rawrole)
+		}
 	}
 
 	if user == nil {
 		user, err = ja.auth.GetUser(sub)
 		if err != nil && err != sql.ErrNoRows {
+			log.Errorf("Error while loading user '%v'", sub)
 			return nil, err
 		} else if user == nil {
 			user = &User{
@@ -130,6 +170,7 @@ func (ja *JWTAuthenticator) Login(
 				AuthSource: AuthViaToken,
 			}
 			if err := ja.auth.AddUser(user); err != nil {
+				log.Errorf("Error while adding user '%v' to auth from token", user.Username)
 				return nil, err
 			}
 		}
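Note: the Login changes above silently drop role claims that fail isValidRole, whose body is not part of this diff. A plausible sketch, assuming it is a plain membership test over the known role names:

    // Assumed helper, sketched for context only; not taken from this commit.
    func isValidRole(role string) bool {
        switch role {
        case "user", "manager", "support", "admin", "api":
            return true
        }
        return false
    }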
@@ -149,38 +190,122 @@ func (ja *JWTAuthenticator) Auth(
 		rawtoken = strings.TrimPrefix(rawtoken, "Bearer ")
 	}
 
+	// If no auth header was found, check for a certain cookie containing a JWT
+	cookieName := ""
+	cookieFound := false
+	if ja.config != nil && ja.config.CookieName != "" {
+		cookieName = ja.config.CookieName
+	}
+
+	// Try to read the JWT cookie
+	if rawtoken == "" && cookieName != "" {
+		jwtCookie, err := r.Cookie(cookieName)
+
+		if err == nil && jwtCookie.Value != "" {
+			rawtoken = jwtCookie.Value
+			cookieFound = true
+		}
+	}
+
 	// Because a user can also log in via a token, the
 	// session cookie must be checked here as well:
 	if rawtoken == "" {
 		return ja.auth.AuthViaSession(rw, r)
 	}
 
+	// Try to parse JWT
 	token, err := jwt.Parse(rawtoken, func(t *jwt.Token) (interface{}, error) {
 		if t.Method != jwt.SigningMethodEdDSA {
 			return nil, errors.New("only Ed25519/EdDSA supported")
 		}
+
+		// Is there more than one public key?
+		if ja.publicKeyCrossLogin != nil && ja.config != nil && ja.config.TrustedExternalIssuer != "" {
+			// Determine whether to use the external public key
+			unvalidatedIssuer, success := t.Claims.(jwt.MapClaims)["iss"].(string)
+			if success && unvalidatedIssuer == ja.config.TrustedExternalIssuer {
+				// The (unvalidated) issuer seems to be the expected one,
+				// use public cross login key from config
+				return ja.publicKeyCrossLogin, nil
+			}
+		}
+
+		// No cross login key configured or issuer not expected
+		// Try own key
 		return ja.publicKey, nil
 	})
 	if err != nil {
+		log.Warn("Error while parsing token")
 		return nil, err
 	}
 
+	// Check token validity
 	if err := token.Claims.Valid(); err != nil {
+		log.Warn("jwt token claims are not valid")
 		return nil, err
 	}
 
+	// Token is valid, extract payload
 	claims := token.Claims.(jwt.MapClaims)
 	sub, _ := claims["sub"].(string)
 
 	var roles []string
-	if rawroles, ok := claims["roles"].([]interface{}); ok {
-		for _, rr := range rawroles {
-			if r, ok := rr.(string); ok {
-				roles = append(roles, r)
+
+	// Validate user + roles from JWT against database?
+	if ja.config != nil && ja.config.ForceJWTValidationViaDatabase {
+		user, err := ja.auth.GetUser(sub)
+
+		// Deny any logins for unknown usernames
+		if err != nil {
+			log.Warn("Could not find user from JWT in internal database.")
+			return nil, errors.New("unknown user")
+		}
+
+		// Take user roles from database instead of trusting the JWT
+		roles = user.Roles
+	} else {
+		// Extract roles from JWT (if present)
+		if rawroles, ok := claims["roles"].([]interface{}); ok {
+			for _, rr := range rawroles {
+				if r, ok := rr.(string); ok {
+					roles = append(roles, r)
+				}
 			}
 		}
 	}
 
+	if cookieFound {
+		// Create a session so that we no longer need the JWT cookie
+		session, err := ja.auth.sessionStore.New(r, "session")
+		if err != nil {
+			log.Errorf("session creation failed: %s", err.Error())
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+			return nil, err
+		}
+
+		if ja.auth.SessionMaxAge != 0 {
+			session.Options.MaxAge = int(ja.auth.SessionMaxAge.Seconds())
+		}
+		session.Values["username"] = sub
+		session.Values["roles"] = roles
+
+		if err := ja.auth.sessionStore.Save(r, rw, session); err != nil {
+			log.Warnf("session save failed: %s", err.Error())
+			http.Error(rw, err.Error(), http.StatusInternalServerError)
+			return nil, err
+		}
+
+		// (Ask browser to) Delete JWT cookie
+		deletedCookie := &http.Cookie{
+			Name:     cookieName,
+			Value:    "",
+			Path:     "/",
+			MaxAge:   -1,
+			HttpOnly: true,
+		}
+		http.SetCookie(rw, deletedCookie)
+	}
+
 	return &User{
 		Username: sub,
 		Roles:    roles,
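Note: to exercise the cross-login path end to end, a token must be EdDSA-signed and carry an iss claim equal to the configured trustedExternalIssuer. A minting sketch, assuming the github.com/golang-jwt/jwt/v4 module; the issuer value and extra claims here are made up for illustration.

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "fmt"
        "time"

        "github.com/golang-jwt/jwt/v4"
    )

    func main() {
        _, priv, _ := ed25519.GenerateKey(rand.Reader)
        // "sub" and "roles" are read by Auth above; "iss" must match
        // trustedExternalIssuer (the value below is hypothetical).
        token := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{
            "sub":   "testuser",
            "roles": []string{"user"},
            "iss":   "my-external-issuer",
            "exp":   time.Now().Add(10 * time.Minute).Unix(),
        })
        signed, err := token.SignedString(priv)
        if err != nil {
            panic(err)
        }
        fmt.Println(signed)
    }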
@@ -39,21 +39,23 @@ func (la *LdapAuthenticator) Init(
 	if la.config != nil && la.config.SyncInterval != "" {
 		interval, err := time.ParseDuration(la.config.SyncInterval)
 		if err != nil {
+			log.Warnf("Could not parse duration for sync interval: %v", la.config.SyncInterval)
 			return err
 		}
 
 		if interval == 0 {
+			log.Info("Sync interval is zero")
 			return nil
 		}
 
 		go func() {
 			ticker := time.NewTicker(interval)
 			for t := range ticker.C {
-				log.Printf("LDAP sync started at %s", t.Format(time.RFC3339))
+				log.Printf("sync started at %s", t.Format(time.RFC3339))
 				if err := la.Sync(); err != nil {
-					log.Errorf("LDAP sync failed: %s", err.Error())
+					log.Errorf("sync failed: %s", err.Error())
 				}
-				log.Print("LDAP sync done")
+				log.Print("sync done")
 			}
 		}()
 	}
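Note: SyncInterval is parsed with Go's time.ParseDuration, so the config value follows that syntax; a zero duration disables the background sync goroutine entirely. For example:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // "24h", "90m" and "1h30m" are all valid; "0" parses to a zero
        // duration, which the Init code above treats as "no sync".
        interval, err := time.ParseDuration("24h")
        if err != nil {
            panic(err)
        }
        fmt.Println(interval.Hours()) // 24
    }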
@@ -76,12 +78,14 @@ func (la *LdapAuthenticator) Login(
 
 	l, err := la.getLdapConnection(false)
 	if err != nil {
+		log.Warn("Error while getting ldap connection")
 		return nil, err
 	}
 	defer l.Close()
 
 	userDn := strings.Replace(la.config.UserBind, "{username}", user.Username, -1)
 	if err := l.Bind(userDn, r.FormValue("password")); err != nil {
+		log.Error("Error while binding to ldap connection")
 		return nil, err
 	}
 
@@ -104,12 +108,14 @@ func (la *LdapAuthenticator) Sync() error {
 	users := map[string]int{}
 	rows, err := la.auth.db.Query(`SELECT username FROM user WHERE user.ldap = 1`)
 	if err != nil {
+		log.Warn("Error while querying LDAP users")
 		return err
 	}
 
 	for rows.Next() {
 		var username string
 		if err := rows.Scan(&username); err != nil {
+			log.Warnf("Error while scanning for user '%s'", username)
 			return err
 		}
 
@@ -118,6 +124,7 @@ func (la *LdapAuthenticator) Sync() error {
 
 	l, err := la.getLdapConnection(true)
 	if err != nil {
+		log.Error("LDAP connection error")
 		return err
 	}
 	defer l.Close()
@@ -126,6 +133,7 @@ func (la *LdapAuthenticator) Sync() error {
 		la.config.UserBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
 		la.config.UserFilter, []string{"dn", "uid", "gecos"}, nil))
 	if err != nil {
+		log.Warn("LDAP search error")
 		return err
 	}
 
@@ -147,15 +155,17 @@ func (la *LdapAuthenticator) Sync() error {
 
 	for username, where := range users {
 		if where == IN_DB && la.config.SyncDelOldUsers {
-			log.Debugf("ldap-sync: remove %#v (does not show up in LDAP anymore)", username)
+			log.Debugf("sync: remove %v (does not show up in LDAP anymore)", username)
 			if _, err := la.auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil {
+				log.Errorf("User '%s' not in LDAP anymore: Delete from DB failed", username)
 				return err
 			}
 		} else if where == IN_LDAP {
 			name := newnames[username]
-			log.Debugf("ldap-sync: add %#v (name: %#v, roles: [user], ldap: true)", username, name)
+			log.Debugf("sync: add %v (name: %v, roles: [user], ldap: true)", username, name)
 			if _, err := la.auth.db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`,
-				username, 1, name, "[\""+RoleUser+"\"]"); err != nil {
+				username, 1, name, "[\""+GetRoleString(RoleUser)+"\"]"); err != nil {
+				log.Errorf("User '%s' new in LDAP: Insert into DB failed", username)
 				return err
 			}
 		}
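Note: the hand-built roles column value written for new LDAP users is exactly what json.Marshal would produce for a one-element list, assuming GetRoleString(RoleUser) yields "user" (GetRoleString itself is not shown in this diff):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        b, _ := json.Marshal([]string{"user"})
        fmt.Println(string(b))               // ["user"]
        fmt.Println(string(b) == `["user"]`) // true
    }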
@@ -170,12 +180,14 @@ func (la *LdapAuthenticator) getLdapConnection(admin bool) (*ldap.Conn, error) {
 
 	conn, err := ldap.DialURL(la.config.Url)
 	if err != nil {
+		log.Warn("LDAP URL dial failed")
 		return nil, err
 	}
 
 	if admin {
 		if err := conn.Bind(la.config.SearchDN, la.syncPassword); err != nil {
 			conn.Close()
+			log.Warn("LDAP connection bind failed")
 			return nil, err
 		}
 	}
@@ -39,7 +39,7 @@ func (la *LocalAuthenticator) Login(
 	r *http.Request) (*User, error) {
 
 	if e := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(r.FormValue("password"))); e != nil {
-		return nil, fmt.Errorf("user '%s' provided the wrong password (%w)", user.Username, e)
+		return nil, fmt.Errorf("AUTH/LOCAL > user '%s' provided the wrong password (%w)", user.Username, e)
 	}
 
 	return user, nil
@@ -10,6 +10,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"strings"
 
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
@@ -21,10 +22,11 @@ import (
 func (auth *Authentication) GetUser(username string) (*User, error) {
 
 	user := &User{Username: username}
-	var hashedPassword, name, rawRoles, email sql.NullString
-	if err := sq.Select("password", "ldap", "name", "roles", "email").From("user").
+	var hashedPassword, name, rawRoles, email, rawProjects sql.NullString
+	if err := sq.Select("password", "ldap", "name", "roles", "email", "projects").From("user").
 		Where("user.username = ?", username).RunWith(auth.db).
-		QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email); err != nil {
+		QueryRow().Scan(&hashedPassword, &user.AuthSource, &name, &rawRoles, &email, &rawProjects); err != nil {
+		log.Warnf("Error while querying user '%v' from database", username)
 		return nil, err
 	}
 
@@ -33,6 +35,12 @@ func (auth *Authentication) GetUser(username string) (*User, error) {
 	user.Email = email.String
 	if rawRoles.Valid {
 		if err := json.Unmarshal([]byte(rawRoles.String), &user.Roles); err != nil {
+			log.Warn("Error while unmarshaling raw roles from DB")
+			return nil, err
+		}
+	}
+	if rawProjects.Valid {
+		if err := json.Unmarshal([]byte(rawProjects.String), &user.Projects); err != nil {
 			return nil, err
 		}
 	}
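Note: like roles, the new projects column stores a plain JSON string array that GetUser unmarshals straight into user.Projects. A minimal round-trip with a hypothetical column value:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        raw := `["projectA","projectB"]` // hypothetical column content
        var projects []string
        if err := json.Unmarshal([]byte(raw), &projects); err != nil {
            panic(err)
        }
        fmt.Println(projects) // [projectA projectB]
    }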
@@ -43,9 +51,11 @@ func (auth *Authentication) GetUser(username string) (*User, error) {
 func (auth *Authentication) AddUser(user *User) error {
 
 	rolesJson, _ := json.Marshal(user.Roles)
+	projectsJson, _ := json.Marshal(user.Projects)
+
+	cols := []string{"username", "roles", "projects"}
+	vals := []interface{}{user.Username, string(rolesJson), string(projectsJson)}
 
-	cols := []string{"username", "roles"}
-	vals := []interface{}{user.Username, string(rolesJson)}
 	if user.Name != "" {
 		cols = append(cols, "name")
 		vals = append(vals, user.Name)
@@ -57,6 +67,7 @@ func (auth *Authentication) AddUser(user *User) error {
 	if user.Password != "" {
 		password, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
 		if err != nil {
+			log.Error("Error while encrypting new user password")
 			return err
 		}
 		cols = append(cols, "password")
@@ -64,28 +75,31 @@ func (auth *Authentication) AddUser(user *User) error {
 	}
 
 	if _, err := sq.Insert("user").Columns(cols...).Values(vals...).RunWith(auth.db).Exec(); err != nil {
+		log.Errorf("Error while inserting new user '%v' into DB", user.Username)
 		return err
 	}
 
-	log.Infof("new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource)
+	log.Infof("new user %#v created (roles: %s, auth-source: %d, projects: %s)", user.Username, rolesJson, user.AuthSource, projectsJson)
 	return nil
 }
 
 func (auth *Authentication) DelUser(username string) error {
 
 	_, err := auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username)
+	log.Errorf("Error while deleting user '%s' from DB", username)
 	return err
 }
 
 func (auth *Authentication) ListUsers(specialsOnly bool) ([]*User, error) {
 
-	q := sq.Select("username", "name", "email", "roles").From("user")
+	q := sq.Select("username", "name", "email", "roles", "projects").From("user")
 	if specialsOnly {
 		q = q.Where("(roles != '[\"user\"]' AND roles != '[]')")
 	}
 
 	rows, err := q.RunWith(auth.db).Query()
 	if err != nil {
+		log.Warn("Error while querying user list")
 		return nil, err
 	}
 
@@ -93,13 +107,20 @@ func (auth *Authentication) ListUsers(specialsOnly bool) ([]*User, error) {
 	defer rows.Close()
 	for rows.Next() {
 		rawroles := ""
+		rawprojects := ""
 		user := &User{}
 		var name, email sql.NullString
-		if err := rows.Scan(&user.Username, &name, &email, &rawroles); err != nil {
+		if err := rows.Scan(&user.Username, &name, &email, &rawroles, &rawprojects); err != nil {
+			log.Warn("Error while scanning user list")
 			return nil, err
 		}
 
 		if err := json.Unmarshal([]byte(rawroles), &user.Roles); err != nil {
+			log.Warn("Error while unmarshaling raw role list")
+			return nil, err
+		}
+
+		if err := json.Unmarshal([]byte(rawprojects), &user.Projects); err != nil {
 			return nil, err
 		}
 
|
|||||||
func (auth *Authentication) AddRole(
|
func (auth *Authentication) AddRole(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
username string,
|
username string,
|
||||||
role string) error {
|
queryrole string) error {
|
||||||
|
|
||||||
|
newRole := strings.ToLower(queryrole)
|
||||||
user, err := auth.GetUser(username)
|
user, err := auth.GetUser(username)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Warnf("Could not load user '%s'", username)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if role != RoleAdmin && role != RoleApi && role != RoleUser && role != RoleSupport {
|
exists, valid := user.HasValidRole(newRole)
|
||||||
return fmt.Errorf("invalid user role: %#v", role)
|
|
||||||
|
if !valid {
|
||||||
|
return fmt.Errorf("Supplied role is no valid option : %v", newRole)
|
||||||
|
}
|
||||||
|
if exists {
|
||||||
|
return fmt.Errorf("User %v already has role %v", username, newRole)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, r := range user.Roles {
|
roles, _ := json.Marshal(append(user.Roles, newRole))
|
||||||
if r == role {
|
|
||||||
return fmt.Errorf("user %#v already has role %#v", username, role)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
roles, _ := json.Marshal(append(user.Roles, role))
|
|
||||||
if _, err := sq.Update("user").Set("roles", roles).Where("user.username = ?", username).RunWith(auth.db).Exec(); err != nil {
|
if _, err := sq.Update("user").Set("roles", roles).Where("user.username = ?", username).RunWith(auth.db).Exec(); err != nil {
|
||||||
|
log.Errorf("Error while adding new role for user '%s'", user.Username)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (auth *Authentication) RemoveRole(ctx context.Context, username string, role string) error {
|
func (auth *Authentication) RemoveRole(ctx context.Context, username string, queryrole string) error {
|
||||||
|
oldRole := strings.ToLower(queryrole)
|
||||||
|
user, err := auth.GetUser(username)
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("Could not load user '%s'", username)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
exists, valid := user.HasValidRole(oldRole)
|
||||||
|
|
||||||
|
if !valid {
|
||||||
|
return fmt.Errorf("Supplied role is no valid option : %v", oldRole)
|
||||||
|
}
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("Role already deleted for user '%v': %v", username, oldRole)
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldRole == GetRoleString(RoleManager) && len(user.Projects) != 0 {
|
||||||
|
return fmt.Errorf("Cannot remove role 'manager' while user %s still has assigned project(s) : %v", username, user.Projects)
|
||||||
|
}
|
||||||
|
|
||||||
|
var newroles []string
|
||||||
|
for _, r := range user.Roles {
|
||||||
|
if r != oldRole {
|
||||||
|
newroles = append(newroles, r) // Append all roles not matching requested to be deleted role
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var mroles, _ = json.Marshal(newroles)
|
||||||
|
if _, err := sq.Update("user").Set("roles", mroles).Where("user.username = ?", username).RunWith(auth.db).Exec(); err != nil {
|
||||||
|
log.Errorf("Error while removing role for user '%s'", user.Username)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *Authentication) AddProject(
|
||||||
|
ctx context.Context,
|
||||||
|
username string,
|
||||||
|
project string) error {
|
||||||
|
|
||||||
user, err := auth.GetUser(username)
|
user, err := auth.GetUser(username)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if role != RoleAdmin && role != RoleApi && role != RoleUser {
|
if !user.HasRole(RoleManager) {
|
||||||
return fmt.Errorf("invalid user role: %#v", role)
|
return fmt.Errorf("user '%s' is not a manager!", username)
|
||||||
|
}
|
||||||
|
|
||||||
|
if user.HasProject(project) {
|
||||||
|
return fmt.Errorf("user '%s' already manages project '%s'", username, project)
|
||||||
|
}
|
||||||
|
|
||||||
|
projects, _ := json.Marshal(append(user.Projects, project))
|
||||||
|
if _, err := sq.Update("user").Set("projects", projects).Where("user.username = ?", username).RunWith(auth.db).Exec(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (auth *Authentication) RemoveProject(ctx context.Context, username string, project string) error {
|
||||||
|
user, err := auth.GetUser(username)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !user.HasRole(RoleManager) {
|
||||||
|
return fmt.Errorf("user '%#v' is not a manager!", username)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !user.HasProject(project) {
|
||||||
|
return fmt.Errorf("user '%#v': Cannot remove project '%#v' - Does not match!", username, project)
|
||||||
}
|
}
|
||||||
|
|
||||||
var exists bool
|
var exists bool
|
||||||
var newroles []string
|
var newprojects []string
|
||||||
for _, r := range user.Roles {
|
for _, p := range user.Projects {
|
||||||
if r != role {
|
if p != project {
|
||||||
newroles = append(newroles, r) // Append all roles not matching requested delete role
|
newprojects = append(newprojects, p) // Append all projects not matching requested to be deleted project
|
||||||
} else {
|
} else {
|
||||||
exists = true
|
exists = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (exists == true) {
|
if exists == true {
|
||||||
var mroles, _ = json.Marshal(newroles)
|
var result interface{}
|
||||||
if _, err := sq.Update("user").Set("roles", mroles).Where("user.username = ?", username).RunWith(auth.db).Exec(); err != nil {
|
if len(newprojects) == 0 {
|
||||||
|
result = "[]"
|
||||||
|
} else {
|
||||||
|
result, _ = json.Marshal(newprojects)
|
||||||
|
}
|
||||||
|
if _, err := sq.Update("user").Set("projects", result).Where("user.username = ?", username).RunWith(auth.db).Exec(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("user %#v already does not have role %#v", username, role)
|
return fmt.Errorf("user %s already does not manage project %s", username, project)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func FetchUser(ctx context.Context, db *sqlx.DB, username string) (*model.User, error) {
|
func FetchUser(ctx context.Context, db *sqlx.DB, username string) (*model.User, error) {
|
||||||
me := GetUser(ctx)
|
me := GetUser(ctx)
|
||||||
if me != nil && !me.HasRole(RoleAdmin) && !me.HasRole(RoleSupport) && me.Username != username {
|
if me != nil && me.Username != username && me.HasNotRoles([]Role{RoleAdmin, RoleSupport, RoleManager}) {
|
||||||
return nil, errors.New("forbidden")
|
return nil, errors.New("forbidden")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -179,9 +273,13 @@ func FetchUser(ctx context.Context, db *sqlx.DB, username string) (*model.User,
 	if err := sq.Select("name", "email").From("user").Where("user.username = ?", username).
 		RunWith(db).QueryRow().Scan(&name, &email); err != nil {
 		if err == sql.ErrNoRows {
+			/* This warning will be logged *often* for non-local users, i.e. users mentioned only in job-table or archive, */
+			/* since FetchUser will be called to retrieve full name and mail for every job in query/list */
+			// log.Warnf("User '%s' Not found in DB", username)
 			return nil, nil
 		}
 
+		log.Warnf("Error while fetching user '%s'", username)
 		return nil, err
 	}
 
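Note: the new management flow fits together as follows: AddRole only accepts a known, not-yet-held role; AddProject additionally requires the manager role; and RemoveRole refuses to strip manager while projects are still assigned. A usage sketch, with auth and ctx assumed in scope and error handling shortened:

    // Sketch, not a definitive walkthrough; names mirror the diff above.
    if err := auth.AddRole(ctx, "alice", "manager"); err != nil {
        log.Warnf("add role: %v", err)
    }
    if err := auth.AddProject(ctx, "alice", "projectA"); err != nil {
        log.Warnf("add project: %v", err)
    }
    // Expected to fail while "projectA" is still assigned:
    if err := auth.RemoveRole(ctx, "alice", "manager"); err != nil {
        log.Warnf("remove role: %v", err)
    }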
@@ -25,6 +25,7 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
 	LdapConfig:                nil,
 	SessionMaxAge:             "168h",
 	StopJobsExceedingWalltime: 0,
+	ShortRunningJobsDuration:  5 * 60,
 	UiDefaults: map[string]interface{}{
 		"analysis_view_histogramMetrics":   []string{"flops_any", "mem_bw", "mem_used"},
 		"analysis_view_scatterPlotMetrics": [][]string{{"flops_any", "mem_bw"}, {"flops_any", "cpu_load"}, {"cpu_load", "mem_bw"}},
@@ -34,7 +35,6 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
 		"plot_general_colorBackground":   true,
 		"plot_general_colorscheme":       []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
 		"plot_general_lineWidth":         3,
-		"plot_list_hideShortRunningJobs": 5 * 60,
 		"plot_list_jobsPerPage":          50,
 		"plot_list_selectedMetrics":      []string{"cpu_load", "ipc", "mem_used", "flops_any", "mem_bw"},
 		"plot_view_plotsPerRow":          3,
@@ -49,7 +49,7 @@ func Init(flagConfigFile string) {
 	raw, err := os.ReadFile(flagConfigFile)
 	if err != nil {
 		if !os.IsNotExist(err) {
-			log.Fatal(err)
+			log.Fatalf("CONFIG ERROR: %v", err)
 		}
 	} else {
 		if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
@@ -58,7 +58,7 @@ func Init(flagConfigFile string) {
 		dec := json.NewDecoder(bytes.NewReader(raw))
 		dec.DisallowUnknownFields()
 		if err := dec.Decode(&Keys); err != nil {
-			log.Fatal(err)
+			log.Fatalf("could not decode: %v", err)
 		}
 
 		if Keys.Clusters == nil || len(Keys.Clusters) < 1 {
@@ -19,7 +19,7 @@ func TestInit(t *testing.T) {
 func TestInitMinimal(t *testing.T) {
 	fp := "../../docs/config.json"
 	Init(fp)
-	if Keys.Addr != "0.0.0.0:8080" {
-		t.Errorf("wrong addr\ngot: %s \nwant: 0.0.0.0:8080", Keys.Addr)
+	if Keys.Addr != "127.0.0.1:8080" {
+		t.Errorf("wrong addr\ngot: %s \nwant: 127.0.0.1:8080", Keys.Addr)
 	}
 }
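Note: the reworded fatal messages sit in an unchanged flow: validate the raw JSON against the schema, then decode strictly into Keys. A condensed sketch using the same calls, with an inline document standing in for flagConfigFile (fragment assumes the same imports as config.go):

    // Condensed from Init above.
    raw := []byte(`{"addr": "127.0.0.1:8080"}`)
    if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
        log.Fatalf("CONFIG ERROR: %v", err)
    }
    dec := json.NewDecoder(bytes.NewReader(raw))
    dec.DisallowUnknownFields() // unknown keys are rejected
    if err := dec.Decode(&Keys); err != nil {
        log.Fatalf("could not decode: %v", err)
    }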
@@ -89,6 +89,7 @@ type ComplexityRoot struct {
 		Exclusive        func(childComplexity int) int
 		ID               func(childComplexity int) int
 		JobID            func(childComplexity int) int
+		JobName          func(childComplexity int) int
 		MetaData         func(childComplexity int) int
 		MonitoringStatus func(childComplexity int) int
 		NumAcc           func(childComplexity int) int
@@ -131,6 +132,7 @@ type ComplexityRoot struct {
 		HistDuration   func(childComplexity int) int
 		HistNumNodes   func(childComplexity int) int
 		ID             func(childComplexity int) int
+		Name           func(childComplexity int) int
 		ShortJobs      func(childComplexity int) int
 		TotalCoreHours func(childComplexity int) int
 		TotalJobs      func(childComplexity int) int
@@ -274,6 +276,8 @@ type ClusterResolver interface {
 	Partitions(ctx context.Context, obj *schema.Cluster) ([]string, error)
 }
 type JobResolver interface {
+	JobName(ctx context.Context, obj *schema.Job) (*string, error)
+
 	Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error)
 
 	MetaData(ctx context.Context, obj *schema.Job) (interface{}, error)
@@ -466,6 +470,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.Job.JobID(childComplexity), true
 
+	case "Job.jobName":
+		if e.complexity.Job.JobName == nil {
+			break
+		}
+
+		return e.complexity.Job.JobName(childComplexity), true
+
 	case "Job.metaData":
 		if e.complexity.Job.MetaData == nil {
 			break
@@ -676,6 +687,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.JobsStatistics.ID(childComplexity), true
 
+	case "JobsStatistics.name":
+		if e.complexity.JobsStatistics.Name == nil {
+			break
+		}
+
+		return e.complexity.JobsStatistics.Name(childComplexity), true
+
 	case "JobsStatistics.shortJobs":
 		if e.complexity.JobsStatistics.ShortJobs == nil {
 			break
@@ -1433,6 +1451,7 @@ type Job {
 	jobId: Int!
 	user: String!
 	project: String!
+	jobName: String
 	cluster: String!
 	subCluster: String!
 	startTime: Time!
@@ -1635,6 +1654,7 @@ input JobFilter {
 	arrayJobId: Int
 	user: StringInput
 	project: StringInput
+	jobName: StringInput
 	cluster: StringInput
 	partition: StringInput
 	duration: IntRange
@@ -1668,6 +1688,7 @@ input StringInput {
 	contains: String
 	startsWith: String
 	endsWith: String
+	in: [String!]
 }
 
 input IntRange { from: Int!, to: Int! }
@@ -1688,6 +1709,7 @@ type HistoPoint {
 
 type JobsStatistics {
 	id: ID! # If ` + "`" + `groupBy` + "`" + ` was used, ID of the user/project/cluster
+	name: String # if User-Statistics: Given Name of Account (ID) Owner
 	totalJobs: Int! # Number of jobs that matched
 	shortJobs: Int! # Number of jobs with a duration of less than 2 minutes
 	totalWalltime: Int! # Sum of the duration of all matched jobs in hours
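Note: with jobName on Job and on the filter input, the new in matcher on StringInput, and name on JobsStatistics, clients can issue queries like the one below (kept in a Go constant here; the groupBy argument and the result-field selection are assumptions based on the surrounding schema, not shown in these hunks):

    // Hypothetical client query exercising the new fields.
    const exampleQuery = `
    query {
      jobs(filter: [{ jobName: { contains: "lammps" }, user: { in: ["alice", "bob"] } }]) {
        items { jobId jobName user project }
      }
      jobsStatistics(groupBy: USER) { id name totalJobs }
    }`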
@@ -3090,6 +3112,47 @@ func (ec *executionContext) fieldContext_Job_project(ctx context.Context, field
 	return fc, nil
 }
 
+func (ec *executionContext) _Job_jobName(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Job_jobName(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Job().JobName(rctx, obj)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Job_jobName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Job",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
 func (ec *executionContext) _Job_cluster(ctx context.Context, field graphql.CollectedField, obj *schema.Job) (ret graphql.Marshaler) {
 	fc, err := ec.fieldContext_Job_cluster(ctx, field)
 	if err != nil {
|
|||||||
return ec.fieldContext_Job_user(ctx, field)
|
return ec.fieldContext_Job_user(ctx, field)
|
||||||
case "project":
|
case "project":
|
||||||
return ec.fieldContext_Job_project(ctx, field)
|
return ec.fieldContext_Job_project(ctx, field)
|
||||||
|
case "jobName":
|
||||||
|
return ec.fieldContext_Job_jobName(ctx, field)
|
||||||
case "cluster":
|
case "cluster":
|
||||||
return ec.fieldContext_Job_cluster(ctx, field)
|
return ec.fieldContext_Job_cluster(ctx, field)
|
||||||
case "subCluster":
|
case "subCluster":
|
||||||
@ -4492,6 +4557,47 @@ func (ec *executionContext) fieldContext_JobsStatistics_id(ctx context.Context,
|
|||||||
return fc, nil
|
return fc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ec *executionContext) _JobsStatistics_name(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
|
||||||
|
fc, err := ec.fieldContext_JobsStatistics_name(ctx, field)
|
||||||
|
if err != nil {
|
||||||
|
return graphql.Null
|
||||||
|
}
|
||||||
|
ctx = graphql.WithFieldContext(ctx, fc)
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
ec.Error(ctx, ec.Recover(ctx, r))
|
||||||
|
ret = graphql.Null
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||||
|
ctx = rctx // use context from middleware stack in children
|
||||||
|
return obj.Name, nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
ec.Error(ctx, err)
|
||||||
|
return graphql.Null
|
||||||
|
}
|
||||||
|
if resTmp == nil {
|
||||||
|
return graphql.Null
|
||||||
|
}
|
||||||
|
res := resTmp.(*string)
|
||||||
|
fc.Result = res
|
||||||
|
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ec *executionContext) fieldContext_JobsStatistics_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||||
|
fc = &graphql.FieldContext{
|
||||||
|
Object: "JobsStatistics",
|
||||||
|
Field: field,
|
||||||
|
IsMethod: false,
|
||||||
|
IsResolver: false,
|
||||||
|
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||||
|
return nil, errors.New("field of type String does not have child fields")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return fc, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (ec *executionContext) _JobsStatistics_totalJobs(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
|
func (ec *executionContext) _JobsStatistics_totalJobs(ctx context.Context, field graphql.CollectedField, obj *model.JobsStatistics) (ret graphql.Marshaler) {
|
||||||
fc, err := ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
|
fc, err := ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -5557,7 +5663,6 @@ func (ec *executionContext) _Mutation_createTag(ctx context.Context, field graph
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -5620,7 +5725,6 @@ func (ec *executionContext) _Mutation_deleteTag(ctx context.Context, field graph
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -5675,7 +5779,6 @@ func (ec *executionContext) _Mutation_addTagsToJob(ctx context.Context, field gr
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -5738,7 +5841,6 @@ func (ec *executionContext) _Mutation_removeTagsFromJob(ctx context.Context, fie
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -5801,7 +5903,6 @@ func (ec *executionContext) _Mutation_updateConfiguration(ctx context.Context, f
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
return graphql.Null
|
return graphql.Null
|
||||||
@ -5993,7 +6094,6 @@ func (ec *executionContext) _Query_clusters(ctx context.Context, field graphql.C
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6047,7 +6147,6 @@ func (ec *executionContext) _Query_tags(ctx context.Context, field graphql.Colle
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6099,7 +6198,6 @@ func (ec *executionContext) _Query_user(ctx context.Context, field graphql.Colle
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
return graphql.Null
|
return graphql.Null
|
||||||
@ -6159,7 +6257,6 @@ func (ec *executionContext) _Query_allocatedNodes(ctx context.Context, field gra
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6220,7 +6317,6 @@ func (ec *executionContext) _Query_job(ctx context.Context, field graphql.Collec
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
return graphql.Null
|
return graphql.Null
|
||||||
@ -6246,6 +6342,8 @@ func (ec *executionContext) fieldContext_Query_job(ctx context.Context, field gr
|
|||||||
return ec.fieldContext_Job_user(ctx, field)
|
return ec.fieldContext_Job_user(ctx, field)
|
||||||
case "project":
|
case "project":
|
||||||
return ec.fieldContext_Job_project(ctx, field)
|
return ec.fieldContext_Job_project(ctx, field)
|
||||||
|
case "jobName":
|
||||||
|
return ec.fieldContext_Job_jobName(ctx, field)
|
||||||
case "cluster":
|
case "cluster":
|
||||||
return ec.fieldContext_Job_cluster(ctx, field)
|
return ec.fieldContext_Job_cluster(ctx, field)
|
||||||
case "subCluster":
|
case "subCluster":
|
||||||
@ -6318,7 +6416,6 @@ func (ec *executionContext) _Query_jobMetrics(ctx context.Context, field graphql
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6381,7 +6478,6 @@ func (ec *executionContext) _Query_jobsFootprints(ctx context.Context, field gra
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
return graphql.Null
|
return graphql.Null
|
||||||
@ -6439,7 +6535,6 @@ func (ec *executionContext) _Query_jobs(ctx context.Context, field graphql.Colle
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6504,7 +6599,6 @@ func (ec *executionContext) _Query_jobsStatistics(ctx context.Context, field gra
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6527,6 +6621,8 @@ func (ec *executionContext) fieldContext_Query_jobsStatistics(ctx context.Contex
|
|||||||
switch field.Name {
|
switch field.Name {
|
||||||
case "id":
|
case "id":
|
||||||
return ec.fieldContext_JobsStatistics_id(ctx, field)
|
return ec.fieldContext_JobsStatistics_id(ctx, field)
|
||||||
|
case "name":
|
||||||
|
return ec.fieldContext_JobsStatistics_name(ctx, field)
|
||||||
case "totalJobs":
|
case "totalJobs":
|
||||||
return ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
|
return ec.fieldContext_JobsStatistics_totalJobs(ctx, field)
|
||||||
case "shortJobs":
|
case "shortJobs":
|
||||||
@ -6575,7 +6671,6 @@ func (ec *executionContext) _Query_jobsCount(ctx context.Context, field graphql.
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6636,7 +6731,6 @@ func (ec *executionContext) _Query_rooflineHeatmap(ctx context.Context, field gr
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6691,7 +6785,6 @@ func (ec *executionContext) _Query_nodeMetrics(ctx context.Context, field graphq
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
if !graphql.HasFieldError(ctx, fc) {
|
if !graphql.HasFieldError(ctx, fc) {
|
||||||
@ -6754,7 +6847,6 @@ func (ec *executionContext) _Query___type(ctx context.Context, field graphql.Col
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
return graphql.Null
|
return graphql.Null
|
||||||
@ -6828,7 +6920,6 @@ func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.C
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ec.Error(ctx, err)
|
ec.Error(ctx, err)
|
||||||
return graphql.Null
|
|
||||||
}
|
}
|
||||||
if resTmp == nil {
|
if resTmp == nil {
|
||||||
return graphql.Null
|
return graphql.Null
|
||||||
@ -10650,7 +10741,7 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
|
|||||||
asMap[k] = v
|
asMap[k] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "flopsAnyAvg", "memBwAvg", "loadAvg", "memUsedMax"}
|
fieldsInOrder := [...]string{"tags", "jobId", "arrayJobId", "user", "project", "jobName", "cluster", "partition", "duration", "minRunningFor", "numNodes", "numAccelerators", "numHWThreads", "startTime", "state", "flopsAnyAvg", "memBwAvg", "loadAvg", "memUsedMax"}
|
||||||
for _, k := range fieldsInOrder {
|
for _, k := range fieldsInOrder {
|
||||||
v, ok := asMap[k]
|
v, ok := asMap[k]
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -10697,6 +10788,14 @@ func (ec *executionContext) unmarshalInputJobFilter(ctx context.Context, obj int
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return it, err
|
return it, err
|
||||||
}
|
}
|
||||||
|
case "jobName":
|
||||||
|
var err error
|
||||||
|
|
||||||
|
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("jobName"))
|
||||||
|
it.JobName, err = ec.unmarshalOStringInput2ᚖgithubᚗcomᚋClusterCockpitᚋccᚑbackendᚋinternalᚋgraphᚋmodelᚐStringInput(ctx, v)
|
||||||
|
if err != nil {
|
||||||
|
return it, err
|
||||||
|
}
|
||||||
case "cluster":
|
case "cluster":
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
@ -10890,7 +10989,7 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
|
|||||||
asMap[k] = v
|
asMap[k] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
fieldsInOrder := [...]string{"eq", "contains", "startsWith", "endsWith"}
|
fieldsInOrder := [...]string{"eq", "contains", "startsWith", "endsWith", "in"}
|
||||||
for _, k := range fieldsInOrder {
|
for _, k := range fieldsInOrder {
|
||||||
v, ok := asMap[k]
|
v, ok := asMap[k]
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -10929,6 +11028,14 @@ func (ec *executionContext) unmarshalInputStringInput(ctx context.Context, obj i
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return it, err
|
return it, err
|
||||||
}
|
}
|
||||||
|
case "in":
|
||||||
|
var err error
|
||||||
|
|
||||||
|
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("in"))
|
||||||
|
it.In, err = ec.unmarshalOString2ᚕstringᚄ(ctx, v)
|
||||||
|
if err != nil {
|
||||||
|
return it, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -11261,6 +11368,23 @@ func (ec *executionContext) _Job(ctx context.Context, sel ast.SelectionSet, obj
|
|||||||
if out.Values[i] == graphql.Null {
|
if out.Values[i] == graphql.Null {
|
||||||
atomic.AddUint32(&invalids, 1)
|
atomic.AddUint32(&invalids, 1)
|
||||||
}
|
}
|
||||||
|
case "jobName":
|
||||||
|
field := field
|
||||||
|
|
||||||
|
innerFunc := func(ctx context.Context) (res graphql.Marshaler) {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
ec.Error(ctx, ec.Recover(ctx, r))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
res = ec._Job_jobName(ctx, field, obj)
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
out.Concurrently(i, func() graphql.Marshaler {
|
||||||
|
return innerFunc(ctx)
|
||||||
|
|
||||||
|
})
|
||||||
case "cluster":
|
case "cluster":
|
||||||
|
|
||||||
out.Values[i] = ec._Job_cluster(ctx, field, obj)
|
out.Values[i] = ec._Job_cluster(ctx, field, obj)
|
||||||
@ -11570,6 +11694,10 @@ func (ec *executionContext) _JobsStatistics(ctx context.Context, sel ast.Selecti
|
|||||||
if out.Values[i] == graphql.Null {
|
if out.Values[i] == graphql.Null {
|
||||||
invalids++
|
invalids++
|
||||||
}
|
}
|
||||||
|
case "name":
|
||||||
|
|
||||||
|
out.Values[i] = ec._JobsStatistics_name(ctx, field, obj)
|
||||||
|
|
||||||
case "totalJobs":
|
case "totalJobs":
|
||||||
|
|
||||||
out.Values[i] = ec._JobsStatistics_totalJobs(ctx, field, obj)
|
out.Values[i] = ec._JobsStatistics_totalJobs(ctx, field, obj)
|
||||||
@ -11832,7 +11960,6 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet)
|
|||||||
})
|
})
|
||||||
|
|
||||||
out := graphql.NewFieldSet(fields)
|
out := graphql.NewFieldSet(fields)
|
||||||
var invalids uint32
|
|
||||||
for i, field := range fields {
|
for i, field := range fields {
|
||||||
innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
|
innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
|
||||||
Object: field.Name,
|
Object: field.Name,
|
||||||
@ -11848,36 +11975,24 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet)
|
|||||||
return ec._Mutation_createTag(ctx, field)
|
return ec._Mutation_createTag(ctx, field)
|
||||||
})
|
})
|
||||||
|
|
||||||
if out.Values[i] == graphql.Null {
|
|
||||||
invalids++
|
|
||||||
}
|
|
||||||
case "deleteTag":
|
case "deleteTag":
|
||||||
|
|
||||||
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
||||||
return ec._Mutation_deleteTag(ctx, field)
|
return ec._Mutation_deleteTag(ctx, field)
|
||||||
})
|
})
|
||||||
|
|
||||||
if out.Values[i] == graphql.Null {
|
|
||||||
invalids++
|
|
||||||
}
|
|
||||||
case "addTagsToJob":
|
case "addTagsToJob":
|
||||||
|
|
||||||
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
||||||
return ec._Mutation_addTagsToJob(ctx, field)
|
return ec._Mutation_addTagsToJob(ctx, field)
|
||||||
})
|
})
|
||||||
|
|
||||||
if out.Values[i] == graphql.Null {
|
|
||||||
invalids++
|
|
||||||
}
|
|
||||||
case "removeTagsFromJob":
|
case "removeTagsFromJob":
|
||||||
|
|
||||||
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
||||||
return ec._Mutation_removeTagsFromJob(ctx, field)
|
return ec._Mutation_removeTagsFromJob(ctx, field)
|
||||||
})
|
})
|
||||||
|
|
||||||
if out.Values[i] == graphql.Null {
|
|
||||||
invalids++
|
|
||||||
}
|
|
||||||
case "updateConfiguration":
|
case "updateConfiguration":
|
||||||
|
|
||||||
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
||||||
@ -11889,9 +12004,6 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
out.Dispatch()
|
out.Dispatch()
|
||||||
if invalids > 0 {
|
|
||||||
return graphql.Null
|
|
||||||
}
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -11946,7 +12058,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
})
|
})
|
||||||
|
|
||||||
out := graphql.NewFieldSet(fields)
|
out := graphql.NewFieldSet(fields)
|
||||||
var invalids uint32
|
|
||||||
for i, field := range fields {
|
for i, field := range fields {
|
||||||
innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
|
innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
|
||||||
Object: field.Name,
|
Object: field.Name,
|
||||||
@ -11966,9 +12077,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_clusters(ctx, field)
|
res = ec._Query_clusters(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -11989,9 +12097,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_tags(ctx, field)
|
res = ec._Query_tags(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12032,9 +12137,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_allocatedNodes(ctx, field)
|
res = ec._Query_allocatedNodes(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12075,9 +12177,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_jobMetrics(ctx, field)
|
res = ec._Query_jobMetrics(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12118,9 +12217,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_jobs(ctx, field)
|
res = ec._Query_jobs(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12141,9 +12237,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_jobsStatistics(ctx, field)
|
res = ec._Query_jobsStatistics(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12164,9 +12257,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_jobsCount(ctx, field)
|
res = ec._Query_jobsCount(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12187,9 +12277,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_rooflineHeatmap(ctx, field)
|
res = ec._Query_rooflineHeatmap(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12210,9 +12297,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
res = ec._Query_nodeMetrics(ctx, field)
|
res = ec._Query_nodeMetrics(ctx, field)
|
||||||
if res == graphql.Null {
|
|
||||||
atomic.AddUint32(&invalids, 1)
|
|
||||||
}
|
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12240,9 +12324,6 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
out.Dispatch()
|
out.Dispatch()
|
||||||
if invalids > 0 {
|
|
||||||
return graphql.Null
|
|
||||||
}
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -42,6 +42,7 @@ type JobFilter struct {
     ArrayJobID *int `json:"arrayJobId"`
     User *StringInput `json:"user"`
     Project *StringInput `json:"project"`
+    JobName *StringInput `json:"jobName"`
     Cluster *StringInput `json:"cluster"`
     Partition *StringInput `json:"partition"`
     Duration *schema.IntRange `json:"duration"`
@@ -72,6 +73,7 @@ type JobResultList struct {

 type JobsStatistics struct {
     ID string `json:"id"`
+    Name *string `json:"name"`
     TotalJobs int `json:"totalJobs"`
     ShortJobs int `json:"shortJobs"`
     TotalWalltime int `json:"totalWalltime"`
@@ -102,10 +104,11 @@ type PageRequest struct {
 }

 type StringInput struct {
     Eq *string `json:"eq"`
     Contains *string `json:"contains"`
     StartsWith *string `json:"startsWith"`
     EndsWith *string `json:"endsWith"`
+    In []string `json:"in"`
 }

 type TimeRangeOutput struct {
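Note: the three model hunks above belong together: JobFilter gains a jobName matcher, JobsStatistics gains the resolved name, and StringInput gains an "in" list matcher. A minimal Go sketch of how the extended filter could be populated inside this repository follows; the strPtr helper and the cluster names are illustrative assumptions, not part of the change.

package example

import "github.com/ClusterCockpit/cc-backend/internal/graph/model"

// strPtr is a small hypothetical helper for taking the address of a literal.
func strPtr(s string) *string { return &s }

// buildFilter shows how the two new fields combine: jobName uses the
// extended StringInput, and the new "in" matcher accepts a value list.
func buildFilter() *model.JobFilter {
    return &model.JobFilter{
        JobName: &model.StringInput{Contains: strPtr("lammps")},
        Cluster: &model.StringInput{In: []string{"emmy", "meggie"}},
    }
}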
@@ -2,6 +2,7 @@ package graph

 // This file will be automatically regenerated based on the schema, any resolver implementations
 // will be copied through when generating and any unknown code will be moved to the end.
+// Code generated by github.com/99designs/gqlgen version v0.17.24

 import (
     "context"
@@ -16,6 +17,7 @@ import (
     "github.com/ClusterCockpit/cc-backend/internal/metricdata"
     "github.com/ClusterCockpit/cc-backend/internal/repository"
     "github.com/ClusterCockpit/cc-backend/pkg/archive"
+    "github.com/ClusterCockpit/cc-backend/pkg/log"
     "github.com/ClusterCockpit/cc-backend/pkg/schema"
 )

@@ -24,6 +26,11 @@ func (r *clusterResolver) Partitions(ctx context.Context, obj *schema.Cluster) (
     return r.Repo.Partitions(obj.Name)
 }

+// JobName is the resolver for the jobName field.
+func (r *jobResolver) JobName(ctx context.Context, obj *schema.Job) (*string, error) {
+    return r.Repo.FetchJobName(obj)
+}
+
 // Tags is the resolver for the tags field.
 func (r *jobResolver) Tags(ctx context.Context, obj *schema.Job) ([]*schema.Tag, error) {
     return r.Repo.GetTags(&obj.ID)
@@ -43,6 +50,7 @@ func (r *jobResolver) UserData(ctx context.Context, obj *schema.Job) (*model.Use
 func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name string) (*schema.Tag, error) {
     id, err := r.Repo.CreateTag(typeArg, name)
     if err != nil {
+        log.Warn("Error while creating tag")
         return nil, err
     }

@@ -58,6 +66,7 @@ func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, er
 func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
     jid, err := strconv.ParseInt(job, 10, 64)
     if err != nil {
+        log.Warn("Error while adding tag to job")
         return nil, err
     }

@@ -65,10 +74,12 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
     for _, tagId := range tagIds {
         tid, err := strconv.ParseInt(tagId, 10, 64)
         if err != nil {
+            log.Warn("Error while parsing tag id")
            return nil, err
         }

         if tags, err = r.Repo.AddTag(jid, tid); err != nil {
+            log.Warn("Error while adding tag")
             return nil, err
         }
     }
@@ -80,6 +91,7 @@ func (r *mutationResolver) AddTagsToJob(ctx context.Context, job string, tagIds
 func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, tagIds []string) ([]*schema.Tag, error) {
     jid, err := strconv.ParseInt(job, 10, 64)
     if err != nil {
+        log.Warn("Error while parsing job id")
         return nil, err
     }

@@ -87,10 +99,12 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
     for _, tagId := range tagIds {
         tid, err := strconv.ParseInt(tagId, 10, 64)
         if err != nil {
+            log.Warn("Error while parsing tag id")
             return nil, err
         }

         if tags, err = r.Repo.RemoveTag(jid, tid); err != nil {
+            log.Warn("Error while removing tag")
             return nil, err
         }
     }
@@ -101,6 +115,7 @@ func (r *mutationResolver) RemoveTagsFromJob(ctx context.Context, job string, ta
 // UpdateConfiguration is the resolver for the updateConfiguration field.
 func (r *mutationResolver) UpdateConfiguration(ctx context.Context, name string, value string) (*string, error) {
     if err := repository.GetUserCfgRepo().UpdateConfig(name, value, auth.GetUser(ctx)); err != nil {
+        log.Warn("Error while updating user config")
         return nil, err
     }

@@ -126,6 +141,7 @@ func (r *queryResolver) User(ctx context.Context, username string) (*model.User,
 func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*model.Count, error) {
     data, err := r.Repo.AllocatedNodes(cluster)
     if err != nil {
+        log.Warn("Error while fetching allocated nodes")
         return nil, err
     }

@@ -144,15 +160,17 @@ func (r *queryResolver) AllocatedNodes(ctx context.Context, cluster string) ([]*
 func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error) {
     numericId, err := strconv.ParseInt(id, 10, 64)
     if err != nil {
+        log.Warn("Error while parsing job id")
         return nil, err
     }

     job, err := r.Repo.FindById(numericId)
     if err != nil {
+        log.Warn("Error while finding job by id")
         return nil, err
     }

-    if user := auth.GetUser(ctx); user != nil && !user.HasRole(auth.RoleAdmin) && !user.HasRole(auth.RoleSupport) && job.User != user.Username {
+    if user := auth.GetUser(ctx); user != nil && job.User != user.Username && user.HasNotRoles([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
         return nil, errors.New("you are not allowed to see this job")
     }

@@ -163,17 +181,23 @@ func (r *queryResolver) Job(ctx context.Context, id string) (*schema.Job, error)
 func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []string, scopes []schema.MetricScope) ([]*model.JobMetricWithName, error) {
     job, err := r.Query().Job(ctx, id)
     if err != nil {
+        log.Warn("Error while querying job for metrics")
         return nil, err
     }

     data, err := metricdata.LoadData(job, metrics, scopes, ctx)
     if err != nil {
+        log.Warn("Error while loading job data")
         return nil, err
     }

     res := []*model.JobMetricWithName{}
     for name, md := range data {
         for scope, metric := range md {
+            if metric.Scope != schema.MetricScope(scope) {
+                log.Panic("metric.Scope != schema.MetricScope(scope) : Should not happen!")
+            }
+
             res = append(res, &model.JobMetricWithName{
                 Name: name,
                 Scope: scope,
@@ -201,11 +225,13 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag

     jobs, err := r.Repo.QueryJobs(ctx, filter, page, order)
     if err != nil {
+        log.Warn("Error while querying jobs")
         return nil, err
     }

     count, err := r.Repo.CountJobs(ctx, filter)
     if err != nil {
+        log.Warn("Error while counting jobs")
         return nil, err
     }

@@ -214,13 +240,14 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag

 // JobsStatistics is the resolver for the jobsStatistics field.
 func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
-    return r.jobsStatistics(ctx, filter, groupBy)
+    return r.Repo.JobsStatistics(ctx, filter, groupBy)
 }

 // JobsCount is the resolver for the jobsCount field.
 func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, weight *model.Weights, limit *int) ([]*model.Count, error) {
     counts, err := r.Repo.CountGroupedJobs(ctx, groupBy, filter, weight, limit)
     if err != nil {
+        log.Warn("Error while counting grouped jobs")
         return nil, err
     }

@@ -254,6 +281,7 @@ func (r *queryResolver) NodeMetrics(ctx context.Context, cluster string, nodes [

     data, err := metricdata.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
     if err != nil {
+        log.Warn("Error while loading node data")
         return nil, err
     }

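Note: with the JobName resolver above in place, the field is fetched lazily per job through the repository layer and becomes queryable over the GraphQL HTTP API. A client-side sketch follows; the endpoint path /query, the port, and the unauthenticated request are assumptions, not taken from this diff.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Filter on the new jobName field; the filter shape follows the
    // JobFilter model introduced in this commit.
    query := `{ jobs(filter: [{jobName: {contains: "gromacs"}}]) { count } }`
    body, err := json.Marshal(map[string]string{"query": query})
    if err != nil {
        panic(err)
    }
    resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    var result map[string]interface{}
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        panic(err)
    }
    fmt.Println(result)
}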
@@ -6,197 +6,16 @@ package graph

 import (
     "context"
-    "database/sql"
     "errors"
     "fmt"
     "math"
-    "time"

-    "github.com/99designs/gqlgen/graphql"
     "github.com/ClusterCockpit/cc-backend/internal/graph/model"
     "github.com/ClusterCockpit/cc-backend/internal/metricdata"
-    "github.com/ClusterCockpit/cc-backend/internal/repository"
-    "github.com/ClusterCockpit/cc-backend/pkg/archive"
+    "github.com/ClusterCockpit/cc-backend/pkg/log"
     "github.com/ClusterCockpit/cc-backend/pkg/schema"
-    sq "github.com/Masterminds/squirrel"
 )

-// GraphQL validation should make sure that no unkown values can be specified.
-var groupBy2column = map[model.Aggregate]string{
-    model.AggregateUser: "job.user",
-    model.AggregateProject: "job.project",
-    model.AggregateCluster: "job.cluster",
-}
-
-const ShortJobDuration int = 5 * 60
-
-// Helper function for the jobsStatistics GraphQL query placed here so that schema.resolvers.go is not too full.
-func (r *queryResolver) jobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
-    // In case `groupBy` is nil (not used), the model.JobsStatistics used is at the key '' (empty string)
-    stats := map[string]*model.JobsStatistics{}
-
-    // `socketsPerNode` and `coresPerSocket` can differ from cluster to cluster, so we need to explicitly loop over those.
-    for _, cluster := range archive.Clusters {
-        for _, subcluster := range cluster.SubClusters {
-            corehoursCol := fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes * %d * %d) / 3600) as int)", subcluster.SocketsPerNode, subcluster.CoresPerSocket)
-            var query sq.SelectBuilder
-            if groupBy == nil {
-                query = sq.Select(
-                    "''",
-                    "COUNT(job.id)",
-                    "CAST(ROUND(SUM(job.duration) / 3600) as int)",
-                    corehoursCol,
-                ).From("job")
-            } else {
-                col := groupBy2column[*groupBy]
-                query = sq.Select(
-                    col,
-                    "COUNT(job.id)",
-                    "CAST(ROUND(SUM(job.duration) / 3600) as int)",
-                    corehoursCol,
-                ).From("job").GroupBy(col)
-            }
-
-            query = query.
-                Where("job.cluster = ?", cluster.Name).
-                Where("job.subcluster = ?", subcluster.Name)
-
-            query = repository.SecurityCheck(ctx, query)
-            for _, f := range filter {
-                query = repository.BuildWhereClause(f, query)
-            }
-
-            rows, err := query.RunWith(r.DB).Query()
-            if err != nil {
-                return nil, err
-            }
-
-            for rows.Next() {
-                var id sql.NullString
-                var jobs, walltime, corehours sql.NullInt64
-                if err := rows.Scan(&id, &jobs, &walltime, &corehours); err != nil {
-                    return nil, err
-                }
-
-                if id.Valid {
-                    if s, ok := stats[id.String]; ok {
-                        s.TotalJobs += int(jobs.Int64)
-                        s.TotalWalltime += int(walltime.Int64)
-                        s.TotalCoreHours += int(corehours.Int64)
-                    } else {
-                        stats[id.String] = &model.JobsStatistics{
-                            ID: id.String,
-                            TotalJobs: int(jobs.Int64),
-                            TotalWalltime: int(walltime.Int64),
-                            TotalCoreHours: int(corehours.Int64),
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    if groupBy == nil {
-        query := sq.Select("COUNT(job.id)").From("job").Where("job.duration < ?", ShortJobDuration)
-        query = repository.SecurityCheck(ctx, query)
-        for _, f := range filter {
-            query = repository.BuildWhereClause(f, query)
-        }
-        if err := query.RunWith(r.DB).QueryRow().Scan(&(stats[""].ShortJobs)); err != nil {
-            return nil, err
-        }
-    } else {
-        col := groupBy2column[*groupBy]
-        query := sq.Select(col, "COUNT(job.id)").From("job").Where("job.duration < ?", ShortJobDuration)
-        query = repository.SecurityCheck(ctx, query)
-        for _, f := range filter {
-            query = repository.BuildWhereClause(f, query)
-        }
-        rows, err := query.RunWith(r.DB).Query()
-        if err != nil {
-            return nil, err
-        }
-
-        for rows.Next() {
-            var id sql.NullString
-            var shortJobs sql.NullInt64
-            if err := rows.Scan(&id, &shortJobs); err != nil {
-                return nil, err
-            }
-
-            if id.Valid {
-                stats[id.String].ShortJobs = int(shortJobs.Int64)
-            }
-        }
-    }
-
-    // Calculating the histogram data is expensive, so only do it if needed.
-    // An explicit resolver can not be used because we need to know the filters.
-    histogramsNeeded := false
-    fields := graphql.CollectFieldsCtx(ctx, nil)
-    for _, col := range fields {
-        if col.Name == "histDuration" || col.Name == "histNumNodes" {
-            histogramsNeeded = true
-        }
-    }
-
-    res := make([]*model.JobsStatistics, 0, len(stats))
-    for _, stat := range stats {
-        res = append(res, stat)
-        id, col := "", ""
-        if groupBy != nil {
-            id = stat.ID
-            col = groupBy2column[*groupBy]
-        }
-
-        if histogramsNeeded {
-            var err error
-            value := fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as int) as value`, time.Now().Unix())
-            stat.HistDuration, err = r.jobsStatisticsHistogram(ctx, value, filter, id, col)
-            if err != nil {
-                return nil, err
-            }
-
-            stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter, id, col)
-            if err != nil {
-                return nil, err
-            }
-        }
-    }
-
-    return res, nil
-}
-
-// `value` must be the column grouped by, but renamed to "value". `id` and `col` can optionally be used
-// to add a condition to the query of the kind "<col> = <id>".
-func (r *queryResolver) jobsStatisticsHistogram(ctx context.Context, value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) {
-    query := sq.Select(value, "COUNT(job.id) AS count").From("job")
-    query = repository.SecurityCheck(ctx, query)
-    for _, f := range filters {
-        query = repository.BuildWhereClause(f, query)
-    }
-
-    if len(id) != 0 && len(col) != 0 {
-        query = query.Where(col+" = ?", id)
-    }
-
-    rows, err := query.GroupBy("value").RunWith(r.DB).Query()
-    if err != nil {
-        return nil, err
-    }
-
-    points := make([]*model.HistoPoint, 0)
-    for rows.Next() {
-        point := model.HistoPoint{}
-        if err := rows.Scan(&point.Value, &point.Count); err != nil {
-            return nil, err
-        }
-
-        points = append(points, &point)
-    }
-    return points, nil
-}
-
 const MAX_JOBS_FOR_ANALYSIS = 500

 // Helper function for the rooflineHeatmap GraphQL query placed here so that schema.resolvers.go is not too full.
@@ -208,10 +27,11 @@ func (r *queryResolver) rooflineHeatmap(

     jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
     if err != nil {
+        log.Error("Error while querying jobs for roofline")
         return nil, err
     }
     if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
-        return nil, fmt.Errorf("too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
+        return nil, fmt.Errorf("GRAPH/STATS > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
     }

     fcols, frows := float64(cols), float64(rows)
@@ -228,19 +48,20 @@ func (r *queryResolver) rooflineHeatmap(

         jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx)
         if err != nil {
+            log.Error("Error while loading metrics for roofline")
             return nil, err
         }

         flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"]
         if flops_ == nil && membw_ == nil {
-            return nil, fmt.Errorf("'flops_any' or 'mem_bw' missing for job %d", job.ID)
+            return nil, fmt.Errorf("GRAPH/STATS > 'flops_any' or 'mem_bw' missing for job %d", job.ID)
         }

         flops, ok1 := flops_["node"]
         membw, ok2 := membw_["node"]
         if !ok1 || !ok2 {
             // TODO/FIXME:
-            return nil, errors.New("todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
+            return nil, errors.New("GRAPH/STATS > todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
         }

         for n := 0; n < len(flops.Series); n++ {
@@ -272,10 +93,11 @@ func (r *queryResolver) rooflineHeatmap(
 func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobFilter, metrics []string) (*model.Footprints, error) {
     jobs, err := r.Repo.QueryJobs(ctx, filter, &model.PageRequest{Page: 1, ItemsPerPage: MAX_JOBS_FOR_ANALYSIS + 1}, nil)
     if err != nil {
+        log.Error("Error while querying jobs for footprint")
         return nil, err
     }
     if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
-        return nil, fmt.Errorf("too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
+        return nil, fmt.Errorf("GRAPH/STATS > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
     }

     avgs := make([][]schema.Float, len(metrics))
@@ -290,6 +112,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
         }

         if err := metricdata.LoadAverages(job, metrics, avgs, ctx); err != nil {
+            log.Error("Error while loading averages for footprint")
             return nil, err
         }

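Note: the statistics helpers removed above were moved out of the graph package (the resolver now delegates to r.Repo.JobsStatistics), and the remaining error strings gained a "GRAPH/STATS >" subsystem prefix so log output can be traced back to its origin, matching the "METRICDATA/..." prefixes further down. A tiny sketch of that naming convention follows; the wrapper itself is hypothetical, the code above writes the prefix inline.

package example

import "fmt"

// prefixedErrorf illustrates the "SUBSYS > message" convention used
// throughout this commit; cc-backend itself writes the prefix inline.
func prefixedErrorf(subsystem, format string, args ...interface{}) error {
    return fmt.Errorf(subsystem+" > "+format, args...)
}

// Example: prefixedErrorf("GRAPH/STATS", "too many jobs matched (max: %d)", 500)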
@@ -16,6 +16,7 @@ import (
     "time"

     "github.com/ClusterCockpit/cc-backend/pkg/archive"
+    "github.com/ClusterCockpit/cc-backend/pkg/log"
     "github.com/ClusterCockpit/cc-backend/pkg/schema"
 )

@@ -78,6 +79,7 @@ func (ccms *CCMetricStore) Init(rawConfig json.RawMessage) error {

     var config CCMetricStoreConfig
     if err := json.Unmarshal(rawConfig, &config); err != nil {
+        log.Warn("Error while unmarshaling raw json config")
         return err
     }

@@ -124,11 +126,13 @@ func (ccms *CCMetricStore) doRequest(

     buf := &bytes.Buffer{}
     if err := json.NewEncoder(buf).Encode(body); err != nil {
+        log.Warn("Error while encoding request body")
         return nil, err
     }

     req, err := http.NewRequestWithContext(ctx, http.MethodPost, ccms.queryEndpoint, buf)
     if err != nil {
+        log.Warn("Error while building request body")
         return nil, err
     }
     if ccms.jwt != "" {
@@ -137,6 +141,7 @@ func (ccms *CCMetricStore) doRequest(

     res, err := ccms.client.Do(req)
     if err != nil {
+        log.Error("Error while performing request")
         return nil, err
     }

@@ -146,6 +151,7 @@ func (ccms *CCMetricStore) doRequest(

     var resBody ApiQueryResponse
     if err := json.NewDecoder(bufio.NewReader(res.Body)).Decode(&resBody); err != nil {
+        log.Warn("Error while decoding result body")
         return nil, err
     }

@@ -160,6 +166,7 @@ func (ccms *CCMetricStore) LoadData(

     queries, assignedScope, err := ccms.buildQueries(job, metrics, scopes)
     if err != nil {
+        log.Warn("Error while building queries")
         return nil, err
     }

@@ -174,6 +181,7 @@ func (ccms *CCMetricStore) LoadData(

     resBody, err := ccms.doRequest(ctx, &req)
     if err != nil {
+        log.Error("Error while performing request")
         return nil, err
     }

@@ -200,6 +208,7 @@ func (ccms *CCMetricStore) LoadData(

         for _, res := range row {
             if res.Error != nil {
+                /* Build list for "partial errors", if any */
                 errors = append(errors, fmt.Sprintf("failed to fetch '%s' from host '%s': %s", query.Metric, query.Hostname, *res.Error))
                 continue
             }
@@ -240,7 +249,8 @@ func (ccms *CCMetricStore) LoadData(
     }

     if len(errors) != 0 {
-        return jobData, fmt.Errorf("cc-metric-store: %s", strings.Join(errors, ", "))
+        /* Returns list for "partial errors" */
+        return jobData, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
     }

     return jobData, nil
@@ -272,8 +282,8 @@ func (ccms *CCMetricStore) buildQueries(
         remoteName := ccms.toRemoteName(metric)
         mc := archive.GetMetricConfig(job.Cluster, metric)
         if mc == nil {
-            // return nil, fmt.Errorf("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
-            // log.Printf("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
+            // return nil, fmt.Errorf("METRICDATA/CCMS > metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
+            log.Infof("metric '%s' is not specified for cluster '%s'", metric, job.Cluster)
             continue
         }

@@ -483,7 +493,7 @@ func (ccms *CCMetricStore) buildQueries(
                 continue
             }

-            return nil, nil, fmt.Errorf("TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
+            return nil, nil, fmt.Errorf("METRICDATA/CCMS > TODO: unhandled case: native-scope=%s, requested-scope=%s", nativeScope, requestedScope)
         }
     }
 }
@@ -498,6 +508,7 @@ func (ccms *CCMetricStore) LoadStats(

     queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode})
     if err != nil {
+        log.Warn("Error while building query")
         return nil, err
     }

@@ -512,6 +523,7 @@ func (ccms *CCMetricStore) LoadStats(

     resBody, err := ccms.doRequest(ctx, &req)
     if err != nil {
+        log.Error("Error while performing request")
         return nil, err
     }

@@ -521,7 +533,7 @@ func (ccms *CCMetricStore) LoadStats(
         metric := ccms.toLocalName(query.Metric)
         data := res[0]
         if data.Error != nil {
-            return nil, fmt.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
+            return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
         }

         metricdata, ok := stats[metric]
@@ -531,7 +543,7 @@ func (ccms *CCMetricStore) LoadStats(
         }

         if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
-            return nil, fmt.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
+            return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
         }

         metricdata[query.Hostname] = schema.MetricStatistics{
@@ -577,6 +589,7 @@ func (ccms *CCMetricStore) LoadNodeData(

     resBody, err := ccms.doRequest(ctx, &req)
     if err != nil {
+        log.Error("Error while performing request")
         return nil, err
     }

@@ -593,11 +606,12 @@ func (ccms *CCMetricStore) LoadNodeData(
         metric := ccms.toLocalName(query.Metric)
         qdata := res[0]
         if qdata.Error != nil {
+            /* Build list for "partial errors", if any */
             errors = append(errors, fmt.Sprintf("fetching %s for node %s failed: %s", metric, query.Hostname, *qdata.Error))
         }

         if qdata.Avg.IsNaN() || qdata.Min.IsNaN() || qdata.Max.IsNaN() {
-            // return nil, fmt.Errorf("fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
+            // return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
             qdata.Avg, qdata.Min, qdata.Max = 0., 0., 0.
         }

@@ -626,7 +640,8 @@ func (ccms *CCMetricStore) LoadNodeData(
     }

     if len(errors) != 0 {
-        return data, fmt.Errorf("cc-metric-store: %s", strings.Join(errors, ", "))
+        /* Returns list of "partial errors" */
+        return data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errors, ", "))
     }

     return data, nil
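Note: LoadData and LoadNodeData above now collect per-host query failures into a list and still return the data gathered so far, instead of aborting on the first error. The pattern reduced to its core, as a standalone sketch with made-up host names:

package main

import (
    "fmt"
    "strings"
)

// fetch simulates one per-host query that may fail.
func fetch(host string) (float64, error) {
    if host == "node42" {
        return 0, fmt.Errorf("host unreachable")
    }
    return 1.0, nil
}

func main() {
    data := map[string]float64{}
    errs := []string{}
    for _, host := range []string{"node01", "node42"} {
        v, err := fetch(host)
        if err != nil {
            // Build list of "partial errors" instead of aborting.
            errs = append(errs, fmt.Sprintf("failed to fetch from host '%s': %s", host, err))
            continue
        }
        data[host] = v
    }
    if len(errs) != 0 {
        // Return the partial data alongside the combined error.
        fmt.Println(data, fmt.Errorf("METRICDATA/CCMS > Errors: %s", strings.Join(errs, ", ")))
        return
    }
    fmt.Println(data)
}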
@ -10,11 +10,11 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
"github.com/ClusterCockpit/cc-backend/pkg/archive"
|
||||||
|
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
"github.com/ClusterCockpit/cc-backend/pkg/schema"
|
||||||
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||||
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
|
influxdb2Api "github.com/influxdata/influxdb-client-go/v2/api"
|
||||||
@ -37,6 +37,7 @@ type InfluxDBv2DataRepository struct {
|
|||||||
func (idb *InfluxDBv2DataRepository) Init(rawConfig json.RawMessage) error {
|
func (idb *InfluxDBv2DataRepository) Init(rawConfig json.RawMessage) error {
|
||||||
var config InfluxDBv2DataRepositoryConfig
|
var config InfluxDBv2DataRepositoryConfig
|
||||||
if err := json.Unmarshal(rawConfig, &config); err != nil {
|
if err := json.Unmarshal(rawConfig, &config); err != nil {
|
||||||
|
log.Warn("Error while unmarshaling raw json config")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -71,7 +72,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
|||||||
for _, h := range job.Resources {
|
for _, h := range job.Resources {
|
||||||
if h.HWThreads != nil || h.Accelerators != nil {
|
if h.HWThreads != nil || h.Accelerators != nil {
|
||||||
// TODO
|
// TODO
|
||||||
return nil, errors.New("the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
|
return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
|
||||||
}
|
}
|
||||||
hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname))
|
hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname))
|
||||||
}
|
}
|
||||||
@ -84,7 +85,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
|||||||
switch scope {
|
switch scope {
|
||||||
case "node":
|
case "node":
|
||||||
// Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows
|
// Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows
|
||||||
// log.Println("Note: Scope 'node' requested. ")
|
// log.Info("Scope 'node' requested. ")
|
||||||
query = fmt.Sprintf(`
|
query = fmt.Sprintf(`
|
||||||
from(bucket: "%s")
|
from(bucket: "%s")
|
||||||
|> range(start: %s, stop: %s)
|
|> range(start: %s, stop: %s)
|
||||||
@ -97,10 +98,10 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
|||||||
idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))),
|
idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))),
|
||||||
measurementsCond, hostsCond)
|
measurementsCond, hostsCond)
|
||||||
case "socket":
|
case "socket":
|
||||||
log.Println("Note: Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
|
log.Info("Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ")
|
||||||
continue
|
continue
|
||||||
case "core":
|
case "core":
|
||||||
log.Println("Note: Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
|
log.Info(" Scope 'core' requested, but not yet supported: Will return 'node' scope only. ")
|
||||||
continue
|
continue
|
||||||
// Get Finest Granularity only, Set NULL to 0.0
|
// Get Finest Granularity only, Set NULL to 0.0
|
||||||
// query = fmt.Sprintf(`
|
// query = fmt.Sprintf(`
|
||||||
@ -114,13 +115,14 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
|||||||
// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
|
// idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )),
|
||||||
// measurementsCond, hostsCond)
|
// measurementsCond, hostsCond)
|
||||||
default:
|
default:
|
||||||
log.Println("Note: Unknown Scope requested: Will return 'node' scope. ")
|
log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
|
||||||
continue
|
continue
|
||||||
// return nil, errors.New("the InfluxDB metric data repository does not yet support other scopes than 'node'")
|
// return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support other scopes than 'node'")
|
||||||
}
|
}
|
||||||
|
|
||||||
rows, err := idb.queryClient.Query(ctx, query)
|
rows, err := idb.queryClient.Query(ctx, query)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Error("Error while performing query")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -191,6 +193,7 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
|||||||
// hostSeries.Data = append(hostSeries.Data, schema.Float(val))
|
// hostSeries.Data = append(hostSeries.Data, schema.Float(val))
|
||||||
// }
|
// }
|
||||||
default:
|
default:
|
||||||
|
log.Infof("Unknown scope '%s' requested: Will return 'node' scope.", scope)
|
||||||
continue
|
continue
|
||||||
// return nil, errors.New("the InfluxDB metric data repository does not yet support other scopes than 'node, core'")
|
// return nil, errors.New("the InfluxDB metric data repository does not yet support other scopes than 'node, core'")
|
||||||
}
|
}
|
||||||
@ -201,21 +204,22 @@ func (idb *InfluxDBv2DataRepository) LoadData(
|
|||||||
// Get Stats
|
// Get Stats
|
||||||
stats, err := idb.LoadStats(job, metrics, ctx)
|
stats, err := idb.LoadStats(job, metrics, ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Warn("Error while loading statistics")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, scope := range scopes {
|
for _, scope := range scopes {
|
||||||
if scope == "node" { // No 'socket/core' support yet
|
if scope == "node" { // No 'socket/core' support yet
|
||||||
for metric, nodes := range stats {
|
for metric, nodes := range stats {
|
||||||
// log.Println(fmt.Sprintf("<< Add Stats for : Field %s >>", metric))
|
// log.Debugf("<< Add Stats for : Field %s >>", metric)
|
||||||
for node, stats := range nodes {
|
for node, stats := range nodes {
|
||||||
// log.Println(fmt.Sprintf("<< Add Stats for : Host %s : Min %.2f, Max %.2f, Avg %.2f >>", node, stats.Min, stats.Max, stats.Avg ))
|
// log.Debugf("<< Add Stats for : Host %s : Min %.2f, Max %.2f, Avg %.2f >>", node, stats.Min, stats.Max, stats.Avg )
|
||||||
for index, _ := range jobData[metric][scope].Series {
|
 				for index, _ := range jobData[metric][scope].Series {
-					// log.Println(fmt.Sprintf("<< Try to add Stats to Series in Position %d >>", index))
+					// log.Debugf("<< Try to add Stats to Series in Position %d >>", index)
 					if jobData[metric][scope].Series[index].Hostname == node {
-						// log.Println(fmt.Sprintf("<< Match for Series in Position %d : Host %s >>", index, jobData[metric][scope].Series[index].Hostname))
+						// log.Debugf("<< Match for Series in Position %d : Host %s >>", index, jobData[metric][scope].Series[index].Hostname)
-						jobData[metric][scope].Series[index].Statistics = schema.MetricStatistics{Avg: stats.Avg, Min: stats.Min, Max: stats.Max}
+						jobData[metric][scope].Series[index].Statistics = &schema.MetricStatistics{Avg: stats.Avg, Min: stats.Min, Max: stats.Max}
-						// log.Println(fmt.Sprintf("<< Result Inner: Min %.2f, Max %.2f, Avg %.2f >>", jobData[metric][scope].Series[index].Statistics.Min, jobData[metric][scope].Series[index].Statistics.Max, jobData[metric][scope].Series[index].Statistics.Avg))
+						// log.Debugf("<< Result Inner: Min %.2f, Max %.2f, Avg %.2f >>", jobData[metric][scope].Series[index].Statistics.Min, jobData[metric][scope].Series[index].Statistics.Max, jobData[metric][scope].Series[index].Statistics.Avg)
 					}
 				}
 			}
@@ -227,9 +231,9 @@ func (idb *InfluxDBv2DataRepository) LoadData(
 	// for _, scope := range scopes {
 	//  for _, met := range metrics {
 	//   for _, series := range jobData[met][scope].Series {
-	//    log.Println(fmt.Sprintf("<< Result: %d data points for metric %s on %s with scope %s, Stats: Min %.2f, Max %.2f, Avg %.2f >>",
+	//    log.Debugf("<< Result: %d data points for metric %s on %s with scope %s, Stats: Min %.2f, Max %.2f, Avg %.2f >>",
 	//     len(series.Data), met, series.Hostname, scope,
-	//     series.Statistics.Min, series.Statistics.Max, series.Statistics.Avg))
+	//     series.Statistics.Min, series.Statistics.Max, series.Statistics.Avg)
 	//   }
 	//  }
 	// }
@@ -248,7 +252,7 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
 	for _, h := range job.Resources {
 		if h.HWThreads != nil || h.Accelerators != nil {
 			// TODO
-			return nil, errors.New("the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
+			return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support HWThreads or Accelerators")
 		}
 		hostsConds = append(hostsConds, fmt.Sprintf(`r["hostname"] == "%s"`, h.Hostname))
 	}
@@ -257,7 +261,7 @@ func (idb *InfluxDBv2DataRepository) LoadStats(
 	// lenMet := len(metrics)

 	for _, metric := range metrics {
-		// log.Println(fmt.Sprintf("<< You are here: %s (Index %d of %d metrics)", metric, index, lenMet))
+		// log.Debugf("<< You are here: %s (Index %d of %d metrics)", metric, index, lenMet)

 		query := fmt.Sprintf(`
 			data = from(bucket: "%s")
@@ -274,6 +278,7 @@ func (idb *InfluxDBv2DataRepository) LoadStats(

 		rows, err := idb.queryClient.Query(ctx, query)
 		if err != nil {
+			log.Error("Error while performing query")
 			return nil, err
 		}

@@ -284,17 +289,17 @@ func (idb *InfluxDBv2DataRepository) LoadStats(

 			avg, avgok := row.ValueByKey("avg").(float64)
 			if !avgok {
-				// log.Println(fmt.Sprintf(">> Assertion error for metric %s, statistic AVG. Expected 'float64', got %v", metric, avg))
+				// log.Debugf(">> Assertion error for metric %s, statistic AVG. Expected 'float64', got %v", metric, avg)
 				avg = 0.0
 			}
 			min, minok := row.ValueByKey("min").(float64)
 			if !minok {
-				// log.Println(fmt.Sprintf(">> Assertion error for metric %s, statistic MIN. Expected 'float64', got %v", metric, min))
+				// log.Debugf(">> Assertion error for metric %s, statistic MIN. Expected 'float64', got %v", metric, min)
 				min = 0.0
 			}
 			max, maxok := row.ValueByKey("max").(float64)
 			if !maxok {
-				// log.Println(fmt.Sprintf(">> Assertion error for metric %s, statistic MAX. Expected 'float64', got %v", metric, max))
+				// log.Debugf(">> Assertion error for metric %s, statistic MAX. Expected 'float64', got %v", metric, max)
 				max = 0.0
 			}

@@ -318,7 +323,7 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData(
 	ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {

 	// TODO : Implement to be used in Analysis- und System/Node-View
-	log.Println(fmt.Sprintf("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes))
+	log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes)

-	return nil, errors.New("unimplemented for InfluxDBv2DataRepository")
+	return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository")
 }
@@ -46,6 +46,7 @@ func Init(disableArchive bool) error {
 			Kind string `json:"kind"`
 		}
 		if err := json.Unmarshal(cluster.MetricDataRepository, &kind); err != nil {
+			log.Warn("Error while unmarshaling raw json MetricDataRepository")
 			return err
 		}

@@ -55,13 +56,16 @@ func Init(disableArchive bool) error {
 			mdr = &CCMetricStore{}
 		case "influxdb":
 			mdr = &InfluxDBv2DataRepository{}
+		case "prometheus":
+			mdr = &PrometheusDataRepository{}
 		case "test":
 			mdr = &TestMetricDataRepository{}
 		default:
-			return fmt.Errorf("unkown metric data repository '%s' for cluster '%s'", kind.Kind, cluster.Name)
+			return fmt.Errorf("METRICDATA/METRICDATA > Unknown MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
 		}

 		if err := mdr.Init(cluster.MetricDataRepository); err != nil {
+			log.Errorf("Error initializing MetricDataRepository %v for cluster %v", kind.Kind, cluster.Name)
 			return err
 		}
 		metricDataRepos[cluster.Name] = mdr
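Note: the switch above dispatches on the `kind` field of each cluster's `metricDataRepository` JSON config. As a rough sketch of how the new backend would be selected (the `url`, `suffix`, and `query-templates` field names come from the PrometheusDataRepositoryConfig struct introduced later in this diff; the surrounding config layout and all values are assumptions, not part of the commit):

    "metricDataRepository": {
        "kind": "prometheus",
        "url": "http://localhost:9090",
        "suffix": "_exporter",
        "query-templates": {
            "load_one": "node_load1{exported_instance=~\"{{.Nodes}}\"}"
        }
    }

Here `load_one` and its PromQL body are placeholder values; `{{.Nodes}}` is filled in by FormatQuery with a regex over the job's hostnames.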
@@ -88,7 +92,7 @@ func LoadData(job *schema.Job,
 	repo, ok := metricDataRepos[job.Cluster]

 	if !ok {
-		return fmt.Errorf("no metric data repository configured for '%s'", job.Cluster), 0, 0
+		return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster), 0, 0
 	}

 	if scopes == nil {
@@ -105,8 +109,9 @@ func LoadData(job *schema.Job,
 			jd, err = repo.LoadData(job, metrics, scopes, ctx)
 			if err != nil {
 				if len(jd) != 0 {
-					log.Errorf("partial error: %s", err.Error())
+					log.Warnf("partial error: %s", err.Error())
 				} else {
+					log.Error("Error while loading job data from metric repository")
 					return err, 0, 0
 				}
 			}
@@ -114,6 +119,7 @@ func LoadData(job *schema.Job,
 		} else {
 			jd, err = archive.GetHandle().LoadJobData(job)
 			if err != nil {
+				log.Error("Error while loading job data from archive")
 				return err, 0, 0
 			}

@@ -156,10 +162,12 @@ func LoadData(job *schema.Job,
 		}

 		prepareJobData(job, jd, scopes)

 		return jd, ttl, size
 	})

 	if err, ok := data.(error); ok {
+		log.Error("Error in returned dataset")
 		return nil, err
 	}

@@ -179,11 +187,12 @@ func LoadAverages(

 	repo, ok := metricDataRepos[job.Cluster]
 	if !ok {
-		return fmt.Errorf("no metric data repository configured for '%s'", job.Cluster)
+		return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
 	}

 	stats, err := repo.LoadStats(job, metrics, ctx)
 	if err != nil {
+		log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
 		return err
 	}

@@ -214,7 +223,7 @@ func LoadNodeData(

 	repo, ok := metricDataRepos[cluster]
 	if !ok {
-		return nil, fmt.Errorf("no metric data repository configured for '%s'", cluster)
+		return nil, fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", cluster)
 	}

 	if metrics == nil {
@@ -226,14 +235,15 @@ func LoadNodeData(
 	data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx)
 	if err != nil {
 		if len(data) != 0 {
-			log.Errorf("partial error: %s", err.Error())
+			log.Warnf("partial error: %s", err.Error())
 		} else {
+			log.Error("Error while loading node data from metric repository")
 			return nil, err
 		}
 	}

 	if data == nil {
-		return nil, fmt.Errorf("the metric data repository for '%s' does not support this query", cluster)
+		return nil, fmt.Errorf("METRICDATA/METRICDATA > the metric data repository for '%s' does not support this query", cluster)
 	}

 	return data, nil
@@ -300,6 +310,7 @@ func ArchiveJob(job *schema.Job, ctx context.Context) (*schema.JobMeta, error) {

 	jobData, err := LoadData(job, allMetrics, scopes, ctx)
 	if err != nil {
+		log.Error("Error wile loading job data for archiving")
 		return nil, err
 	}

internal/metricdata/prometheus.go (new file, 446 lines)
@@ -0,0 +1,446 @@
+// Copyright (C) 2022 DKRZ
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package metricdata
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/http"
+	"os"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+	"text/template"
+	"time"
+
+	"github.com/ClusterCockpit/cc-backend/pkg/archive"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	promapi "github.com/prometheus/client_golang/api"
+	promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
+	promcfg "github.com/prometheus/common/config"
+	promm "github.com/prometheus/common/model"
+)
+
+type PrometheusDataRepositoryConfig struct {
+	Url       string            `json:"url"`
+	Username  string            `json:"username,omitempty"`
+	Suffix    string            `json:"suffix,omitempty"`
+	Templates map[string]string `json:"query-templates"`
+}
+
+type PrometheusDataRepository struct {
+	client      promapi.Client
+	queryClient promv1.API
+	suffix      string
+	templates   map[string]*template.Template
+}
+
+type PromQLArgs struct {
+	Nodes string
+}
+
+type Trie map[rune]Trie
+
+var logOnce sync.Once
+
+func contains(s []schema.MetricScope, str schema.MetricScope) bool {
+	for _, v := range s {
+		if v == str {
+			return true
+		}
+	}
+	return false
+}
+
+func MinMaxMean(data []schema.Float) (float64, float64, float64) {
+	if len(data) == 0 {
+		return 0.0, 0.0, 0.0
+	}
+	min := math.MaxFloat64
+	max := -math.MaxFloat64
+	var sum float64
+	var n float64
+	for _, val := range data {
+		if val.IsNaN() {
+			continue
+		}
+		sum += float64(val)
+		n += 1
+		if float64(val) > max {
+			max = float64(val)
+		}
+		if float64(val) < min {
+			min = float64(val)
+		}
+	}
+	return min, max, sum / n
+}
+
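A quick sketch of how MinMaxMean treats gaps, runnable from within this repo (the import path assumes the function stays exported in internal/metricdata):

package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	// NaN entries (gaps in the Prometheus response) are skipped entirely,
	// so they affect neither the extrema nor the mean.
	data := []schema.Float{1.0, schema.NaN, 3.0}
	min, max, mean := metricdata.MinMaxMean(data)
	fmt.Println(min, max, mean) // 1 3 2
}

One caveat visible in the implementation: an all-NaN slice leaves n at zero, so the mean comes out as NaN rather than 0; only a fully empty slice returns zeros.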
+// Rewritten from
+// https://github.com/ermanh/trieregex/blob/master/trieregex/trieregex.py
+func nodeRegex(nodes []string) string {
+	root := Trie{}
+	// add runes of each compute node to trie
+	for _, node := range nodes {
+		_trie := root
+		for _, c := range node {
+			if _, ok := _trie[c]; !ok {
+				_trie[c] = Trie{}
+			}
+			_trie = _trie[c]
+		}
+		_trie['*'] = Trie{}
+	}
+	// recursively build regex from rune trie
+	var trieRegex func(trie Trie, reset bool) string
+	trieRegex = func(trie Trie, reset bool) string {
+		if reset == true {
+			trie = root
+		}
+		if len(trie) == 0 {
+			return ""
+		}
+		if len(trie) == 1 {
+			for key, _trie := range trie {
+				if key == '*' {
+					return ""
+				}
+				return regexp.QuoteMeta(string(key)) + trieRegex(_trie, false)
+			}
+		} else {
+			sequences := []string{}
+			for key, _trie := range trie {
+				if key != '*' {
+					sequences = append(sequences, regexp.QuoteMeta(string(key))+trieRegex(_trie, false))
+				}
+			}
+			sort.Slice(sequences, func(i, j int) bool {
+				return (-len(sequences[i]) < -len(sequences[j])) || (sequences[i] < sequences[j])
+			})
+			var result string
+			// single edge from this tree node
+			if len(sequences) == 1 {
+				result = sequences[0]
+				if len(result) > 1 {
+					result = "(?:" + result + ")"
+				}
+				// multiple edges, each length 1
+			} else if s := strings.Join(sequences, ""); len(s) == len(sequences) {
+				// char or numeric range
+				if len(s)-1 == int(s[len(s)-1])-int(s[0]) {
+					result = fmt.Sprintf("[%c-%c]", s[0], s[len(s)-1])
+					// char or numeric set
+				} else {
+					result = "[" + s + "]"
+				}
+				// multiple edges of different lengths
+			} else {
+				result = "(?:" + strings.Join(sequences, "|") + ")"
+			}
+			if _, ok := trie['*']; ok {
+				result += "?"
+			}
+			return result
+		}
+		return ""
+	}
+	return trieRegex(root, true)
+}
+
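Tracing the trie by hand suggests what this produces: the hostnames node01, node02, and node13 share the prefix "node" and then branch, which should collapse to something like node(?:0[1-2]|13). A hedged test sketch (same package, since nodeRegex is unexported; the expected pattern is my own tracing, not asserted by the commit):

package metricdata

import (
	"regexp"
	"testing"
)

func TestNodeRegex(t *testing.T) {
	re := nodeRegex([]string{"node01", "node02", "node13"})
	// every input hostname must be matched exactly ...
	for _, node := range []string{"node01", "node02", "node13"} {
		if ok, _ := regexp.MatchString("^"+re+"$", node); !ok {
			t.Errorf("regex %q does not match %q", re, node)
		}
	}
	// ... and similar non-members must not be
	if ok, _ := regexp.MatchString("^"+re+"$", "node03"); ok {
		t.Errorf("regex %q unexpectedly matches node03", re)
	}
}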
+func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error {
+	var config PrometheusDataRepositoryConfig
+	// parse config
+	if err := json.Unmarshal(rawConfig, &config); err != nil {
+		log.Warn("Error while unmarshaling raw json config")
+		return err
+	}
+	// support basic authentication
+	var rt http.RoundTripper = nil
+	if prom_pw := os.Getenv("PROMETHEUS_PASSWORD"); prom_pw != "" && config.Username != "" {
+		prom_pw := promcfg.Secret(prom_pw)
+		rt = promcfg.NewBasicAuthRoundTripper(config.Username, prom_pw, "", promapi.DefaultRoundTripper)
+	} else {
+		if config.Username != "" {
+			return errors.New("METRICDATA/PROMETHEUS > Prometheus username provided, but PROMETHEUS_PASSWORD not set.")
+		}
+	}
+	// init client
+	client, err := promapi.NewClient(promapi.Config{
+		Address:      config.Url,
+		RoundTripper: rt,
+	})
+	if err != nil {
+		log.Error("Error while initializing new prometheus client")
+		return err
+	}
+	// init query client
+	pdb.client = client
+	pdb.queryClient = promv1.NewAPI(pdb.client)
+	// site config
+	pdb.suffix = config.Suffix
+	// init query templates
+	pdb.templates = make(map[string]*template.Template)
+	for metric, templ := range config.Templates {
+		pdb.templates[metric], err = template.New(metric).Parse(templ)
+		if err == nil {
+			log.Debugf("Added PromQL template for %s: %s", metric, templ)
+		} else {
+			log.Warnf("Failed to parse PromQL template %s for metric %s", templ, metric)
+		}
+	}
+	return nil
+}
+
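For reference, the query-templates mechanism is plain text/template substitution over a single Nodes field. A minimal standalone sketch (the metric name and PromQL body are invented):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	type PromQLArgs struct{ Nodes string }
	// A template as it might appear under "query-templates" in the config.
	templ := template.Must(template.New("load_one").Parse(
		`node_load1{exported_instance=~"{{.Nodes}}"}`))
	buf := &bytes.Buffer{}
	if err := templ.Execute(buf, PromQLArgs{Nodes: "(node0[1-2])_exporter"}); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// node_load1{exported_instance=~"(node0[1-2])_exporter"}
}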
+// TODO: respect scope argument
+func (pdb *PrometheusDataRepository) FormatQuery(
+	metric string,
+	scope schema.MetricScope,
+	nodes []string,
+	cluster string) (string, error) {
+
+	args := PromQLArgs{}
+	if len(nodes) > 0 {
+		args.Nodes = fmt.Sprintf("(%s)%s", nodeRegex(nodes), pdb.suffix)
+	} else {
+		args.Nodes = fmt.Sprintf(".*%s", pdb.suffix)
+	}
+
+	buf := &bytes.Buffer{}
+	if templ, ok := pdb.templates[metric]; ok {
+		err := templ.Execute(buf, args)
+		if err != nil {
+			return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ))
+		} else {
+			query := buf.String()
+			log.Debugf("PromQL: %s", query)
+			return query, nil
+		}
+	} else {
+		return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > No PromQL for metric %s configured.", metric))
+	}
+}
+
+// Convert PromAPI row to CC schema.Series
+func (pdb *PrometheusDataRepository) RowToSeries(
+	from time.Time,
+	step int64,
+	steps int64,
+	row *promm.SampleStream) schema.Series {
+	ts := from.Unix()
+	hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)
+	// init array of expected length with NaN
+	values := make([]schema.Float, steps+1)
+	for i, _ := range values {
+		values[i] = schema.NaN
+	}
+	// copy recorded values from prom sample pair
+	for _, v := range row.Values {
+		idx := (v.Timestamp.Unix() - ts) / step
+		values[idx] = schema.Float(v.Value)
+	}
+	min, max, mean := MinMaxMean(values)
+	// output struct
+	return schema.Series{
+		Hostname: hostname,
+		Data:     values,
+		Statistics: &schema.MetricStatistics{
+			Avg: mean,
+			Min: min,
+			Max: max,
+		},
+	}
+}
+
+func (pdb *PrometheusDataRepository) LoadData(
+	job *schema.Job,
+	metrics []string,
+	scopes []schema.MetricScope,
+	ctx context.Context) (schema.JobData, error) {
+
+	// TODO respect requested scope
+	if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
+		scopes = append(scopes, schema.MetricScopeNode)
+	}
+
+	jobData := make(schema.JobData)
+	// parse job specs
+	nodes := make([]string, len(job.Resources))
+	for i, resource := range job.Resources {
+		nodes[i] = resource.Hostname
+	}
+	from := job.StartTime
+	to := job.StartTime.Add(time.Duration(job.Duration) * time.Second)
+
+	for _, scope := range scopes {
+		if scope != schema.MetricScopeNode {
+			logOnce.Do(func() {
+				log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+			})
+			continue
+		}
+
+		for _, metric := range metrics {
+			metricConfig := archive.GetMetricConfig(job.Cluster, metric)
+			if metricConfig == nil {
+				log.Warnf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster)
+				return nil, errors.New("Prometheus config error")
+			}
+			query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster)
+			if err != nil {
+				log.Warn("Error while formatting prometheus query")
+				return nil, err
+			}
+
+			// ranged query over all job nodes
+			r := promv1.Range{
+				Start: from,
+				End:   to,
+				Step:  time.Duration(metricConfig.Timestep * 1e9),
+			}
+			result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
+
+			if err != nil {
+				log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query)
+				return nil, errors.New("Prometheus query error")
+			}
+			if len(warnings) > 0 {
+				log.Warnf("Warnings: %v\n", warnings)
+			}
+
+			// init data structures
+			if _, ok := jobData[metric]; !ok {
+				jobData[metric] = make(map[schema.MetricScope]*schema.JobMetric)
+			}
+			jobMetric, ok := jobData[metric][scope]
+			if !ok {
+				jobMetric = &schema.JobMetric{
+					Unit:     metricConfig.Unit,
+					Scope:    scope,
+					Timestep: metricConfig.Timestep,
+					Series:   make([]schema.Series, 0),
+				}
+				jobData[metric][scope] = jobMetric
+			}
+			step := int64(metricConfig.Timestep)
+			steps := int64(to.Sub(from).Seconds()) / step
+			// iter rows of host, metric, values
+			for _, row := range result.(promm.Matrix) {
+				jobMetric.Series = append(jobMetric.Series,
+					pdb.RowToSeries(from, step, steps, row))
+			}
+			// sort by hostname to get uniform coloring
+			sort.Slice(jobMetric.Series, func(i, j int) bool {
+				return (jobMetric.Series[i].Hostname < jobMetric.Series[j].Hostname)
+			})
+		}
+	}
+	return jobData, nil
+}
+
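One detail worth noting above: `time.Duration(metricConfig.Timestep * 1e9)` works because time.Duration counts nanoseconds and Timestep is in seconds. A tiny sketch of the equivalence:

package main

import (
	"fmt"
	"time"
)

func main() {
	timestep := 30 // seconds, as in a metric config
	a := time.Duration(timestep * 1e9)         // what the code above does
	b := time.Duration(timestep) * time.Second // equivalent, arguably clearer
	fmt.Println(a, b, a == b)                  // 30s 30s true
}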
+// TODO change implementation to precomputed/cached stats
+func (pdb *PrometheusDataRepository) LoadStats(
+	job *schema.Job,
+	metrics []string,
+	ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
+
+	// map of metrics of nodes of stats
+	stats := map[string]map[string]schema.MetricStatistics{}
+
+	data, err := pdb.LoadData(job, metrics, []schema.MetricScope{schema.MetricScopeNode}, ctx)
+	if err != nil {
+		log.Warn("Error while loading job for stats")
+		return nil, err
+	}
+	for metric, metricData := range data {
+		stats[metric] = make(map[string]schema.MetricStatistics)
+		for _, series := range metricData[schema.MetricScopeNode].Series {
+			stats[metric][series.Hostname] = *series.Statistics
+		}
+	}
+
+	return stats, nil
+}
+
+func (pdb *PrometheusDataRepository) LoadNodeData(
+	cluster string,
+	metrics, nodes []string,
+	scopes []schema.MetricScope,
+	from, to time.Time,
+	ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) {
+	t0 := time.Now()
+	// Map of hosts of metrics of value slices
+	data := make(map[string]map[string][]*schema.JobMetric)
+	// query db for each metric
+	// TODO: scopes seems to be always empty
+	if len(scopes) == 0 || !contains(scopes, schema.MetricScopeNode) {
+		scopes = append(scopes, schema.MetricScopeNode)
+	}
+	for _, scope := range scopes {
+		if scope != schema.MetricScopeNode {
+			logOnce.Do(func() {
+				log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)
+			})
+			continue
+		}
+		for _, metric := range metrics {
+			metricConfig := archive.GetMetricConfig(cluster, metric)
+			if metricConfig == nil {
+				log.Warnf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster)
+				return nil, errors.New("Prometheus config error")
+			}
+			query, err := pdb.FormatQuery(metric, scope, nodes, cluster)
+			if err != nil {
+				log.Warn("Error while formatting prometheus query")
+				return nil, err
+			}
+
+			// ranged query over all nodes
+			r := promv1.Range{
+				Start: from,
+				End:   to,
+				Step:  time.Duration(metricConfig.Timestep * 1e9),
+			}
+			result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r)
+
+			if err != nil {
+				log.Errorf("Prometheus query error in LoadNodeData: %v\n", err)
+				return nil, errors.New("Prometheus query error")
+			}
+			if len(warnings) > 0 {
+				log.Warnf("Warnings: %v\n", warnings)
+			}
+
+			step := int64(metricConfig.Timestep)
+			steps := int64(to.Sub(from).Seconds()) / step
+
+			// iter rows of host, metric, values
+			for _, row := range result.(promm.Matrix) {
+				hostname := strings.TrimSuffix(string(row.Metric["exported_instance"]), pdb.suffix)
+				hostdata, ok := data[hostname]
+				if !ok {
+					hostdata = make(map[string][]*schema.JobMetric)
+					data[hostname] = hostdata
+				}
+				// output per host and metric
+				hostdata[metric] = append(hostdata[metric], &schema.JobMetric{
+					Unit:     metricConfig.Unit,
+					Scope:    scope,
+					Timestep: metricConfig.Timestep,
+					Series:   []schema.Series{pdb.RowToSeries(from, step, steps, row)},
+				},
+				)
+			}
+		}
+	}
+	t1 := time.Since(t0)
+	log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1)
+	return data, nil
+}
@@ -5,12 +5,15 @@
 package repository

 import (
+	"database/sql"
 	"fmt"
 	"log"
 	"sync"
 	"time"

 	"github.com/jmoiron/sqlx"
+	"github.com/mattn/go-sqlite3"
+	"github.com/qustavo/sqlhooks/v2"
 )

 var (
@@ -19,7 +22,8 @@ var (
 )

 type DBConnection struct {
 	DB *sqlx.DB
+	Driver string
 }

 func Connect(driver string, db string) {
@@ -28,7 +32,9 @@ func Connect(driver string, db string) {

 	dbConnOnce.Do(func() {
 		if driver == "sqlite3" {
-			dbHandle, err = sqlx.Open("sqlite3", fmt.Sprintf("%s?_foreign_keys=on", db))
+			sql.Register("sqlite3WithHooks", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, &Hooks{}))
+			dbHandle, err = sqlx.Open("sqlite3WithHooks", fmt.Sprintf("%s?_foreign_keys=on", db))
+			// dbHandle, err = sqlx.Open("sqlite3", fmt.Sprintf("%s?_foreign_keys=on", db))
 			if err != nil {
 				log.Fatal(err)
 			}
@@ -39,7 +45,7 @@ func Connect(driver string, db string) {
 		} else if driver == "mysql" {
 			dbHandle, err = sqlx.Open("mysql", fmt.Sprintf("%s?multiStatements=true", db))
 			if err != nil {
-				log.Fatal(err)
+				log.Fatalf("sqlx.Open() error: %v", err)
 			}

 			dbHandle.SetConnMaxLifetime(time.Minute * 3)
@@ -49,7 +55,8 @@ func Connect(driver string, db string) {
 			log.Fatalf("unsupported database driver: %s", driver)
 		}

-		dbConnInstance = &DBConnection{DB: dbHandle}
+		dbConnInstance = &DBConnection{DB: dbHandle, Driver: driver}
+		checkDBVersion(driver, dbHandle.DB)
 	})
 }

internal/repository/hooks.go (new file, 28 lines)
@@ -0,0 +1,28 @@
+// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package repository
+
+import (
+	"context"
+	"time"
+
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
+)
+
+// Hooks satisfies the sqlhook.Hooks interface
+type Hooks struct{}
+
+// Before hook will print the query with it's args and return the context with the timestamp
+func (h *Hooks) Before(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
+	log.Infof("SQL query %s %q", query, args)
+	return context.WithValue(ctx, "begin", time.Now()), nil
+}
+
+// After hook will get the timestamp registered on the Before hook and print the elapsed time
+func (h *Hooks) After(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
+	begin := ctx.Value("begin").(time.Time)
+	log.Infof("Took: %s\n", time.Since(begin))
+	return ctx, nil
+}
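The pattern here, registering a hook-wrapped driver so every query is timed, also works standalone. A minimal self-contained sketch under the same assumptions (the "begin" context key mirrors the Hooks type above; driver name and DSN are placeholders):

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	"github.com/mattn/go-sqlite3"
	"github.com/qustavo/sqlhooks/v2"
)

// timingHooks stashes a start time in the context before each query and
// logs the elapsed time after it, like the Hooks type in this commit.
type timingHooks struct{}

func (timingHooks) Before(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
	return context.WithValue(ctx, "begin", time.Now()), nil
}

func (timingHooks) After(ctx context.Context, query string, args ...interface{}) (context.Context, error) {
	log.Printf("%s took %s", query, time.Since(ctx.Value("begin").(time.Time)))
	return ctx, nil
}

func main() {
	sql.Register("sqlite3WithTiming", sqlhooks.Wrap(&sqlite3.SQLiteDriver{}, timingHooks{}))
	db, _ := sql.Open("sqlite3WithTiming", ":memory:")
	defer db.Close()
	db.Exec("CREATE TABLE t (x INTEGER)") // logged with its duration by the hooks
}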
@@ -20,67 +20,6 @@ import (
 	"github.com/ClusterCockpit/cc-backend/pkg/units"
 )

-// `AUTO_INCREMENT` is in a comment because of this hack:
-// https://stackoverflow.com/a/41028314 (sqlite creates unique ids automatically)
-const JobsDBSchema string = `
-	DROP TABLE IF EXISTS jobtag;
-	DROP TABLE IF EXISTS job;
-	DROP TABLE IF EXISTS tag;
-
-	CREATE TABLE job (
-		id INTEGER PRIMARY KEY /*!40101 AUTO_INCREMENT */,
-		job_id BIGINT NOT NULL,
-		cluster VARCHAR(255) NOT NULL,
-		subcluster VARCHAR(255) NOT NULL,
-		start_time BIGINT NOT NULL, -- Unix timestamp
-
-		user VARCHAR(255) NOT NULL,
-		project VARCHAR(255) NOT NULL,
-		` + "`partition`" + ` VARCHAR(255) NOT NULL, -- partition is a keyword in mysql -.-
-		array_job_id BIGINT NOT NULL,
-		duration INT NOT NULL DEFAULT 0,
-		walltime INT NOT NULL DEFAULT 0,
-		job_state VARCHAR(255) NOT NULL CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled', 'stopped', 'timeout', 'preempted', 'out_of_memory')),
-		meta_data TEXT,          -- JSON
-		resources TEXT NOT NULL, -- JSON
-
-		num_nodes INT NOT NULL,
-		num_hwthreads INT NOT NULL,
-		num_acc INT NOT NULL,
-		smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1 )),
-		exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
-		monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
-
-		mem_used_max REAL NOT NULL DEFAULT 0.0,
-		flops_any_avg REAL NOT NULL DEFAULT 0.0,
-		mem_bw_avg REAL NOT NULL DEFAULT 0.0,
-		load_avg REAL NOT NULL DEFAULT 0.0,
-		net_bw_avg REAL NOT NULL DEFAULT 0.0,
-		net_data_vol_total REAL NOT NULL DEFAULT 0.0,
-		file_bw_avg REAL NOT NULL DEFAULT 0.0,
-		file_data_vol_total REAL NOT NULL DEFAULT 0.0);
-
-	CREATE TABLE tag (
-		id INTEGER PRIMARY KEY,
-		tag_type VARCHAR(255) NOT NULL,
-		tag_name VARCHAR(255) NOT NULL,
-		CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
-
-	CREATE TABLE jobtag (
-		job_id INTEGER,
-		tag_id INTEGER,
-		PRIMARY KEY (job_id, tag_id),
-		FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
-		FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
-`
-
-// Indexes are created after the job-archive is traversed for faster inserts.
-const JobsDbIndexes string = `
-	CREATE INDEX job_by_user ON job (user);
-	CREATE INDEX job_by_starttime ON job (start_time);
-	CREATE INDEX job_by_job_id ON job (job_id);
-	CREATE INDEX job_by_state ON job (job_state);
-`
 const NamedJobInsert string = `INSERT INTO job (
 	job_id, user, project, cluster, subcluster, ` + "`partition`" + `, array_job_id, num_nodes, num_hwthreads, num_acc,
 	exclusive, monitoring_status, smt, job_state, start_time, duration, walltime, resources, meta_data,
@@ -96,40 +35,44 @@ func HandleImportFlag(flag string) error {
 	for _, pair := range strings.Split(flag, ",") {
 		files := strings.Split(pair, ":")
 		if len(files) != 2 {
-			return fmt.Errorf("invalid import flag format")
+			return fmt.Errorf("REPOSITORY/INIT > invalid import flag format")
 		}

 		raw, err := os.ReadFile(files[0])
 		if err != nil {
+			log.Warn("Error while reading metadata file for import")
 			return err
 		}

-		// if config.Keys.Validate {
+		if config.Keys.Validate {
 			if err := schema.Validate(schema.Meta, bytes.NewReader(raw)); err != nil {
-				return fmt.Errorf("validate job meta: %v", err)
+				return fmt.Errorf("REPOSITORY/INIT > validate job meta: %v", err)
+			}
 		}
-		// }
 		dec := json.NewDecoder(bytes.NewReader(raw))
 		dec.DisallowUnknownFields()
 		jobMeta := schema.JobMeta{BaseJob: schema.JobDefaults}
 		if err := dec.Decode(&jobMeta); err != nil {
+			log.Warn("Error while decoding raw json metadata for import")
 			return err
 		}

 		raw, err = os.ReadFile(files[1])
 		if err != nil {
+			log.Warn("Error while reading jobdata file for import")
 			return err
 		}

 		if config.Keys.Validate {
 			if err := schema.Validate(schema.Data, bytes.NewReader(raw)); err != nil {
-				return fmt.Errorf("validate job data: %v", err)
+				return fmt.Errorf("REPOSITORY/INIT > validate job data: %v", err)
 			}
 		}
 		dec = json.NewDecoder(bytes.NewReader(raw))
 		dec.DisallowUnknownFields()
 		jobData := schema.JobData{}
 		if err := dec.Decode(&jobData); err != nil {
+			log.Warn("Error while decoding raw json jobdata for import")
 			return err
 		}

@@ -138,10 +81,11 @@ func HandleImportFlag(flag string) error {
 		jobMeta.MonitoringStatus = schema.MonitoringStatusArchivingSuccessful
 		if job, err := GetJobRepository().Find(&jobMeta.JobID, &jobMeta.Cluster, &jobMeta.StartTime); err != sql.ErrNoRows {
 			if err != nil {
+				log.Warn("Error while finding job in jobRepository")
 				return err
 			}

-			return fmt.Errorf("a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID)
+			return fmt.Errorf("REPOSITORY/INIT > a job with that jobId, cluster and startTime does already exist (dbid: %d)", job.ID)
 		}

 		job := schema.Job{
@@ -157,38 +101,45 @@ func HandleImportFlag(flag string) error {
 		job.FileBwAvg = loadJobStat(&jobMeta, "file_bw")
 		job.RawResources, err = json.Marshal(job.Resources)
 		if err != nil {
+			log.Warn("Error while marshaling job resources")
 			return err
 		}
 		job.RawMetaData, err = json.Marshal(job.MetaData)
 		if err != nil {
+			log.Warn("Error while marshaling job metadata")
 			return err
 		}

 		if err := SanityChecks(&job.BaseJob); err != nil {
+			log.Warn("BaseJob SanityChecks failed")
 			return err
 		}

 		if err := archive.GetHandle().ImportJob(&jobMeta, &jobData); err != nil {
+			log.Error("Error while importing job")
 			return err
 		}

 		res, err := GetConnection().DB.NamedExec(NamedJobInsert, job)
 		if err != nil {
+			log.Warn("Error while NamedJobInsert")
 			return err
 		}

 		id, err := res.LastInsertId()
 		if err != nil {
+			log.Warn("Error while getting last insert ID")
 			return err
 		}

 		for _, tag := range job.Tags {
 			if _, err := GetJobRepository().AddTagOrCreate(id, tag.Type, tag.Name); err != nil {
+				log.Error("Error while adding or creating tag")
 				return err
 			}
 		}

-		log.Infof("Successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
+		log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id)
 	}
 	return nil
 }
@@ -200,21 +151,17 @@ func InitDB() error {
 	starttime := time.Now()
 	log.Print("Building job table...")

-	// Basic database structure:
-	_, err := db.DB.Exec(JobsDBSchema)
-	if err != nil {
-		return err
-	}
-
 	// Inserts are bundled into transactions because in sqlite,
 	// that speeds up inserts A LOT.
 	tx, err := db.DB.Beginx()
 	if err != nil {
+		log.Warn("Error while bundling transactions")
 		return err
 	}

 	stmt, err := tx.PrepareNamed(NamedJobInsert)
 	if err != nil {
+		log.Warn("Error while preparing namedJobInsert")
 		return err
 	}
 	tags := make(map[string]int64)
@@ -236,12 +183,14 @@ func InitDB() error {
 		if i%10 == 0 {
 			if tx != nil {
 				if err := tx.Commit(); err != nil {
+					log.Warn("Error while committing transactions for jobMeta")
 					return err
 				}
 			}

 			tx, err = db.DB.Beginx()
 			if err != nil {
+				log.Warn("Error while bundling transactions for jobMeta")
 				return err
 			}

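The comment above explains why InitDB batches: sqlite autocommits each statement otherwise, which dominates bulk-import time. The pattern, reduced to its core (the demo table and batch size are illustrative; the real code commits every 10 job files):

package sketch

import "github.com/jmoiron/sqlx"

// insertBatched bundles many INSERTs into transactions and re-opens a
// fresh one every batchSize rows, trading durability granularity for
// dramatically faster sqlite inserts.
func insertBatched(db *sqlx.DB, rows []string, batchSize int) error {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	for i, r := range rows {
		if i > 0 && i%batchSize == 0 {
			if err := tx.Commit(); err != nil {
				return err
			}
			if tx, err = db.Beginx(); err != nil {
				return err
			}
		}
		if _, err := tx.Exec(`INSERT INTO demo (val) VALUES (?)`, r); err != nil {
			return err
		}
	}
	return tx.Commit()
}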
@@ -264,34 +213,34 @@ func InitDB() error {

 		job.RawResources, err = json.Marshal(job.Resources)
 		if err != nil {
-			log.Errorf("repository initDB()- %v", err)
+			log.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}

 		job.RawMetaData, err = json.Marshal(job.MetaData)
 		if err != nil {
-			log.Errorf("repository initDB()- %v", err)
+			log.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}

 		if err := SanityChecks(&job.BaseJob); err != nil {
-			log.Errorf("repository initDB()- %v", err)
+			log.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}

 		res, err := stmt.Exec(job)
 		if err != nil {
-			log.Errorf("repository initDB()- %v", err)
+			log.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}

 		id, err := res.LastInsertId()
 		if err != nil {
-			log.Errorf("repository initDB()- %v", err)
+			log.Errorf("repository initDB(): %v", err)
 			errorOccured++
 			continue
 		}
@@ -302,16 +251,19 @@ func InitDB() error {
 			if !ok {
 				res, err := tx.Exec(`INSERT INTO tag (tag_name, tag_type) VALUES (?, ?)`, tag.Name, tag.Type)
 				if err != nil {
+					log.Errorf("Error while inserting tag into tag table: %v (Type %v)", tag.Name, tag.Type)
 					return err
 				}
 				tagId, err = res.LastInsertId()
 				if err != nil {
+					log.Warn("Error while getting last insert ID")
 					return err
 				}
 				tags[tagstr] = tagId
 			}

 			if _, err := tx.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES (?, ?)`, id, tagId); err != nil {
+				log.Errorf("Error while inserting jobtag into jobtag table: %v (TagID %v)", id, tagId)
 				return err
 			}
 		}
@@ -322,16 +274,11 @@ func InitDB() error {
 	}

 	if errorOccured > 0 {
-		log.Errorf("Error in import of %d jobs!", errorOccured)
+		log.Warnf("Error in import of %d jobs!", errorOccured)
 	}

 	if err := tx.Commit(); err != nil {
-		return err
+		log.Warn("Error while committing SQL transactions")
-	}
-
-	// Create indexes after inserts so that they do not
-	// need to be continually updated.
-	if _, err := db.DB.Exec(JobsDbIndexes); err != nil {
 		return err
 	}

@@ -342,13 +289,14 @@ func InitDB() error {
 // This function also sets the subcluster if necessary!
 func SanityChecks(job *schema.BaseJob) error {
 	if c := archive.GetCluster(job.Cluster); c == nil {
-		return fmt.Errorf("no such cluster: %#v", job.Cluster)
+		return fmt.Errorf("no such cluster: %v", job.Cluster)
 	}
 	if err := archive.AssignSubCluster(job); err != nil {
+		log.Warn("Error while assigning subcluster to job")
 		return err
 	}
 	if !job.State.Valid() {
-		return fmt.Errorf("not a valid job state: %#v", job.State)
+		return fmt.Errorf("not a valid job state: %v", job.State)
 	}
 	if len(job.Resources) == 0 || len(job.User) == 0 {
 		return fmt.Errorf("'resources' and 'user' should not be empty")
@@ -14,8 +14,12 @@ import (
 	"sync"
 	"time"

+	"github.com/99designs/gqlgen/graphql"
 	"github.com/ClusterCockpit/cc-backend/internal/auth"
+	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
+	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
+	"github.com/ClusterCockpit/cc-backend/pkg/archive"
 	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
@@ -29,10 +33,14 @@ var (
 )

 type JobRepository struct {
 	DB *sqlx.DB
+	driver string
+
 	stmtCache *sq.StmtCache
 	cache     *lrucache.Cache
+
+	archiveChannel chan *schema.Job
+	archivePending sync.WaitGroup
 }

 func GetJobRepository() *JobRepository {
@@ -40,10 +48,15 @@ func GetJobRepository() *JobRepository {
 		db := GetConnection()

 		jobRepoInstance = &JobRepository{
 			DB: db.DB,
-			stmtCache: sq.NewStmtCache(db.DB),
+			driver: db.Driver,
-			cache: lrucache.New(1024 * 1024),
+			stmtCache: sq.NewStmtCache(db.DB),
+			cache: lrucache.New(1024 * 1024),
+			archiveChannel: make(chan *schema.Job, 128),
 		}
+		// start archiving worker
+		go jobRepoInstance.archivingWorker()
 	})

 	return jobRepoInstance
@@ -60,14 +73,20 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
 	if err := row.Scan(
 		&job.ID, &job.JobID, &job.User, &job.Project, &job.Cluster, &job.SubCluster, &job.StartTimeUnix, &job.Partition, &job.ArrayJobId,
 		&job.NumNodes, &job.NumHWThreads, &job.NumAcc, &job.Exclusive, &job.MonitoringStatus, &job.SMT, &job.State,
-		&job.Duration, &job.Walltime, &job.RawResources /*&job.MetaData*/); err != nil {
+		&job.Duration, &job.Walltime, &job.RawResources /*&job.RawMetaData*/); err != nil {
+		log.Warn("Error while scanning rows")
 		return nil, err
 	}

 	if err := json.Unmarshal(job.RawResources, &job.Resources); err != nil {
+		log.Warn("Error while unmarhsaling raw resources json")
 		return nil, err
 	}

+	// if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
+	// 	return nil, err
+	// }

 	job.StartTime = time.Unix(job.StartTimeUnix, 0)
 	if job.Duration == 0 && job.State == schema.JobStateRunning {
 		job.Duration = int32(time.Since(job.StartTime).Seconds())
@@ -77,11 +96,14 @@ func scanJob(row interface{ Scan(...interface{}) error }) (*schema.Job, error) {
 	return job, nil
 }

-func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error) {
+func (r *JobRepository) FetchJobName(job *schema.Job) (*string, error) {
+	start := time.Now()
 	cachekey := fmt.Sprintf("metadata:%d", job.ID)
 	if cached := r.cache.Get(cachekey, nil); cached != nil {
 		job.MetaData = cached.(map[string]string)
-		return job.MetaData, nil
+		if jobName := job.MetaData["jobName"]; jobName != "" {
+			return &jobName, nil
+		}
 	}

 	if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
@@ -98,6 +120,40 @@ func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error
 	}

 	r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
+	log.Infof("Timer FetchJobName %s", time.Since(start))
+
+	if jobName := job.MetaData["jobName"]; jobName != "" {
+		return &jobName, nil
+	} else {
+		return new(string), nil
+	}
+}
+
+func (r *JobRepository) FetchMetadata(job *schema.Job) (map[string]string, error) {
+	start := time.Now()
+	cachekey := fmt.Sprintf("metadata:%d", job.ID)
+	if cached := r.cache.Get(cachekey, nil); cached != nil {
+		job.MetaData = cached.(map[string]string)
+		return job.MetaData, nil
+	}
+
+	if err := sq.Select("job.meta_data").From("job").Where("job.id = ?", job.ID).
+		RunWith(r.stmtCache).QueryRow().Scan(&job.RawMetaData); err != nil {
+		log.Warn("Error while scanning for job metadata")
+		return nil, err
+	}
+
+	if len(job.RawMetaData) == 0 {
+		return nil, nil
+	}
+
+	if err := json.Unmarshal(job.RawMetaData, &job.MetaData); err != nil {
+		log.Warn("Error while unmarshaling raw metadata json")
+		return nil, err
+	}
+
+	r.cache.Put(cachekey, job.MetaData, len(job.RawMetaData), 24*time.Hour)
+	log.Infof("Timer FetchMetadata %s", time.Since(start))
 	return job.MetaData, nil
 }

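Both fetchers share the same read-through shape around the lrucache. Distilled into a sketch (the Get/Put signatures follow their use in this diff; the load callback and the len(meta) cost estimate stand in for the SQL query and len(job.RawMetaData)):

package sketch

import (
	"time"

	"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
)

// cachedMeta serves from the LRU cache when possible, otherwise loads,
// caches with a 24h TTL, and returns the result.
func cachedMeta(cache *lrucache.Cache, key string, load func() map[string]string) map[string]string {
	if cached := cache.Get(key, nil); cached != nil {
		return cached.(map[string]string)
	}
	meta := load()
	cache.Put(key, meta, len(meta), 24*time.Hour)
	return meta
}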
@@ -106,6 +162,7 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
 	r.cache.Del(cachekey)
 	if job.MetaData == nil {
 		if _, err = r.FetchMetadata(job); err != nil {
+			log.Warnf("Error while fetching metadata for job, DB ID '%v'", job.ID)
 			return err
 		}
 	}
@@ -122,10 +179,12 @@ func (r *JobRepository) UpdateMetadata(job *schema.Job, key, val string) (err er
 	}

 	if job.RawMetaData, err = json.Marshal(job.MetaData); err != nil {
+		log.Warnf("Error while marshaling metadata for job, DB ID '%v'", job.ID)
 		return err
 	}

 	if _, err = sq.Update("job").Set("meta_data", job.RawMetaData).Where("job.id = ?", job.ID).RunWith(r.stmtCache).Exec(); err != nil {
+		log.Warnf("Error while updating metadata for job, DB ID '%v'", job.ID)
 		return err
 	}

@@ -143,6 +202,7 @@ func (r *JobRepository) Find(
 	cluster *string,
 	startTime *int64) (*schema.Job, error) {

+	start := time.Now()
 	q := sq.Select(jobColumns...).From("job").
 		Where("job.job_id = ?", *jobId)

@@ -153,9 +213,50 @@ func (r *JobRepository) Find(
 		q = q.Where("job.start_time = ?", *startTime)
 	}

+	log.Infof("Timer Find %s", time.Since(start))
 	return scanJob(q.RunWith(r.stmtCache).QueryRow())
 }

+// Find executes a SQL query to find a specific batch job.
+// The job is queried using the batch job id, the cluster name,
+// and the start time of the job in UNIX epoch time seconds.
+// It returns a pointer to a schema.Job data structure and an error variable.
+// To check if no job was found test err == sql.ErrNoRows
+func (r *JobRepository) FindAll(
+	jobId *int64,
+	cluster *string,
+	startTime *int64) ([]*schema.Job, error) {
+
+	start := time.Now()
+	q := sq.Select(jobColumns...).From("job").
+		Where("job.job_id = ?", *jobId)
+
+	if cluster != nil {
+		q = q.Where("job.cluster = ?", *cluster)
+	}
+	if startTime != nil {
+		q = q.Where("job.start_time = ?", *startTime)
+	}
+
+	rows, err := q.RunWith(r.stmtCache).Query()
+	if err != nil {
+		log.Error("Error while running query")
+		return nil, err
+	}
+
+	jobs := make([]*schema.Job, 0, 10)
+	for rows.Next() {
+		job, err := scanJob(rows)
+		if err != nil {
+			log.Warn("Error while scanning rows")
+			return nil, err
+		}
+		jobs = append(jobs, job)
+	}
+	log.Infof("Timer FindAll %s", time.Since(start))
+	return jobs, nil
+}
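A hypothetical caller, from within this repo (a bare jobId is only unique together with cluster and start time, so FindAll may legitimately return several jobs):

package sketch

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/internal/repository"
)

// printMatches lists every job that carries the given batch job id,
// regardless of cluster or start time.
func printMatches(repo *repository.JobRepository, jobId int64) error {
	jobs, err := repo.FindAll(&jobId, nil, nil)
	if err != nil {
		return err
	}
	for _, j := range jobs {
		fmt.Printf("%s: job %d started %s\n", j.Cluster, j.JobID, j.StartTime)
	}
	return nil
}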

 // FindById executes a SQL query to find a specific batch job.
 // The job is queried using the database id.
 // It returns a pointer to a schema.Job data structure and an error variable.
@@ -171,12 +272,12 @@ func (r *JobRepository) FindById(jobId int64) (*schema.Job, error) {
 func (r *JobRepository) Start(job *schema.JobMeta) (id int64, err error) {
 	job.RawResources, err = json.Marshal(job.Resources)
 	if err != nil {
-		return -1, fmt.Errorf("encoding resources field failed: %w", err)
+		return -1, fmt.Errorf("REPOSITORY/JOB > encoding resources field failed: %w", err)
 	}

 	job.RawMetaData, err = json.Marshal(job.MetaData)
 	if err != nil {
-		return -1, fmt.Errorf("encoding metaData field failed: %w", err)
+		return -1, fmt.Errorf("REPOSITORY/JOB > encoding metaData field failed: %w", err)
 	}

 	res, err := r.DB.NamedExec(`INSERT INTO job (
@@ -210,6 +311,29 @@ func (r *JobRepository) Stop(
 	return
 }

+func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) {
+	var cnt int
+	qs := fmt.Sprintf("SELECT count(*) FROM job WHERE job.start_time < %d", startTime)
+	err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement
+	_, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime)
+	if err != nil {
+		log.Errorf(" DeleteJobsBefore(%d): error %#v", startTime, err)
+	} else {
+		log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt)
+	}
+	return cnt, err
+}
+
+func (r *JobRepository) DeleteJobById(id int64) error {
+	_, err := r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id)
+	if err != nil {
+		log.Errorf("DeleteJobById(%d): error %#v", id, err)
+	} else {
+		log.Infof("DeleteJobById(%d): Success", id)
+	}
+	return err
+}
|
|
||||||
// TODO: Use node hours instead: SELECT job.user, sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN CAST(strftime('%s', 'now') AS INTEGER) - job.start_time ELSE job.duration END)) as x FROM job GROUP BY user ORDER BY x DESC;
|
// TODO: Use node hours instead: SELECT job.user, sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN CAST(strftime('%s', 'now') AS INTEGER) - job.start_time ELSE job.duration END)) as x FROM job GROUP BY user ORDER BY x DESC;
|
||||||
func (r *JobRepository) CountGroupedJobs(
|
func (r *JobRepository) CountGroupedJobs(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
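DeleteJobsBefore counts the matching rows first and then deletes them, so the returned count reflects what was matched before deletion. A hedged sketch of a retention sweep built on it, not part of the commit; the 90-day cutoff is an assumed policy:

    // Illustrative only; r and the retention window are assumptions.
    cutoff := time.Now().AddDate(0, 0, -90).Unix()
    if cnt, err := r.DeleteJobsBefore(cutoff); err == nil {
        log.Infof("retention sweep removed %d jobs", cnt)
    }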
@ -218,6 +342,7 @@ func (r *JobRepository) CountGroupedJobs(
 	weight *model.Weights,
 	limit *int) (map[string]int, error) {
 
+	start := time.Now()
 	if !aggreg.IsValid() {
 		return nil, errors.New("invalid aggregate")
 	}
@ -232,11 +357,17 @@ func (r *JobRepository) CountGroupedJobs(
 			now := time.Now().Unix()
 			count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
 			runner = r.DB
+		default:
+			log.Infof("CountGroupedJobs() Weight %v unknown.", *weight)
 		}
 	}
 
-	q := sq.Select("job."+string(aggreg), count).From("job").GroupBy("job." + string(aggreg)).OrderBy("count DESC")
-	q = SecurityCheck(ctx, q)
+	q, qerr := SecurityCheck(ctx, sq.Select("job."+string(aggreg), count).From("job").GroupBy("job."+string(aggreg)).OrderBy("count DESC"))
+
+	if qerr != nil {
+		return nil, qerr
+	}
+
 	for _, f := range filters {
 		q = BuildWhereClause(f, q)
 	}
@ -247,6 +378,7 @@ func (r *JobRepository) CountGroupedJobs(
 	counts := map[string]int{}
 	rows, err := q.RunWith(runner).Query()
 	if err != nil {
+		log.Error("Error while running query")
 		return nil, err
 	}
 
@ -254,12 +386,14 @@ func (r *JobRepository) CountGroupedJobs(
 		var group string
 		var count int
 		if err := rows.Scan(&group, &count); err != nil {
+			log.Warn("Error while scanning rows")
 			return nil, err
 		}
 
 		counts[group] = count
 	}
 
+	log.Infof("Timer CountGroupedJobs %s", time.Since(start))
 	return counts, nil
 }
 
@ -273,7 +407,7 @@ func (r *JobRepository) UpdateMonitoringStatus(job int64, monitoringStatus int32
 }
 
 // Stop updates the job with the database id jobId using the provided arguments.
-func (r *JobRepository) Archive(
+func (r *JobRepository) MarkArchived(
 	jobId int64,
 	monitoringStatus int32,
 	metricStats map[string]schema.JobStatistics) error {
@ -296,56 +430,165 @@ func (r *JobRepository) Archive(
 			stmt = stmt.Set("net_bw_avg", stats.Avg)
 		case "file_bw":
 			stmt = stmt.Set("file_bw_avg", stats.Avg)
+		default:
+			log.Infof("MarkArchived() Metric '%v' unknown", metric)
 		}
 	}
 
 	if _, err := stmt.RunWith(r.stmtCache).Exec(); err != nil {
+		log.Warn("Error while marking job as archived")
 		return err
 	}
 	return nil
 }
 
-var ErrNotFound = errors.New("no such job or user")
-
-// FindJobOrUser returns a job database ID or a username if a job or user
-// matches the search term. As 0 is a valid job id, check if username is ""
-// instead in order to check what matched. If nothing matches the search,
-// `ErrNotFound` is returned.
-func (r *JobRepository) FindJobOrUser(
-	ctx context.Context,
-	searchterm string) (job int64, username string, err error) {
-
-	user := auth.GetUser(ctx)
-	if id, err := strconv.Atoi(searchterm); err == nil {
-		qb := sq.Select("job.id").From("job").Where("job.job_id = ?", id)
-		if user != nil && !user.HasRole(auth.RoleAdmin) && !user.HasRole(auth.RoleSupport) {
-			qb = qb.Where("job.user = ?", user.Username)
-		}
-
-		err := qb.RunWith(r.stmtCache).QueryRow().Scan(&job)
-		if err != nil && err != sql.ErrNoRows {
-			return 0, "", err
-		} else if err == nil {
-			return job, "", nil
-		}
-	}
-
-	if user == nil || user.HasRole(auth.RoleAdmin) || user.HasRole(auth.RoleSupport) {
-		err := sq.Select("job.user").Distinct().From("job").
-			Where("job.user = ?", searchterm).
-			RunWith(r.stmtCache).QueryRow().Scan(&username)
-		if err != nil && err != sql.ErrNoRows {
-			return 0, "", err
-		} else if err == nil {
-			return 0, username, nil
-		}
-	}
-
-	return 0, "", ErrNotFound
-}
+// Archiving worker thread
+func (r *JobRepository) archivingWorker() {
+	for {
+		select {
+		case job, ok := <-r.archiveChannel:
+			if !ok {
+				break
+			}
+			// not using meta data, called to load JobMeta into Cache?
+			// will fail if job meta not in repository
+			if _, err := r.FetchMetadata(job); err != nil {
+				log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+				r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
+				continue
+			}
+
+			// metricdata.ArchiveJob will fetch all the data from a MetricDataRepository and push into configured archive backend
+			// TODO: Maybe use context with cancel/timeout here
+			jobMeta, err := metricdata.ArchiveJob(job, context.Background())
+			if err != nil {
+				log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+				r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed)
+				continue
+			}
+
+			// Update the jobs database entry one last time:
+			if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil {
+				log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error())
+				continue
+			}
+
+			log.Printf("archiving job (dbid: %d) successful", job.ID)
+			r.archivePending.Done()
+		}
+	}
+}
+
+// Trigger async archiving
+func (r *JobRepository) TriggerArchiving(job *schema.Job) {
+	r.archivePending.Add(1)
+	r.archiveChannel <- job
+}
+
+// Wait for background thread to finish pending archiving operations
+func (r *JobRepository) WaitForArchiving() {
+	// close channel and wait for worker to process remaining jobs
+	r.archivePending.Wait()
+}
+
+var ErrNotFound = errors.New("no such jobname, project or user")
+var ErrForbidden = errors.New("not authorized")
+
+// FindUserOrProjectOrJobname returns a username or a projectId or a jobName if a user or project or jobName matches the search term.
+// If query is found to be an integer (= conversion to INT datatype succeeds), skip back to parent call
+// If nothing matches the search, `ErrNotFound` is returned.
+func (r *JobRepository) FindUserOrProjectOrJobname(ctx context.Context, searchterm string) (username string, project string, metasnip string, err error) {
+	if _, err := strconv.Atoi(searchterm); err == nil { // Return empty on successful conversion: parent method will redirect for integer jobId
+		return "", "", "", nil
+	} else { // Has to have letters and logged-in user for other guesses
+		user := auth.GetUser(ctx)
+		if user != nil {
+			// Find username in jobs (match)
+			uresult, _ := r.FindColumnValue(user, searchterm, "job", "user", "user", false)
+			if uresult != "" {
+				return uresult, "", "", nil
+			}
+			// Find username by name (like)
+			nresult, _ := r.FindColumnValue(user, searchterm, "user", "username", "name", true)
+			if nresult != "" {
+				return nresult, "", "", nil
+			}
+			// Find projectId in jobs (match)
+			presult, _ := r.FindColumnValue(user, searchterm, "job", "project", "project", false)
+			if presult != "" {
+				return "", presult, "", nil
+			}
+			// Still no return (or not authorized for above): Try JobName
+			// Match Metadata, on hit, parent method redirects to jobName GQL query
+			err := sq.Select("job.cluster").Distinct().From("job").
+				Where("job.meta_data LIKE ?", "%"+searchterm+"%").
+				RunWith(r.stmtCache).QueryRow().Scan(&metasnip)
+			if err != nil && err != sql.ErrNoRows {
+				return "", "", "", err
+			} else if err == nil {
+				return "", "", metasnip[0:1], nil
+			}
+		}
+		return "", "", "", ErrNotFound
+	}
+}
+
+func (r *JobRepository) FindColumnValue(user *auth.User, searchterm string, table string, selectColumn string, whereColumn string, isLike bool) (result string, err error) {
+	compareStr := " = ?"
+	query := searchterm
+	if isLike == true {
+		compareStr = " LIKE ?"
+		query = "%" + searchterm + "%"
+	}
+	if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
+		err := sq.Select(table+"."+selectColumn).Distinct().From(table).
+			Where(table+"."+whereColumn+compareStr, query).
+			RunWith(r.stmtCache).QueryRow().Scan(&result)
+		if err != nil && err != sql.ErrNoRows {
+			return "", err
+		} else if err == nil {
+			return result, nil
+		}
+		return "", ErrNotFound
+	} else {
+		log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+		return "", ErrForbidden
+	}
+}
+
+func (r *JobRepository) FindColumnValues(user *auth.User, query string, table string, selectColumn string, whereColumn string) (results []string, err error) {
+	emptyResult := make([]string, 0)
+	if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
+		rows, err := sq.Select(table+"."+selectColumn).Distinct().From(table).
+			Where(table+"."+whereColumn+" LIKE ?", fmt.Sprint("%", query, "%")).
+			RunWith(r.stmtCache).Query()
+		if err != nil && err != sql.ErrNoRows {
+			return emptyResult, err
+		} else if err == nil {
+			for rows.Next() {
+				var result string
+				err := rows.Scan(&result)
+				if err != nil {
+					rows.Close()
+					log.Warnf("Error while scanning rows: %v", err)
+					return emptyResult, err
+				}
+				results = append(results, result)
+			}
+			return results, nil
+		}
+		return emptyResult, ErrNotFound
+	} else {
+		log.Infof("Non-Admin User %s : Requested Query '%s' on table '%s' : Forbidden", user.Name, query, table)
+		return emptyResult, ErrForbidden
+	}
+}
 
 func (r *JobRepository) Partitions(cluster string) ([]string, error) {
 	var err error
+	start := time.Now()
 	partitions := r.cache.Get("partitions:"+cluster, func() (interface{}, time.Duration, int) {
 		parts := []string{}
 		if err = r.DB.Select(&parts, `SELECT DISTINCT job.partition FROM job WHERE job.cluster = ?;`, cluster); err != nil {
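The archiving path is now asynchronous: callers hand a finished job to TriggerArchiving, a single archivingWorker goroutine drains r.archiveChannel, and WaitForArchiving blocks on the pending WaitGroup during shutdown. A sketch of the intended call order, illustrative only; the jobRepo variable and the surrounding stop handler are assumptions, not part of this diff:

    // Illustrative only: expected lifecycle around the new worker.
    job, _ := jobRepo.Find(&jobId, &cluster, &startTime) // job just stopped
    jobRepo.TriggerArchiving(job)  // non-blocking; worker archives in background
    // ... later, on shutdown:
    jobRepo.WaitForArchiving()     // drain pending archive operations before exit

One design observation: as merged, archivePending.Done() is only reached on the worker's success path, so a failed archive would leave WaitForArchiving blocked; whether that is intended is not clear from the diff alone.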
@ -357,6 +600,7 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
+	log.Infof("Timer Partitions %s", time.Since(start))
 	return partitions.([]string), nil
 }
 
@ -364,12 +608,14 @@ func (r *JobRepository) Partitions(cluster string) ([]string, error) {
 // Hosts with zero jobs running on them will not show up!
 func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]int, error) {
 
+	start := time.Now()
 	subclusters := make(map[string]map[string]int)
 	rows, err := sq.Select("resources", "subcluster").From("job").
 		Where("job.job_state = 'running'").
 		Where("job.cluster = ?", cluster).
 		RunWith(r.stmtCache).Query()
 	if err != nil {
+		log.Error("Error while running query")
 		return nil, err
 	}
 
@ -380,9 +626,11 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
 		var resources []*schema.Resource
 		var subcluster string
 		if err := rows.Scan(&raw, &subcluster); err != nil {
+			log.Warn("Error while scanning rows")
 			return nil, err
 		}
 		if err := json.Unmarshal(raw, &resources); err != nil {
+			log.Warn("Error while unmarshaling raw resources json")
 			return nil, err
 		}
 
@ -397,11 +645,13 @@ func (r *JobRepository) AllocatedNodes(cluster string) (map[string]map[string]in
 		}
 	}
 
+	log.Infof("Timer AllocatedNodes %s", time.Since(start))
 	return subclusters, nil
 }
 
 func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
 
+	start := time.Now()
 	res, err := sq.Update("job").
 		Set("monitoring_status", schema.MonitoringStatusArchivingFailed).
 		Set("duration", 0).
@ -411,16 +661,255 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error {
 		Where(fmt.Sprintf("(%d - job.start_time) > (job.walltime + %d)", time.Now().Unix(), seconds)).
 		RunWith(r.DB).Exec()
 	if err != nil {
+		log.Warn("Error while stopping jobs exceeding walltime")
 		return err
 	}
 
 	rowsAffected, err := res.RowsAffected()
 	if err != nil {
+		log.Warn("Error while fetching affected rows after stopping due to exceeded walltime")
 		return err
 	}
 
 	if rowsAffected > 0 {
-		log.Warnf("%d jobs have been marked as failed due to running too long", rowsAffected)
+		log.Infof("%d jobs have been marked as failed due to running too long", rowsAffected)
 	}
+	log.Infof("Timer StopJobsExceedingWalltimeBy %s", time.Since(start))
 	return nil
 }
 
+// GraphQL validation should make sure that no unknown values can be specified.
+var groupBy2column = map[model.Aggregate]string{
+	model.AggregateUser:    "job.user",
+	model.AggregateProject: "job.project",
+	model.AggregateCluster: "job.cluster",
+}
+
+// Helper function for the jobsStatistics GraphQL query placed here so that schema.resolvers.go is not too full.
+func (r *JobRepository) JobsStatistics(ctx context.Context,
+	filter []*model.JobFilter,
+	groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
+
+	start := time.Now()
+	// In case `groupBy` is nil (not used), the model.JobsStatistics used is at the key '' (empty string)
+	stats := map[string]*model.JobsStatistics{}
+	var castType string
+
+	if r.driver == "sqlite3" {
+		castType = "int"
+	} else if r.driver == "mysql" {
+		castType = "unsigned"
+	}
+
+	// `socketsPerNode` and `coresPerSocket` can differ from cluster to cluster, so we need to explicitly loop over those.
+	for _, cluster := range archive.Clusters {
+		for _, subcluster := range cluster.SubClusters {
+			corehoursCol := fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes * %d * %d) / 3600) as %s)", subcluster.SocketsPerNode, subcluster.CoresPerSocket, castType)
+			var rawQuery sq.SelectBuilder
+			if groupBy == nil {
+				rawQuery = sq.Select(
+					"''",
+					"COUNT(job.id)",
+					fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
+					corehoursCol,
+				).From("job")
+			} else {
+				col := groupBy2column[*groupBy]
+				rawQuery = sq.Select(
+					col,
+					"COUNT(job.id)",
+					fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
+					corehoursCol,
+				).From("job").GroupBy(col)
+			}
+
+			rawQuery = rawQuery.
+				Where("job.cluster = ?", cluster.Name).
+				Where("job.subcluster = ?", subcluster.Name)
+
+			query, qerr := SecurityCheck(ctx, rawQuery)
+
+			if qerr != nil {
+				return nil, qerr
+			}
+
+			for _, f := range filter {
+				query = BuildWhereClause(f, query)
+			}
+
+			rows, err := query.RunWith(r.DB).Query()
+			if err != nil {
+				log.Warn("Error while querying DB for job statistics")
+				return nil, err
+			}
+
+			for rows.Next() {
+				var id sql.NullString
+				var jobs, walltime, corehours sql.NullInt64
+				if err := rows.Scan(&id, &jobs, &walltime, &corehours); err != nil {
+					log.Warn("Error while scanning rows")
+					return nil, err
+				}
+
+				if id.Valid {
+					if s, ok := stats[id.String]; ok {
+						s.TotalJobs += int(jobs.Int64)
+						s.TotalWalltime += int(walltime.Int64)
+						s.TotalCoreHours += int(corehours.Int64)
+					} else {
+						stats[id.String] = &model.JobsStatistics{
+							ID:             id.String,
+							TotalJobs:      int(jobs.Int64),
+							TotalWalltime:  int(walltime.Int64),
+							TotalCoreHours: int(corehours.Int64),
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if groupBy == nil {
+		query := sq.Select("COUNT(job.id)").From("job").Where("job.duration < ?", config.Keys.ShortRunningJobsDuration)
+		query, qerr := SecurityCheck(ctx, query)
+
+		if qerr != nil {
+			return nil, qerr
+		}
+
+		for _, f := range filter {
+			query = BuildWhereClause(f, query)
+		}
+		if err := query.RunWith(r.DB).QueryRow().Scan(&(stats[""].ShortJobs)); err != nil {
+			log.Warn("Error while scanning rows for short job stats")
+			return nil, err
+		}
+	} else {
+		col := groupBy2column[*groupBy]
+		query := sq.Select(col, "COUNT(job.id)").From("job").Where("job.duration < ?", config.Keys.ShortRunningJobsDuration)
+		query, qerr := SecurityCheck(ctx, query)
+
+		if qerr != nil {
+			return nil, qerr
+		}
+
+		for _, f := range filter {
+			query = BuildWhereClause(f, query)
+		}
+		rows, err := query.RunWith(r.DB).Query()
+		if err != nil {
+			log.Warn("Error while querying jobs for short jobs")
+			return nil, err
+		}
+
+		for rows.Next() {
+			var id sql.NullString
+			var shortJobs sql.NullInt64
+			if err := rows.Scan(&id, &shortJobs); err != nil {
+				log.Warn("Error while scanning rows for short jobs")
+				return nil, err
+			}
+
+			if id.Valid {
+				stats[id.String].ShortJobs = int(shortJobs.Int64)
+			}
+		}
+
+		if col == "job.user" {
+			for id := range stats {
+				emptyDash := "-"
+				user := auth.GetUser(ctx)
+				name, _ := r.FindColumnValue(user, id, "user", "name", "username", false)
+				if name != "" {
+					stats[id].Name = &name
+				} else {
+					stats[id].Name = &emptyDash
+				}
+			}
+		}
+	}
+
+	// Calculating the histogram data is expensive, so only do it if needed.
+	// An explicit resolver can not be used because we need to know the filters.
+	histogramsNeeded := false
+	fields := graphql.CollectFieldsCtx(ctx, nil)
+	for _, col := range fields {
+		if col.Name == "histDuration" || col.Name == "histNumNodes" {
+			histogramsNeeded = true
+		}
+	}
+
+	res := make([]*model.JobsStatistics, 0, len(stats))
+	for _, stat := range stats {
+		res = append(res, stat)
+		id, col := "", ""
+		if groupBy != nil {
+			id = stat.ID
+			col = groupBy2column[*groupBy]
+		}
+
+		if histogramsNeeded {
+			var err error
+			value := fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
+			stat.HistDuration, err = r.jobsStatisticsHistogram(ctx, value, filter, id, col)
+			if err != nil {
+				log.Warn("Error while loading job statistics histogram: running jobs")
+				return nil, err
+			}
+
+			stat.HistNumNodes, err = r.jobsStatisticsHistogram(ctx, "job.num_nodes as value", filter, id, col)
+			if err != nil {
+				log.Warn("Error while loading job statistics histogram: num nodes")
+				return nil, err
+			}
+		}
+	}
+
+	log.Infof("Timer JobStatistics %s", time.Since(start))
+	return res, nil
+}
+
+// `value` must be the column grouped by, but renamed to "value". `id` and `col` can optionally be used
+// to add a condition to the query of the kind "<col> = <id>".
+func (r *JobRepository) jobsStatisticsHistogram(ctx context.Context,
+	value string, filters []*model.JobFilter, id, col string) ([]*model.HistoPoint, error) {
+
+	start := time.Now()
+	query, qerr := SecurityCheck(ctx, sq.Select(value, "COUNT(job.id) AS count").From("job"))
+
+	if qerr != nil {
+		return nil, qerr
+	}
+
+	for _, f := range filters {
+		query = BuildWhereClause(f, query)
+	}
+
+	if len(id) != 0 && len(col) != 0 {
+		query = query.Where(col+" = ?", id)
+	}
+
+	rows, err := query.GroupBy("value").RunWith(r.DB).Query()
+	if err != nil {
+		log.Error("Error while running query")
+		return nil, err
+	}
+
+	points := make([]*model.HistoPoint, 0)
+	for rows.Next() {
+		point := model.HistoPoint{}
+		if err := rows.Scan(&point.Value, &point.Count); err != nil {
+			log.Warn("Error while scanning rows")
+			return nil, err
+		}
+
+		points = append(points, &point)
+	}
+	log.Infof("Timer jobsStatisticsHistogram %s", time.Since(start))
+	return points, nil
+}
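JobsStatistics aggregates per subcluster because the core-hour weight (socketsPerNode times coresPerSocket) differs between subclusters, and then merges the partial results in the stats map. A hedged sketch of a grouped call, not part of the commit; the context, repository handle, and cluster name are illustrative assumptions:

    // Illustrative only: per-user statistics over one cluster.
    groupBy := model.AggregateUser
    clusterName := "emmy"
    stats, err := r.JobsStatistics(ctx, []*model.JobFilter{
        {Cluster: &model.StringInput{Eq: &clusterName}},
    }, &groupBy)
    if err == nil {
        for _, s := range stats {
            fmt.Printf("%s: %d jobs, %d core-hours\n", s.ID, s.TotalJobs, s.TotalCoreHours)
        }
    }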
@ -8,10 +8,12 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	_ "github.com/mattn/go-sqlite3"
 )
 
 func init() {
+	log.Init("info", true)
 	Connect("sqlite3", "../../test/test.db")
 }
 
internal/repository/migration.go (new file, 113 lines)
@ -0,0 +1,113 @@
+// Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
+// All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+package repository
+
+import (
+	"database/sql"
+	"embed"
+	"fmt"
+	"os"
+
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
+	"github.com/golang-migrate/migrate/v4"
+	"github.com/golang-migrate/migrate/v4/database/mysql"
+	"github.com/golang-migrate/migrate/v4/database/sqlite3"
+	"github.com/golang-migrate/migrate/v4/source/iofs"
+)
+
+const supportedVersion uint = 3
+
+//go:embed migrations/*
+var migrationFiles embed.FS
+
+func checkDBVersion(backend string, db *sql.DB) {
+	var m *migrate.Migrate
+
+	if backend == "sqlite3" {
+
+		driver, err := sqlite3.WithInstance(db, &sqlite3.Config{})
+		if err != nil {
+			log.Fatal(err)
+		}
+		d, err := iofs.New(migrationFiles, "migrations/sqlite3")
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		m, err = migrate.NewWithInstance("iofs", d, "sqlite3", driver)
+		if err != nil {
+			log.Fatal(err)
+		}
+	} else if backend == "mysql" {
+		driver, err := mysql.WithInstance(db, &mysql.Config{})
+		if err != nil {
+			log.Fatal(err)
+		}
+		d, err := iofs.New(migrationFiles, "migrations/mysql")
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		m, err = migrate.NewWithInstance("iofs", d, "mysql", driver)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	v, _, err := m.Version()
+	if err != nil {
+		if err == migrate.ErrNilVersion {
+			log.Warn("Legacy database without version or missing database file!")
+		} else {
+			log.Fatal(err)
+		}
+	}
+
+	if v < supportedVersion {
+		log.Warnf("Unsupported database version %d, need %d.\nPlease backup your database file and run cc-backend --migrate-db", v, supportedVersion)
+		os.Exit(0)
+	}
+
+	if v > supportedVersion {
+		log.Warnf("Unsupported database version %d, need %d.\nPlease refer to documentation how to downgrade db with external migrate tool!", v, supportedVersion)
+		os.Exit(0)
+	}
+}
+
+func MigrateDB(backend string, db string) {
+	var m *migrate.Migrate
+
+	if backend == "sqlite3" {
+		d, err := iofs.New(migrationFiles, "migrations/sqlite3")
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("sqlite3://%s?_foreign_keys=on", db))
+		if err != nil {
+			log.Fatal(err)
+		}
+	} else if backend == "mysql" {
+		d, err := iofs.New(migrationFiles, "migrations/mysql")
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		m, err = migrate.NewWithSourceInstance("iofs", d, fmt.Sprintf("mysql://%s?multiStatements=true", db))
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	if err := m.Up(); err != nil {
+		if err == migrate.ErrNoChange {
+			log.Info("DB already up to date!")
+		} else {
+			log.Fatal(err)
+		}
+	}
+
+	m.Close()
+}
@ -0,0 +1,5 @@
+DROP TABLE IF EXISTS job;
+DROP TABLE IF EXISTS tags;
+DROP TABLE IF EXISTS jobtag;
+DROP TABLE IF EXISTS configuration;
+DROP TABLE IF EXISTS user;
internal/repository/migrations/mysql/01_init-schema.up.sql (new file, 62 lines)
@ -0,0 +1,62 @@
+CREATE TABLE IF NOT EXISTS job (
+    id INTEGER AUTO_INCREMENT PRIMARY KEY,
+    job_id BIGINT NOT NULL,
+    cluster VARCHAR(255) NOT NULL,
+    subcluster VARCHAR(255) NOT NULL,
+    start_time BIGINT NOT NULL, -- Unix timestamp
+
+    user VARCHAR(255) NOT NULL,
+    project VARCHAR(255) NOT NULL,
+    `partition` VARCHAR(255) NOT NULL,
+    array_job_id BIGINT NOT NULL,
+    duration INT NOT NULL DEFAULT 0,
+    walltime INT NOT NULL DEFAULT 0,
+    job_state VARCHAR(255) NOT NULL
+    CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
+        'stopped', 'timeout', 'preempted', 'out_of_memory')),
+    meta_data TEXT,          -- JSON
+    resources TEXT NOT NULL, -- JSON
+
+    num_nodes INT NOT NULL,
+    num_hwthreads INT NOT NULL,
+    num_acc INT NOT NULL,
+    smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1)),
+    exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
+    monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
+
+    mem_used_max REAL NOT NULL DEFAULT 0.0,
+    flops_any_avg REAL NOT NULL DEFAULT 0.0,
+    mem_bw_avg REAL NOT NULL DEFAULT 0.0,
+    load_avg REAL NOT NULL DEFAULT 0.0,
+    net_bw_avg REAL NOT NULL DEFAULT 0.0,
+    net_data_vol_total REAL NOT NULL DEFAULT 0.0,
+    file_bw_avg REAL NOT NULL DEFAULT 0.0,
+    file_data_vol_total REAL NOT NULL DEFAULT 0.0);
+
+CREATE TABLE IF NOT EXISTS tag (
+    id INTEGER PRIMARY KEY,
+    tag_type VARCHAR(255) NOT NULL,
+    tag_name VARCHAR(255) NOT NULL,
+    CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
+
+CREATE TABLE IF NOT EXISTS jobtag (
+    job_id INTEGER,
+    tag_id INTEGER,
+    PRIMARY KEY (job_id, tag_id),
+    FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
+    FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
+
+CREATE TABLE IF NOT EXISTS configuration (
+    username varchar(255),
+    confkey varchar(255),
+    value varchar(255),
+    PRIMARY KEY (username, confkey),
+    FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
+
+CREATE TABLE IF NOT EXISTS user (
+    username varchar(255) PRIMARY KEY NOT NULL,
+    password varchar(255) DEFAULT NULL,
+    ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reasons, fills the "AuthSource" */
+    name varchar(255) DEFAULT NULL,
+    roles varchar(255) NOT NULL DEFAULT "[]",
+    email varchar(255) DEFAULT NULL);
@ -0,0 +1,5 @@
+DROP INDEX IF EXISTS job_stats;
+DROP INDEX IF EXISTS job_by_user;
+DROP INDEX IF EXISTS job_by_starttime;
+DROP INDEX IF EXISTS job_by_job_id;
+DROP INDEX IF EXISTS job_by_state;
internal/repository/migrations/mysql/02_add-index.up.sql (new file, 5 lines)
@ -0,0 +1,5 @@
+CREATE INDEX IF NOT EXISTS job_stats ON job (cluster,subcluster,user);
+CREATE INDEX IF NOT EXISTS job_by_user ON job (user);
+CREATE INDEX IF NOT EXISTS job_by_starttime ON job (start_time);
+CREATE INDEX IF NOT EXISTS job_by_job_id ON job (job_id);
+CREATE INDEX IF NOT EXISTS job_by_state ON job (job_state);
@ -0,0 +1 @@
+ALTER TABLE user DROP COLUMN projects;
@ -0,0 +1 @@
+ALTER TABLE user ADD COLUMN projects varchar(255) NOT NULL DEFAULT "[]";
@ -0,0 +1,5 @@
+DROP TABLE IF EXISTS job;
+DROP TABLE IF EXISTS tags;
+DROP TABLE IF EXISTS jobtag;
+DROP TABLE IF EXISTS configuration;
+DROP TABLE IF EXISTS user;
internal/repository/migrations/sqlite3/01_init-schema.up.sql (new file, 62 lines)
@ -0,0 +1,62 @@
+CREATE TABLE IF NOT EXISTS job (
+    id INTEGER PRIMARY KEY,
+    job_id BIGINT NOT NULL,
+    cluster VARCHAR(255) NOT NULL,
+    subcluster VARCHAR(255) NOT NULL,
+    start_time BIGINT NOT NULL, -- Unix timestamp
+
+    user VARCHAR(255) NOT NULL,
+    project VARCHAR(255) NOT NULL,
+    partition VARCHAR(255) NOT NULL,
+    array_job_id BIGINT NOT NULL,
+    duration INT NOT NULL DEFAULT 0,
+    walltime INT NOT NULL DEFAULT 0,
+    job_state VARCHAR(255) NOT NULL
+    CHECK(job_state IN ('running', 'completed', 'failed', 'cancelled',
+        'stopped', 'timeout', 'preempted', 'out_of_memory')),
+    meta_data TEXT,          -- JSON
+    resources TEXT NOT NULL, -- JSON
+
+    num_nodes INT NOT NULL,
+    num_hwthreads INT NOT NULL,
+    num_acc INT NOT NULL,
+    smt TINYINT NOT NULL DEFAULT 1 CHECK(smt IN (0, 1)),
+    exclusive TINYINT NOT NULL DEFAULT 1 CHECK(exclusive IN (0, 1, 2)),
+    monitoring_status TINYINT NOT NULL DEFAULT 1 CHECK(monitoring_status IN (0, 1, 2, 3)),
+
+    mem_used_max REAL NOT NULL DEFAULT 0.0,
+    flops_any_avg REAL NOT NULL DEFAULT 0.0,
+    mem_bw_avg REAL NOT NULL DEFAULT 0.0,
+    load_avg REAL NOT NULL DEFAULT 0.0,
+    net_bw_avg REAL NOT NULL DEFAULT 0.0,
+    net_data_vol_total REAL NOT NULL DEFAULT 0.0,
+    file_bw_avg REAL NOT NULL DEFAULT 0.0,
+    file_data_vol_total REAL NOT NULL DEFAULT 0.0);
+
+CREATE TABLE IF NOT EXISTS tag (
+    id INTEGER PRIMARY KEY,
+    tag_type VARCHAR(255) NOT NULL,
+    tag_name VARCHAR(255) NOT NULL,
+    CONSTRAINT be_unique UNIQUE (tag_type, tag_name));
+
+CREATE TABLE IF NOT EXISTS jobtag (
+    job_id INTEGER,
+    tag_id INTEGER,
+    PRIMARY KEY (job_id, tag_id),
+    FOREIGN KEY (job_id) REFERENCES job (id) ON DELETE CASCADE,
+    FOREIGN KEY (tag_id) REFERENCES tag (id) ON DELETE CASCADE);
+
+CREATE TABLE IF NOT EXISTS configuration (
+    username varchar(255),
+    confkey varchar(255),
+    value varchar(255),
+    PRIMARY KEY (username, confkey),
+    FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);
+
+CREATE TABLE IF NOT EXISTS user (
+    username varchar(255) PRIMARY KEY NOT NULL,
+    password varchar(255) DEFAULT NULL,
+    ldap tinyint NOT NULL DEFAULT 0, /* col called "ldap" for historic reasons, fills the "AuthSource" */
+    name varchar(255) DEFAULT NULL,
+    roles varchar(255) NOT NULL DEFAULT "[]",
+    email varchar(255) DEFAULT NULL);
@ -0,0 +1,5 @@
+DROP INDEX IF EXISTS job_stats;
+DROP INDEX IF EXISTS job_by_user;
+DROP INDEX IF EXISTS job_by_starttime;
+DROP INDEX IF EXISTS job_by_job_id;
+DROP INDEX IF EXISTS job_by_state;
@ -0,0 +1,5 @@
+CREATE INDEX IF NOT EXISTS job_stats ON job (cluster,subcluster,user);
+CREATE INDEX IF NOT EXISTS job_by_user ON job (user);
+CREATE INDEX IF NOT EXISTS job_by_starttime ON job (start_time);
+CREATE INDEX IF NOT EXISTS job_by_job_id ON job (job_id);
+CREATE INDEX IF NOT EXISTS job_by_state ON job (job_state);
@ -0,0 +1 @@
+ALTER TABLE user DROP COLUMN projects;
@ -0,0 +1 @@
+ALTER TABLE user ADD COLUMN projects varchar(255) NOT NULL DEFAULT "[]";
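Taken together, these embedded SQL files give golang-migrate three versioned steps per database backend, matching supportedVersion = 3 in migration.go. A sketch of the expected call site, illustrative only; the flag wiring is an assumption and only the MigrateDB signature and the --migrate-db flag name are taken from this commit:

    // Illustrative only: wiring the --migrate-db flag to MigrateDB.
    var flagMigrateDB bool
    flag.BoolVar(&flagMigrateDB, "migrate-db", false, "migrate database to supported version")
    flag.Parse()
    if flagMigrateDB {
        repository.MigrateDB("sqlite3", "./var/job.db") // backend and path assumed
        os.Exit(0)
    }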
@ -26,8 +26,11 @@ func (r *JobRepository) QueryJobs(
 	page *model.PageRequest,
 	order *model.OrderByInput) ([]*schema.Job, error) {
 
-	query := sq.Select(jobColumns...).From("job")
-	query = SecurityCheck(ctx, query)
+	query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("job"))
+
+	if qerr != nil {
+		return nil, qerr
+	}
+
 	if order != nil {
 		field := toSnakeCase(order.Field)
@ -36,7 +39,7 @@ func (r *JobRepository) QueryJobs(
 		} else if order.Order == model.SortDirectionEnumDesc {
 			query = query.OrderBy(fmt.Sprintf("job.%s DESC", field))
 		} else {
-			return nil, errors.New("invalid sorting order")
+			return nil, errors.New("REPOSITORY/QUERY > invalid sorting order")
 		}
 	}
 
@ -51,12 +54,14 @@ func (r *JobRepository) QueryJobs(
 
 	sql, args, err := query.ToSql()
 	if err != nil {
+		log.Warn("Error while converting query to sql")
 		return nil, err
 	}
 
 	log.Debugf("SQL query: `%s`, args: %#v", sql, args)
 	rows, err := query.RunWith(r.stmtCache).Query()
 	if err != nil {
+		log.Error("Error while running query")
 		return nil, err
 	}
 
@ -65,6 +70,7 @@ func (r *JobRepository) QueryJobs(
 		job, err := scanJob(rows)
 		if err != nil {
 			rows.Close()
+			log.Warn("Error while scanning rows")
 			return nil, err
 		}
 		jobs = append(jobs, job)
@ -79,8 +85,12 @@ func (r *JobRepository) CountJobs(
 	filters []*model.JobFilter) (int, error) {
 
 	// count all jobs:
-	query := sq.Select("count(*)").From("job")
-	query = SecurityCheck(ctx, query)
+	query, qerr := SecurityCheck(ctx, sq.Select("count(*)").From("job"))
+
+	if qerr != nil {
+		return 0, qerr
+	}
+
 	for _, f := range filters {
 		query = BuildWhereClause(f, query)
 	}
@ -92,13 +102,23 @@ func (r *JobRepository) CountJobs(
 	return count, nil
 }
 
-func SecurityCheck(ctx context.Context, query sq.SelectBuilder) sq.SelectBuilder {
+func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (queryOut sq.SelectBuilder, err error) {
 	user := auth.GetUser(ctx)
-	if user == nil || user.HasRole(auth.RoleAdmin) || user.HasRole(auth.RoleApi) || user.HasRole(auth.RoleSupport) {
-		return query
-	}
-
-	return query.Where("job.user = ?", user.Username)
+	if user == nil || user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleApi}) { // Admin & Co. : All jobs
+		return query, nil
+	} else if user.HasRole(auth.RoleManager) { // Manager : Add filter for managed projects' jobs only + personal jobs
+		if len(user.Projects) != 0 {
+			return query.Where(sq.Or{sq.Eq{"job.project": user.Projects}, sq.Eq{"job.user": user.Username}}), nil
+		} else {
+			log.Infof("Manager-User '%s' has no defined projects to lookup! Query only personal jobs ...", user.Username)
+			return query.Where("job.user = ?", user.Username), nil
+		}
+	} else if user.HasRole(auth.RoleUser) { // User : Only personal jobs
+		return query.Where("job.user = ?", user.Username), nil
+	} else { // Unauthorized : Error
+		var qnil sq.SelectBuilder
+		return qnil, errors.New(fmt.Sprintf("User '%s' with unknown roles! [%#v]\n", user.Username, user.Roles))
+	}
 }
 
 // Build a sq.SelectBuilder out of a schema.JobFilter.
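Since SecurityCheck now returns an error alongside the builder, every query helper in this file follows the same pattern: wrap the base sq.Select in SecurityCheck, bail out on qerr, then layer BuildWhereClause filters on top. The pattern condensed, as it appears throughout this commit:

    // The recurring SecurityCheck composition pattern.
    query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("job"))
    if qerr != nil {
        return nil, qerr
    }
    for _, f := range filters {
        query = BuildWhereClause(f, query)
    }
    rows, err := query.RunWith(r.stmtCache).Query()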
@ -118,6 +138,9 @@ func BuildWhereClause(filter *model.JobFilter, query sq.SelectBuilder) sq.Select
 	if filter.Project != nil {
 		query = buildStringCondition("job.project", filter.Project, query)
 	}
+	if filter.JobName != nil {
+		query = buildStringCondition("job.meta_data", filter.JobName, query)
+	}
 	if filter.Cluster != nil {
 		query = buildStringCondition("job.cluster", filter.Cluster, query)
 	}
@ -200,6 +223,13 @@ func buildStringCondition(field string, cond *model.StringInput, query sq.Select
 	if cond.Contains != nil {
 		return query.Where(field+" LIKE ?", fmt.Sprint("%", *cond.Contains, "%"))
 	}
+	if cond.In != nil {
+		queryUsers := make([]string, len(cond.In))
+		for i, val := range cond.In {
+			queryUsers[i] = val
+		}
+		return query.Where(sq.Or{sq.Eq{"job.user": queryUsers}})
+	}
 	return query
 }
 
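The new cond.In branch relies on squirrel expanding an sq.Eq whose value is a slice into a parameterized SQL IN clause, so the copied queryUsers slice becomes a single bound condition; note that the column is hard-coded to job.user rather than derived from the field argument. A sketch of what the builder generates, illustrative values only:

    // Illustrative only: what the cond.In branch produces via squirrel.
    q := sq.Select("*").From("job").Where(sq.Or{sq.Eq{"job.user": []string{"alice", "bob"}}})
    sqlStr, args, _ := q.ToSql()
    // sqlStr: SELECT * FROM job WHERE (job.user IN (?,?))
    // args:   ["alice" "bob"]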
@ -209,7 +239,7 @@ var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
 func toSnakeCase(str string) string {
 	for _, c := range str {
 		if c == '\'' || c == '\\' {
-			panic("A hacker (probably not)!!!")
+			log.Panic("toSnakeCase() attack vector!")
 		}
 	}
 
@ -5,7 +5,11 @@
 package repository
 
 import (
+	"strings"
+
+	"github.com/ClusterCockpit/cc-backend/internal/auth"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	sq "github.com/Masterminds/squirrel"
 )
@ -13,16 +17,19 @@ import (
 // Add the tag with id `tagId` to the job with the database id `jobId`.
 func (r *JobRepository) AddTag(job int64, tag int64) ([]*schema.Tag, error) {
 	if _, err := r.stmtCache.Exec(`INSERT INTO jobtag (job_id, tag_id) VALUES ($1, $2)`, job, tag); err != nil {
+		log.Error("Error while running query")
 		return nil, err
 	}
 
 	j, err := r.FindById(job)
 	if err != nil {
+		log.Warn("Error while finding job by id")
 		return nil, err
 	}
 
 	tags, err := r.GetTags(&job)
 	if err != nil {
+		log.Warn("Error while getting tags for job")
 		return nil, err
 	}
 
@ -32,16 +39,19 @@ func (r *JobRepository) AddTag(job int64, tag int64) ([]*schema.Tag, error) {
 // Removes a tag from a job
 func (r *JobRepository) RemoveTag(job, tag int64) ([]*schema.Tag, error) {
 	if _, err := r.stmtCache.Exec("DELETE FROM jobtag WHERE jobtag.job_id = $1 AND jobtag.tag_id = $2", job, tag); err != nil {
+		log.Error("Error while running query")
 		return nil, err
 	}
 
 	j, err := r.FindById(job)
 	if err != nil {
+		log.Warn("Error while finding job by id")
 		return nil, err
 	}
 
 	tags, err := r.GetTags(&job)
 	if err != nil {
+		log.Warn("Error while getting tags for job")
 		return nil, err
 	}
 
@ -58,7 +68,7 @@ func (r *JobRepository) CreateTag(tagType string, tagName string) (tagId int64,
 	return res.LastInsertId()
 }
 
-func (r *JobRepository) CountTags(user *string) (tags []schema.Tag, counts map[string]int, err error) {
+func (r *JobRepository) CountTags(user *auth.User) (tags []schema.Tag, counts map[string]int, err error) {
 	tags = make([]schema.Tag, 0, 100)
 	xrows, err := r.DB.Queryx("SELECT * FROM tag")
 	if err != nil {
@ -77,9 +87,13 @@ func (r *JobRepository) CountTags(user *auth.User) (tags []schema.Tag, counts ma
 		From("tag t").
 		LeftJoin("jobtag jt ON t.id = jt.tag_id").
 		GroupBy("t.tag_name")
-	if user != nil {
-		q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ?)", *user)
-	}
+
+	if user != nil && user.HasRole(auth.RoleUser) { // USER: Only count own jobs
+		q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ?)", user.Username)
+	} else if user != nil && user.HasRole(auth.RoleManager) { // MANAGER: Count own jobs plus project's jobs
+		// Build ("project1", "project2", ...) list of variable length directly in SQL string
+		q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ? OR job.project IN (\""+strings.Join(user.Projects, "\",\"")+"\"))", user.Username)
+	} // else: ADMIN || SUPPORT: Count all jobs
 
 	rows, err := q.RunWith(r.stmtCache).Query()
 	if err != nil {
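The manager branch splices user.Projects into the SQL string with strings.Join, which only stays safe while project identifiers are guaranteed not to contain quotes. A parameterized variant that binds the list instead is sketched below; it is an alternative for illustration, not part of the commit, and uses squirrel's Placeholders helper:

    // Illustrative alternative: bind the project list instead of splicing it.
    args := make([]interface{}, 0, len(user.Projects)+1)
    args = append(args, user.Username)
    for _, p := range user.Projects {
        args = append(args, p)
    }
    q = q.Where("jt.job_id IN (SELECT id FROM job WHERE job.user = ? OR job.project IN ("+
        sq.Placeholders(len(user.Projects))+"))", args...)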
@ -138,6 +152,7 @@ func (r *JobRepository) GetTags(job *int64) ([]*schema.Tag, error) {
 
 	rows, err := q.RunWith(r.stmtCache).Query()
 	if err != nil {
+		log.Error("Error while running query")
 		return nil, err
 	}
 
@ -145,6 +160,7 @@ func (r *JobRepository) GetTags(job *int64) ([]*schema.Tag, error) {
 	for rows.Next() {
 		tag := &schema.Tag{}
 		if err := rows.Scan(&tag.ID, &tag.Type, &tag.Name); err != nil {
+			log.Warn("Error while scanning rows")
 			return nil, err
 		}
 		tags = append(tags, tag)
|
@ -6,12 +6,12 @@ package repository
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"log"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/auth"
|
"github.com/ClusterCockpit/cc-backend/internal/auth"
|
||||||
"github.com/ClusterCockpit/cc-backend/internal/config"
|
"github.com/ClusterCockpit/cc-backend/internal/config"
|
||||||
|
"github.com/ClusterCockpit/cc-backend/pkg/log"
|
||||||
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
|
"github.com/ClusterCockpit/cc-backend/pkg/lrucache"
|
||||||
"github.com/jmoiron/sqlx"
|
"github.com/jmoiron/sqlx"
|
||||||
)
|
)
|
||||||
@ -33,21 +33,9 @@ func GetUserCfgRepo() *UserCfgRepo {
 	userCfgRepoOnce.Do(func() {
 		db := GetConnection()
 
-		_, err := db.DB.Exec(`
-		CREATE TABLE IF NOT EXISTS configuration (
-			username varchar(255),
-			confkey varchar(255),
-			value varchar(255),
-			PRIMARY KEY (username, confkey),
-			FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`)
-		if err != nil {
-			log.Fatal(err)
-		}
-
 		lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`)
 		if err != nil {
-			log.Fatal(err)
+			log.Fatalf("db.DB.Preparex() error: %v", err)
 		}
 
 		userCfgRepoInstance = &UserCfgRepo{
@ -75,13 +63,14 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
 	}
 
 	data := uCfg.cache.Get(user.Username, func() (interface{}, time.Duration, int) {
-		config := make(map[string]interface{}, len(uCfg.uiDefaults))
+		uiconfig := make(map[string]interface{}, len(uCfg.uiDefaults))
 		for k, v := range uCfg.uiDefaults {
-			config[k] = v
+			uiconfig[k] = v
 		}
 
 		rows, err := uCfg.Lookup.Query(user.Username)
 		if err != nil {
+			log.Warnf("Error while looking up user uiconfig for user '%v'", user.Username)
 			return err, 0, 0
 		}
 
@ -90,22 +79,28 @@ func (uCfg *UserCfgRepo) GetUIConfig(user *auth.User) (map[string]interface{}, e
 		for rows.Next() {
 			var key, rawval string
 			if err := rows.Scan(&key, &rawval); err != nil {
+				log.Warn("Error while scanning user uiconfig values")
 				return err, 0, 0
 			}
 
 			var val interface{}
 			if err := json.Unmarshal([]byte(rawval), &val); err != nil {
+				log.Warn("Error while unmarshaling raw user uiconfig json")
 				return err, 0, 0
 			}
 
 			size += len(key)
 			size += len(rawval)
-			config[key] = val
+			uiconfig[key] = val
 		}
 
-		return config, 24 * time.Hour, size
+		// Add global ShortRunningJobsDuration setting as plot_list_hideShortRunningJobs
+		uiconfig["plot_list_hideShortRunningJobs"] = config.Keys.ShortRunningJobsDuration
+
+		return uiconfig, 24 * time.Hour, size
 	})
 	if err, ok := data.(error); ok {
+		log.Error("Error in returned dataset")
 		return nil, err
 	}
 
@ -122,6 +117,7 @@ func (uCfg *UserCfgRepo) UpdateConfig(
 	if user == nil {
 		var val interface{}
 		if err := json.Unmarshal([]byte(value), &val); err != nil {
+			log.Warn("Error while unmarshaling raw user config json")
 			return err
 		}
 
@ -131,8 +127,8 @@ func (uCfg *UserCfgRepo) UpdateConfig(
 		return nil
 	}
 
-	if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`,
-		user, key, value); err != nil {
+	if _, err := uCfg.DB.Exec(`REPLACE INTO configuration (username, confkey, value) VALUES (?, ?, ?)`, user.Username, key, value); err != nil {
+		log.Warnf("Error while replacing user config in DB for user '%v'", user.Username)
 		return err
 	}
 
@ -12,8 +12,9 @@ import (
 	"strings"
 	"time"
 
+	"github.com/ClusterCockpit/cc-backend/internal/api"
 	"github.com/ClusterCockpit/cc-backend/internal/auth"
-	"github.com/ClusterCockpit/cc-backend/internal/graph"
+	"github.com/ClusterCockpit/cc-backend/internal/config"
 	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
@ -44,7 +45,7 @@ var routes []Route = []Route{
|
|||||||
{"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute},
|
{"/monitoring/user/{id}", "monitoring/user.tmpl", "User <ID> - ClusterCockpit", true, setupUserRoute},
|
||||||
{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> - ClusterCockpit", false, setupClusterRoute},
|
{"/monitoring/systems/{cluster}", "monitoring/systems.tmpl", "Cluster <ID> - ClusterCockpit", false, setupClusterRoute},
|
||||||
{"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute},
|
{"/monitoring/node/{cluster}/{hostname}", "monitoring/node.tmpl", "Node <ID> - ClusterCockpit", false, setupNodeRoute},
|
||||||
{"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analaysis - ClusterCockpit", true, setupAnalysisRoute},
|
{"/monitoring/analysis/{cluster}", "monitoring/analysis.tmpl", "Analysis - ClusterCockpit", true, setupAnalysisRoute},
|
||||||
{"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of <ID> - ClusterCockpit", false, setupClusterRoute},
|
{"/monitoring/status/{cluster}", "monitoring/status.tmpl", "Status of <ID> - ClusterCockpit", false, setupClusterRoute},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -61,21 +62,21 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType {
|
|||||||
State: []schema.JobState{schema.JobStateRunning},
|
State: []schema.JobState{schema.JobStateRunning},
|
||||||
}}, nil, nil)
|
}}, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to count jobs: %s", err.Error())
|
log.Warnf("failed to count jobs: %s", err.Error())
|
||||||
runningJobs = map[string]int{}
|
runningJobs = map[string]int{}
|
||||||
}
|
}
|
||||||
totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil)
|
totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to count jobs: %s", err.Error())
|
log.Warnf("failed to count jobs: %s", err.Error())
|
||||||
totalJobs = map[string]int{}
|
totalJobs = map[string]int{}
|
||||||
}
|
}
|
||||||
from := time.Now().Add(-24 * time.Hour)
|
from := time.Now().Add(-24 * time.Hour)
|
||||||
recentShortJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
|
recentShortJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, []*model.JobFilter{{
|
||||||
StartTime: &schema.TimeRange{From: &from, To: nil},
|
StartTime: &schema.TimeRange{From: &from, To: nil},
|
||||||
Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration},
|
Duration: &schema.IntRange{From: 0, To: config.Keys.ShortRunningJobsDuration},
|
||||||
}}, nil, nil)
|
}}, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to count jobs: %s", err.Error())
|
log.Warnf("failed to count jobs: %s", err.Error())
|
||||||
recentShortJobs = map[string]int{}
|
recentShortJobs = map[string]int{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -103,6 +104,7 @@ func setupUserRoute(i InfoType, r *http.Request) InfoType {
|
|||||||
username := mux.Vars(r)["id"]
|
username := mux.Vars(r)["id"]
|
||||||
i["id"] = username
|
i["id"] = username
|
||||||
i["username"] = username
|
i["username"] = username
|
||||||
|
// TODO: If forbidden (== err exists), redirect to error page
|
||||||
if user, _ := auth.FetchUser(r.Context(), jobRepo.DB, username); user != nil {
|
if user, _ := auth.FetchUser(r.Context(), jobRepo.DB, username); user != nil {
|
||||||
i["name"] = user.Name
|
i["name"] = user.Name
|
||||||
i["email"] = user.Email
|
i["email"] = user.Email
|
||||||
@ -141,16 +143,13 @@ func setupAnalysisRoute(i InfoType, r *http.Request) InfoType {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
|
func setupTaglistRoute(i InfoType, r *http.Request) InfoType {
|
||||||
var username *string = nil
|
|
||||||
jobRepo := repository.GetJobRepository()
|
jobRepo := repository.GetJobRepository()
|
||||||
if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleAdmin) {
|
user := auth.GetUser(r.Context())
|
||||||
username = &user.Username
|
|
||||||
}
|
|
||||||
|
|
||||||
tags, counts, err := jobRepo.CountTags(username)
|
tags, counts, err := jobRepo.CountTags(user)
|
||||||
tagMap := make(map[string][]map[string]interface{})
|
tagMap := make(map[string][]map[string]interface{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("GetTags failed: %s", err.Error())
|
log.Warnf("GetTags failed: %s", err.Error())
|
||||||
i["tagmap"] = tagMap
|
i["tagmap"] = tagMap
|
||||||
return i
|
return i
|
||||||
}
|
}
|
||||||
@ -180,9 +179,17 @@ func buildFilterPresets(query url.Values) map[string]interface{} {
|
|||||||
filterPresets["project"] = query.Get("project")
|
filterPresets["project"] = query.Get("project")
|
||||||
filterPresets["projectMatch"] = "eq"
|
filterPresets["projectMatch"] = "eq"
|
||||||
}
|
}
|
||||||
if query.Get("user") != "" {
|
if query.Get("jobName") != "" {
|
||||||
filterPresets["user"] = query.Get("user")
|
filterPresets["jobName"] = query.Get("jobName")
|
||||||
filterPresets["userMatch"] = "eq"
|
}
|
||||||
|
if len(query["user"]) != 0 {
|
||||||
|
if len(query["user"]) == 1 {
|
||||||
|
filterPresets["user"] = query.Get("user")
|
||||||
|
filterPresets["userMatch"] = "contains"
|
||||||
|
} else {
|
||||||
|
filterPresets["user"] = query["user"]
|
||||||
|
filterPresets["userMatch"] = "in"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if len(query["state"]) != 0 {
|
if len(query["state"]) != 0 {
|
||||||
filterPresets["state"] = query["state"]
|
filterPresets["state"] = query["state"]
|
||||||
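A quick illustration of the new user-filter branching above: a single ?user= parameter now produces a fuzzy "contains" match, while repeating the parameter yields an exact "in" match over the listed names. Standalone sketch (the helper function and sample values are invented for illustration):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    // userPreset condenses the branch added to buildFilterPresets; it assumes
    // the query carries at least one user value (the real code checks first).
    func userPreset(query url.Values) (interface{}, string) {
    	if len(query["user"]) == 1 {
    		return query.Get("user"), "contains"
    	}
    	return query["user"], "in"
    }

    func main() {
    	single, _ := url.ParseQuery("user=alice")
    	multi, _ := url.ParseQuery("user=alice&user=bob")
    	fmt.Println(userPreset(single)) // alice contains
    	fmt.Println(userPreset(multi))  // [alice bob] in
    }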
@@ -270,17 +277,15 @@ func SetupRoutes(router *mux.Router, version string, hash string, buildTime stri
 				title = strings.Replace(route.Title, "<ID>", id.(string), 1)
 			}

-			username, isAdmin, isSupporter := "", true, true
-			if user := auth.GetUser(r.Context()); user != nil {
-				username = user.Username
-				isAdmin = user.HasRole(auth.RoleAdmin)
-				isSupporter = user.HasRole(auth.RoleSupport)
-			}
+			// Get User -> What if NIL?
+			user := auth.GetUser(r.Context())
+			// Get Roles
+			availableRoles, _ := auth.GetValidRolesMap(user)

 			page := web.Page{
 				Title: title,
-				User:  web.User{Username: username, IsAdmin: isAdmin, IsSupporter: isSupporter},
+				User:  *user,
+				Roles: availableRoles,
 				Build: web.Build{Version: version, Hash: hash, Buildtime: buildTime},
 				Config: conf,
 				Infos:  infos,
@@ -294,3 +299,67 @@ func SetupRoutes(router *mux.Router, version string, hash string, buildTime stri
 		})
 	}
 }

+func HandleSearchBar(rw http.ResponseWriter, r *http.Request, api *api.RestApi) {
+	if search := r.URL.Query().Get("searchId"); search != "" {
+		user := auth.GetUser(r.Context())
+		splitSearch := strings.Split(search, ":")
+
+		if len(splitSearch) == 2 {
+			switch strings.Trim(splitSearch[0], " ") {
+			case "jobId":
+				http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect) // All Users: Redirect to Tablequery
+			case "jobName":
+				http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect) // All Users: Redirect to Tablequery
+			case "projectId":
+				http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect) // All Users: Redirect to Tablequery
+			case "username":
+				if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
+					http.Redirect(rw, r, "/monitoring/users/?user="+url.QueryEscape(strings.Trim(splitSearch[1], " ")), http.StatusTemporaryRedirect)
+				} else {
+					http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Users: Redirect to Tablequery
+				}
+			case "name":
+				usernames, _ := api.JobRepository.FindColumnValues(user, strings.Trim(splitSearch[1], " "), "user", "username", "name")
+				if len(usernames) != 0 {
+					joinedNames := strings.Join(usernames, "&user=")
+					http.Redirect(rw, r, "/monitoring/users/?user="+joinedNames, http.StatusTemporaryRedirect)
+				} else {
+					if user.HasAnyRole([]auth.Role{auth.RoleAdmin, auth.RoleSupport, auth.RoleManager}) {
+						http.Redirect(rw, r, "/monitoring/users/?user=NoUserNameFound", http.StatusTemporaryRedirect)
+					} else {
+						http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Users: Redirect to Tablequery
+					}
+				}
+			default:
+				log.Warnf("Searchbar type parameter '%s' unknown", strings.Trim(splitSearch[0], " "))
+				http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Unknown: Redirect to Tablequery
+			}
+
+		} else if len(splitSearch) == 1 {
+			username, project, jobname, err := api.JobRepository.FindUserOrProjectOrJobname(r.Context(), strings.Trim(search, " "))
+
+			if err != nil {
+				log.Errorf("Error while searchbar best guess: %v", err.Error())
+				http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Unknown: Redirect to Tablequery
+			}
+
+			if username != "" {
+				http.Redirect(rw, r, "/monitoring/user/"+username, http.StatusTemporaryRedirect) // User: Redirect to user page
+			} else if project != "" {
+				http.Redirect(rw, r, "/monitoring/jobs/?projectMatch=eq&project="+url.QueryEscape(strings.Trim(search, " ")), http.StatusTemporaryRedirect) // projectId (equal)
+			} else if jobname != "" {
+				http.Redirect(rw, r, "/monitoring/jobs/?jobName="+url.QueryEscape(strings.Trim(search, " ")), http.StatusTemporaryRedirect) // JobName (contains)
+			} else {
+				http.Redirect(rw, r, "/monitoring/jobs/?jobId="+url.QueryEscape(strings.Trim(search, " ")), http.StatusTemporaryRedirect) // No Result: Probably jobId
+			}
+
+		} else {
+			log.Warnf("Searchbar query parameters malformed: %v", search)
+			http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect) // Unknown: Redirect to Tablequery
+		}
+
+	} else {
+		http.Redirect(rw, r, "/monitoring/jobs/?", http.StatusTemporaryRedirect)
+	}
+}
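The search handler above keys off a single searchId query parameter: a "type: term" pair selects an explicit column, anything without a colon goes through the best-guess lookup, and more than one colon is treated as malformed. A standalone sketch of just the splitting step (classify is illustrative, not part of the handler):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // classify mimics the prefix handling in HandleSearchBar: exactly one ":"
    // selects a typed search, no ":" falls through to the best-guess path.
    func classify(search string) (kind, term string) {
    	parts := strings.Split(search, ":")
    	switch len(parts) {
    	case 2:
    		return strings.Trim(parts[0], " "), strings.Trim(parts[1], " ")
    	case 1:
    		return "bestguess", strings.Trim(search, " ")
    	default:
    		return "malformed", search
    	}
    }

    func main() {
    	fmt.Println(classify("jobId: 123456")) // jobId 123456
    	fmt.Println(classify("name:alice"))    // name alice
    	fmt.Println(classify("vasp"))          // bestguess vasp
    	fmt.Println(classify("a:b:c"))         // malformed a:b:c
    }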
@@ -14,6 +14,8 @@ import (
 	"strconv"
 	"strings"
 	"syscall"

+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )

 // Very simple and limited .env file reader.
@@ -22,6 +24,7 @@ import (
 func LoadEnv(file string) error {
 	f, err := os.Open(file)
 	if err != nil {
+		log.Error("Error while opening file")
 		return err
 	}

@@ -40,14 +43,14 @@ func LoadEnv(file string) error {
 		line = strings.TrimPrefix(line, "export ")
 		parts := strings.SplitN(line, "=", 2)
 		if len(parts) != 2 {
-			return fmt.Errorf("unsupported line: %#v", line)
+			return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
 		}

 		key := strings.TrimSpace(parts[0])
 		val := strings.TrimSpace(parts[1])
 		if strings.HasPrefix(val, "\"") {
 			if !strings.HasSuffix(val, "\"") {
-				return fmt.Errorf("unsupported line: %#v", line)
+				return fmt.Errorf("RUNTIME/SETUP > unsupported line: %#v", line)
 			}

 			runes := []rune(val[1 : len(val)-1])
@@ -65,7 +68,7 @@ func LoadEnv(file string) error {
 			case '"':
 				sb.WriteRune('"')
 			default:
-				return fmt.Errorf("unsupprorted escape sequence in quoted string: backslash %#v", runes[i])
+				return fmt.Errorf("RUNTIME/SETUP > unsupported escape sequence in quoted string: backslash %#v", runes[i])
 			}
 			continue
 		}
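To make the accepted grammar of this reader concrete: a line must have the KEY=value shape, an optional export prefix is stripped, and values may be double-quoted with a small escape set (the hunks above show \" explicitly). Two lines it accepts and one it rejects, with invented contents:

    export SESSION_KEY="67d829bf61dc5f87a73fd814e2c9f629"
    LOGLEVEL=info
    LOGLEVEL info    <- rejected: RUNTIME/SETUP > unsupported line (no '=')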
@@ -89,11 +92,13 @@ func DropPrivileges(username string, group string) error {
 	if group != "" {
 		g, err := user.LookupGroup(group)
 		if err != nil {
+			log.Warn("Error while looking up group")
 			return err
 		}

 		gid, _ := strconv.Atoi(g.Gid)
 		if err := syscall.Setgid(gid); err != nil {
+			log.Warn("Error while setting gid")
 			return err
 		}
 	}
@@ -101,11 +106,13 @@ func DropPrivileges(username string, group string) error {
 	if username != "" {
 		u, err := user.Lookup(username)
 		if err != nil {
+			log.Warn("Error while looking up user")
 			return err
 		}

 		uid, _ := strconv.Atoi(u.Uid)
 		if err := syscall.Setuid(uid); err != nil {
+			log.Warn("Error while setting uid")
 			return err
 		}
 	}
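One property of DropPrivileges worth spelling out: the group is dropped before the user, and that order is deliberate, because once the process has given up root via Setuid it no longer has the privilege to change its group. A hypothetical call site (package and account names are placeholders):

    // Sketch: drop root right after privileged setup such as port binding.
    if err := runtimeEnv.DropPrivileges("clustercockpit", "clustercockpit"); err != nil {
    	log.Fatalf("error while dropping privileges: %s", err.Error())
    }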
@@ -40,12 +40,15 @@ type JobContainer struct {

 var cache *lrucache.Cache = lrucache.New(128 * 1024 * 1024)
 var ar ArchiveBackend
+var useArchive bool

-func Init(rawConfig json.RawMessage) error {
+func Init(rawConfig json.RawMessage, disableArchive bool) error {
+	useArchive = !disableArchive
 	var kind struct {
 		Kind string `json:"kind"`
 	}
 	if err := json.Unmarshal(rawConfig, &kind); err != nil {
+		log.Warn("Error while unmarshaling raw config json")
 		return err
 	}

@@ -55,11 +58,12 @@ func Init(rawConfig json.RawMessage) error {
 	// case "s3":
 	// 	ar = &S3Archive{}
 	default:
-		return fmt.Errorf("unkown archive backend '%s''", kind.Kind)
+		return fmt.Errorf("ARCHIVE/ARCHIVE > unkown archive backend '%s''", kind.Kind)
 	}

 	version, err := ar.Init(rawConfig)
 	if err != nil {
+		log.Error("Error while initializing archiveBackend")
 		return err
 	}
 	log.Infof("Load archive version %d", version)
@@ -78,6 +82,7 @@ func LoadAveragesFromArchive(

 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
+		log.Warn("Error while loading job metadata from archiveBackend")
 		return err
 	}

@@ -96,6 +101,7 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {

 	metaFile, err := ar.LoadJobMeta(job)
 	if err != nil {
+		log.Warn("Error while loading job metadata from archiveBackend")
 		return nil, err
 	}

@@ -106,12 +112,13 @@ func GetStatistics(job *schema.Job) (map[string]schema.JobStatistics, error) {
 // in that JSON file. If the job is not archived, nothing is done.
 func UpdateTags(job *schema.Job, tags []*schema.Tag) error {

-	if job.State == schema.JobStateRunning {
+	if job.State == schema.JobStateRunning || !useArchive {
 		return nil
 	}

 	jobMeta, err := ar.LoadJobMeta(job)
 	if err != nil {
+		log.Warn("Error while loading job metadata from archiveBackend")
 		return err
 	}
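The archive package now takes an explicit opt-out: Init receives a disableArchive flag, and write paths such as UpdateTags silently return when the archive is off. A hypothetical startup call (the kind and path values are illustrative and only mirror the fsBackend config handled further below):

    rawCfg := json.RawMessage(`{"kind": "file", "path": "./var/job-archive"}`)
    if err := archive.Init(rawCfg, false); err != nil {
    	log.Fatalf("failed to initialize job archive: %s", err.Error())
    }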
@@ -9,6 +9,7 @@ import (
 	"fmt"

 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )

 var Clusters []*schema.Cluster
@@ -23,6 +24,7 @@ func initClusterConfig() error {

 		cluster, err := ar.LoadClusterCfg(c)
 		if err != nil {
+			log.Warnf("Error while loading cluster config for cluster '%v'", c)
 			return err
 		}

@@ -59,7 +61,7 @@ func initClusterConfig() error {

 			nl, err := ParseNodeList(sc.Nodes)
 			if err != nil {
-				return fmt.Errorf("in %s/cluster.json: %w", cluster.Name, err)
+				return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > in %s/cluster.json: %w", cluster.Name, err)
 			}
 			nodeLists[cluster.Name][sc.Name] = nl
 		}
@@ -111,7 +113,7 @@ func AssignSubCluster(job *schema.BaseJob) error {

 	cluster := GetCluster(job.Cluster)
 	if cluster == nil {
-		return fmt.Errorf("unkown cluster: %#v", job.Cluster)
+		return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", job.Cluster)
 	}

 	if job.SubCluster != "" {
@@ -120,11 +122,11 @@ func AssignSubCluster(job *schema.BaseJob) error {
 				return nil
 			}
 		}
-		return fmt.Errorf("already assigned subcluster %#v unkown (cluster: %#v)", job.SubCluster, job.Cluster)
+		return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > already assigned subcluster %v unkown (cluster: %v)", job.SubCluster, job.Cluster)
 	}

 	if len(job.Resources) == 0 {
-		return fmt.Errorf("job without any resources/hosts")
+		return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > job without any resources/hosts")
 	}

 	host0 := job.Resources[0].Hostname
@@ -140,7 +142,7 @@ func AssignSubCluster(job *schema.BaseJob) error {
 			return nil
 		}
 	}

-	return fmt.Errorf("no subcluster found for cluster %#v and host %#v", job.Cluster, host0)
+	return fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", job.Cluster, host0)
 }

 func GetSubClusterByNode(cluster, hostname string) (string, error) {
@@ -153,12 +155,12 @@ func GetSubClusterByNode(cluster, hostname string) (string, error) {

 	c := GetCluster(cluster)
 	if c == nil {
-		return "", fmt.Errorf("unkown cluster: %#v", cluster)
+		return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > unkown cluster: %v", cluster)
 	}

 	if c.SubClusters[0].Nodes == "" {
 		return c.SubClusters[0].Name, nil
 	}

-	return "", fmt.Errorf("no subcluster found for cluster %#v and host %#v", cluster, hostname)
+	return "", fmt.Errorf("ARCHIVE/CLUSTERCONFIG > no subcluster found for cluster %v and host %v", cluster, hostname)
 }
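Both resolver paths above share one contract: a job is mapped to a subcluster either through an explicit SubCluster name or by matching its first hostname against the configured node lists. A hypothetical lookup (cluster and host names invented):

    // Resolve the subcluster for one node; errors now carry the
    // ARCHIVE/CLUSTERCONFIG > prefix introduced in this commit.
    subCluster, err := archive.GetSubClusterByNode("emmy", "e0521")
    if err != nil {
    	log.Warnf("no subcluster match: %v", err)
    }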
@@ -55,7 +55,7 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) {

 	b, err := os.ReadFile(filename)
 	if err != nil {
-		log.Errorf("fsBackend loadJobMeta()- %v", err)
+		log.Errorf("loadJobMeta() > open file error: %v", err)
 		return &schema.JobMeta{}, err
 	}
 	if config.Keys.Validate {
@@ -105,12 +105,12 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {

 	var config FsArchiveConfig
 	if err := json.Unmarshal(rawConfig, &config); err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		log.Warnf("Init() > Unmarshal error: %#v", err)
 		return 0, err
 	}
 	if config.Path == "" {
-		err := fmt.Errorf("fsBackend Init()- empty path")
-		log.Errorf("fsBackend Init()- %v", err)
+		err := fmt.Errorf("Init() : empty config.Path")
+		log.Errorf("Init() > config.Path error: %v", err)
 		return 0, err
 	}
 	fsa.path = config.Path
@@ -133,7 +133,7 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {

 	entries, err := os.ReadDir(fsa.path)
 	if err != nil {
-		log.Errorf("fsBackend Init()- %v", err)
+		log.Errorf("Init() > ReadDir() error: %v", err)
 		return 0, err
 	}

@@ -150,10 +150,10 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) (int, error) {
 func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) {
 	var isCompressed bool = true
 	filename := getPath(job, fsa.path, "data.json.gz")

 	if !checkFileExists(filename) {
 		filename = getPath(job, fsa.path, "data.json")
 		isCompressed = false
+		return nil, err
 	}

 	return loadJobData(filename, isCompressed)
@@ -169,12 +169,13 @@ func (fsa *FsArchive) LoadClusterCfg(name string) (*schema.Cluster, error) {

 	b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
 	if err != nil {
-		log.Errorf("fsBackend LoadClusterCfg()- %v", err)
+		log.Errorf("LoadClusterCfg() > open file error: %v", err)
 		return &schema.Cluster{}, err
 	}
 	// if config.Keys.Validate {
 	if err := schema.Validate(schema.ClusterCfg, bytes.NewReader(b)); err != nil {
+		log.Warnf("Validate cluster config: %v\n", err)
 		return &schema.Cluster{}, fmt.Errorf("validate cluster config: %v", err)
+	}
 	}
 	// }
 	return DecodeCluster(bytes.NewReader(b))
@@ -186,7 +187,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 	go func() {
 		clustersDir, err := os.ReadDir(fsa.path)
 		if err != nil {
-			log.Fatalf("Reading clusters failed: %s", err.Error())
+			log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
 		}

 		for _, clusterDir := range clustersDir {
@@ -195,7 +196,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 			}
 			lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
 			if err != nil {
-				log.Fatalf("Reading jobs failed: %s", err.Error())
+				log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 			}

 			for _, lvl1Dir := range lvl1Dirs {
@@ -206,19 +207,18 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {

 				lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
 				if err != nil {
-					log.Fatalf("Reading jobs failed: %s", err.Error())
+					log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 				}

 				for _, lvl2Dir := range lvl2Dirs {
 					dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 					startTimeDirs, err := os.ReadDir(dirpath)
 					if err != nil {
-						log.Fatalf("Reading jobs failed: %s", err.Error())
+						log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 					}

 					for _, startTimeDir := range startTimeDirs {
 						if startTimeDir.IsDir() {
-							job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
 							if err != nil && !errors.Is(err, &jsonschema.ValidationError{}) {
 								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							}
@@ -237,6 +237,7 @@ func (fsa *FsArchive) Iter(loadMetricData bool) <-chan JobContainer {
 								log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							}
 							ch <- JobContainer{Meta: job, Data: &data}
+							log.Errorf("in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 						} else {
 							ch <- JobContainer{Meta: job, Data: nil}
 						}
@@ -259,12 +260,15 @@ func (fsa *FsArchive) StoreJobMeta(jobMeta *schema.JobMeta) error {
 	}
 	f, err := os.Create(getPath(&job, fsa.path, "meta.json"))
 	if err != nil {
+		log.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, jobMeta); err != nil {
+		log.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
+		log.Warn("Error while closing meta.json file")
 		return err
 	}

@@ -287,17 +291,21 @@ func (fsa *FsArchive) ImportJob(
 	}
 	dir := getPath(&job, fsa.path, "")
 	if err := os.MkdirAll(dir, 0777); err != nil {
+		log.Error("Error while creating job archive path")
 		return err
 	}

 	f, err := os.Create(path.Join(dir, "meta.json"))
 	if err != nil {
+		log.Error("Error while creating filepath for meta.json")
 		return err
 	}
 	if err := EncodeJobMeta(f, jobMeta); err != nil {
+		log.Error("Error while encoding job metadata to meta.json file")
 		return err
 	}
 	if err := f.Close(); err != nil {
+		log.Warn("Error while closing meta.json file")
 		return err
 	}

@@ -325,11 +333,17 @@ func (fsa *FsArchive) ImportJob(

 	f, err = os.Create(path.Join(dir, "data.json"))
 	if err != nil {
+		log.Error("Error while creating filepath for data.json")
 		return err
 	}
 	if err := EncodeJobData(f, jobData); err != nil {
+		log.Error("Error while encoding job metricdata to data.json file")
 		return err
 	}
+	if err := f.Close(); err != nil {
+		log.Warn("Error while closing data.json file")
+		return err
+	}
 	return f.Close()
 }
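For orientation while reading Iter() above: the file backend walks a fixed directory hierarchy of cluster, two job-id levels, and start time, with each leaf holding meta.json and either data.json or data.json.gz. A plausible on-disk layout (all names invented; the exact split of the job id across the two lvl directories is an assumption based only on the loops shown here):

    <archive-path>/
        emmy/                  cluster
            1404/              job id, upper part (assumption)
                397/           job id, lower part (assumption)
                    1609300556/    start time as unix epoch
                        meta.json
                        data.json.gz   (or uncompressed data.json)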
@@ -10,9 +10,14 @@ import (
 	"testing"
 	"time"

+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 )

+func init() {
+	log.Init("info", true)
+}

 func TestInitEmptyPath(t *testing.T) {
 	var fsa FsArchive
 	_, err := fsa.Init(json.RawMessage("{\"kind\":\"../../test/archive\"}"))
@@ -10,12 +10,14 @@ import (
 	"time"

 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )

 func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	data := cache.Get(k, func() (value interface{}, ttl time.Duration, size int) {
 		var d schema.JobData
 		if err := json.NewDecoder(r).Decode(&d); err != nil {
+			log.Warn("Error while decoding raw job data json")
 			return err, 0, 1000
 		}

@@ -23,6 +25,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 	})

 	if err, ok := data.(error); ok {
+		log.Warn("Error in decoded job data set")
 		return nil, err
 	}

@@ -32,6 +35,7 @@ func DecodeJobData(r io.Reader, k string) (schema.JobData, error) {
 func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
 	var d schema.JobMeta
 	if err := json.NewDecoder(r).Decode(&d); err != nil {
+		log.Warn("Error while decoding raw job meta json")
 		return &d, err
 	}

@@ -43,6 +47,7 @@ func DecodeJobMeta(r io.Reader) (*schema.JobMeta, error) {
 func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 	var c schema.Cluster
 	if err := json.NewDecoder(r).Decode(&c); err != nil {
+		log.Warn("Error while decoding raw cluster json")
 		return &c, err
 	}

@@ -54,6 +59,7 @@ func DecodeCluster(r io.Reader) (*schema.Cluster, error) {
 func EncodeJobData(w io.Writer, d *schema.JobData) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
+		log.Warn("Error while encoding new job data json")
 		return err
 	}

@@ -63,6 +69,7 @@ func EncodeJobData(w io.Writer, d *schema.JobData) error {
 func EncodeJobMeta(w io.Writer, d *schema.JobMeta) error {
 	// Sanitize parameters
 	if err := json.NewEncoder(w).Encode(d); err != nil {
+		log.Warn("Error while encoding new job meta json")
 		return err
 	}
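The encode/decode helpers are symmetric, so a round trip is the quickest sanity check. A sketch (it assumes a populated *schema.JobMeta named meta is in scope and that bytes is imported):

    var buf bytes.Buffer
    if err := archive.EncodeJobMeta(&buf, meta); err != nil {
    	log.Warnf("encode failed: %v", err)
    }
    decoded, err := archive.DecodeJobMeta(&buf)
    if err != nil {
    	log.Warnf("decode failed: %v", err)
    }
    _ = decoded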
@@ -129,7 +129,7 @@ type NLExprIntRange struct {

 func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
 	if !nle.zeroPadded || nle.digits < 1 {
-		log.Error("node list: only zero-padded ranges are allowed")
+		log.Error("only zero-padded ranges are allowed")
 		return "", false
 	}

@@ -178,6 +178,7 @@ func (nles NLExprIntRange) prefix() string {
 func ParseNodeList(raw string) (NodeList, error) {
 	isLetter := func(r byte) bool { return ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') }
 	isDigit := func(r byte) bool { return '0' <= r && r <= '9' }
+	isDash := func(r byte) bool { return r == '-' }

 	rawterms := []string{}
 	prevterm := 0
@@ -187,7 +188,7 @@ func ParseNodeList(raw string) (NodeList, error) {
 				i++
 			}
 			if i == len(raw) {
-				return nil, fmt.Errorf("node list: unclosed '['")
+				return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
 			}
 		} else if raw[i] == ',' {
 			rawterms = append(rawterms, raw[prevterm:i])
@@ -205,41 +206,47 @@ func ParseNodeList(raw string) (NodeList, error) {
 			limits() []map[string]int
 			prefix() string
 		}{}

 		for i := 0; i < len(rawterm); i++ {
 			c := rawterm[i]
 			if isLetter(c) || isDigit(c) {
 				j := i
-				for j < len(rawterm) && (isLetter(rawterm[j]) || isDigit(rawterm[j])) {
+				for j < len(rawterm) &&
+					(isLetter(rawterm[j]) ||
+						isDigit(rawterm[j]) ||
+						isDash(rawterm[j])) {
 					j++
 				}
 				exprs = append(exprs, NLExprString(rawterm[i:j]))
 				i = j - 1
 			} else if c == '[' {
 				end := strings.Index(rawterm[i:], "]")

 				if end == -1 {
-					return nil, fmt.Errorf("node list: unclosed '['")
+					return nil, fmt.Errorf("ARCHIVE/NODELIST > unclosed '['")
 				}

 				parts := strings.Split(rawterm[i+1:i+end], ",")
 				nles := NLExprIntRanges{}

 				for _, part := range parts {
 					minus := strings.Index(part, "-")
 					if minus == -1 {
-						return nil, fmt.Errorf("node list: no '-' found inside '[...]'")
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > no '-' found inside '[...]'")
 					}

 					s1, s2 := part[0:minus], part[minus+1:]
 					if len(s1) != len(s2) || len(s1) == 0 {
-						return nil, fmt.Errorf("node list: %#v and %#v are not of equal length or of length zero", s1, s2)
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > %v and %v are not of equal length or of length zero", s1, s2)
 					}

 					x1, err := strconv.ParseInt(s1, 10, 32)
 					if err != nil {
-						return nil, fmt.Errorf("node list: %w", err)
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
 					}
 					x2, err := strconv.ParseInt(s2, 10, 32)
 					if err != nil {
-						return nil, fmt.Errorf("node list: %w", err)
+						return nil, fmt.Errorf("ARCHIVE/NODELIST > could not parse int: %w", err)
 					}

 					nles = append(nles, NLExprIntRange{
@@ -253,7 +260,7 @@ func ParseNodeList(raw string) (NodeList, error) {
 			exprs = append(exprs, nles)
 			i += end
 		} else {
-			return nil, fmt.Errorf("node list: invalid character: %#v", rune(c))
+			return nil, fmt.Errorf("ARCHIVE/NODELIST > invalid character: %#v", rune(c))
 		}
 	}
 	nl = append(nl, exprs)
@@ -57,3 +57,19 @@ func TestNodeListCommasInBrackets(t *testing.T) {
 		t.Fatal("4")
 	}
 }
+
+func TestNodeListCommasOutsideBrackets(t *testing.T) {
+	nl, err := ParseNodeList("cn-0010,cn0011,cn-00[13-18,22-24]")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !nl.Contains("cn-0010") || !nl.Contains("cn0011") {
+		t.Fatal("1")
+	}
+	if !nl.Contains("cn-0013") ||
+		!nl.Contains("cn-0015") ||
+		!nl.Contains("cn-0022") ||
+		!nl.Contains("cn-0018") {
+		t.Fatal("2")
+	}
+}
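In short, the grammar change above: literal terms may now contain dashes, and a bracket group may hold several comma-separated zero-padded ranges. A usage sketch matching the new test:

    // "cn-0010,cn0011,cn-00[13-18,22-24]" names two literal hosts plus
    // the zero-padded ranges cn-0013..cn-0018 and cn-0022..cn-0024.
    nl, err := archive.ParseNodeList("cn-0010,cn0011,cn-00[13-18,22-24]")
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println(nl.Contains("cn-0015")) // true
    fmt.Println(nl.Contains("cn-0021")) // false: 21 falls in neither range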
181
pkg/log/log.go
@@ -12,8 +12,8 @@ import (
 )

 // Provides a simple way of logging with different levels.
-// Time/Data are not logged on purpose because systemd adds
-// them for us.
+// Time/Date are not logged because systemd adds
+// them for us (Default, can be changed by flag '--logdate true').
 //
 // Uses these prefixes: https://www.freedesktop.org/software/systemd/man/sd-daemon.html

@@ -22,109 +22,162 @@ var (
 	InfoWriter io.Writer = os.Stderr
 	WarnWriter io.Writer = os.Stderr
 	ErrWriter  io.Writer = os.Stderr
+	CritWriter io.Writer = os.Stderr
 )

 var (
 	DebugPrefix string = "<7>[DEBUG] "
 	InfoPrefix  string = "<6>[INFO] "
 	WarnPrefix  string = "<4>[WARNING] "
 	ErrPrefix   string = "<3>[ERROR] "
+	CritPrefix  string = "<2>[CRITICAL] "
 )

 var (
-	DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, 0)
-	InfoLog  *log.Logger = log.New(InfoWriter, InfoPrefix, 0)
-	WarnLog  *log.Logger = log.New(WarnWriter, WarnPrefix, 0)
-	ErrLog   *log.Logger = log.New(ErrWriter, ErrPrefix, 0)
+	DebugLog *log.Logger
+	InfoLog  *log.Logger
+	WarnLog  *log.Logger
+	ErrLog   *log.Logger
+	CritLog  *log.Logger
 )

-func init() {
-	if lvl, ok := os.LookupEnv("LOGLEVEL"); ok {
-		switch lvl {
-		case "err", "fatal":
-			WarnWriter = io.Discard
-			fallthrough
-		case "warn":
-			InfoWriter = io.Discard
-			fallthrough
-		case "info":
-			DebugWriter = io.Discard
-		case "debug":
-			// Nothing to do...
-		default:
-			Warnf("environment variable LOGLEVEL has invalid value %#v", lvl)
-		}
+/* CONFIG */
+
+func Init(lvl string, logdate bool) {
+	switch lvl {
+	case "crit":
+		ErrWriter = io.Discard
+		fallthrough
+	case "err", "fatal":
+		WarnWriter = io.Discard
+		fallthrough
+	case "warn":
+		InfoWriter = io.Discard
+		fallthrough
+	case "info":
+		DebugWriter = io.Discard
+	case "debug":
+		// Nothing to do...
+		break
+	default:
+		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel 'debug'\n", lvl)
+		//SetLogLevel("debug")
+	}
+
+	if !logdate {
+		DebugLog = log.New(DebugWriter, DebugPrefix, 0)
+		InfoLog = log.New(InfoWriter, InfoPrefix, log.Lshortfile)
+		WarnLog = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
+		ErrLog = log.New(ErrWriter, ErrPrefix, log.Llongfile)
+		CritLog = log.New(CritWriter, CritPrefix, log.Llongfile)
+	} else {
+		DebugLog = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
+		InfoLog = log.New(InfoWriter, InfoPrefix, log.LstdFlags|log.Lshortfile)
+		WarnLog = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
+		ErrLog = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
+		CritLog = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
 	}
 }

-func Debug(v ...interface{}) {
-	if DebugWriter != io.Discard {
-		DebugLog.Print(v...)
-	}
-}
-
-func Info(v ...interface{}) {
-	if InfoWriter != io.Discard {
-		InfoLog.Print(v...)
-	}
+/* PRINT */
+
+// Private helper
+func printStr(v ...interface{}) string {
+	return fmt.Sprint(v...)
 }

+// Uses Info() -> If errorpath required at some point:
+// Will need own writer with 'Output(2, out)' to correctly render path
 func Print(v ...interface{}) {
 	Info(v...)
 }

+func Debug(v ...interface{}) {
+	DebugLog.Output(2, printStr(v...))
+}
+
+func Info(v ...interface{}) {
+	InfoLog.Output(2, printStr(v...))
+}
+
 func Warn(v ...interface{}) {
-	if WarnWriter != io.Discard {
-		WarnLog.Print(v...)
-	}
+	WarnLog.Output(2, printStr(v...))
 }

 func Error(v ...interface{}) {
-	if ErrWriter != io.Discard {
-		ErrLog.Print(v...)
-	}
+	ErrLog.Output(2, printStr(v...))
 }

+// Writes panic stacktrace, but keeps application alive
+func Panic(v ...interface{}) {
+	ErrLog.Output(2, printStr(v...))
+	panic("Panic triggered ...")
+}
+
+func Crit(v ...interface{}) {
+	CritLog.Output(2, printStr(v...))
+}
+
+// Writes critical log, stops application
 func Fatal(v ...interface{}) {
-	Error(v...)
+	CritLog.Output(2, printStr(v...))
 	os.Exit(1)
 }

-func Debugf(format string, v ...interface{}) {
-	if DebugWriter != io.Discard {
-		DebugLog.Printf(format, v...)
-	}
-}
-
-func Infof(format string, v ...interface{}) {
-	if InfoWriter != io.Discard {
-		InfoLog.Printf(format, v...)
-	}
+/* PRINT FORMAT*/
+
+// Private helper
+func printfStr(format string, v ...interface{}) string {
+	return fmt.Sprintf(format, v...)
 }

+// Uses Infof() -> If errorpath required at some point:
+// Will need own writer with 'Output(2, out)' to correctly render path
 func Printf(format string, v ...interface{}) {
 	Infof(format, v...)
 }

-func Finfof(w io.Writer, format string, v ...interface{}) {
-	if w != io.Discard {
-		fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
-	}
+func Debugf(format string, v ...interface{}) {
+	DebugLog.Output(2, printfStr(format, v...))
+}
+
+func Infof(format string, v ...interface{}) {
+	InfoLog.Output(2, printfStr(format, v...))
 }

 func Warnf(format string, v ...interface{}) {
-	if WarnWriter != io.Discard {
-		WarnLog.Printf(format, v...)
-	}
+	WarnLog.Output(2, printfStr(format, v...))
 }

 func Errorf(format string, v ...interface{}) {
-	if ErrWriter != io.Discard {
-		ErrLog.Printf(format, v...)
-	}
+	ErrLog.Output(2, printfStr(format, v...))
 }

+// Writes panic stacktrace, but keeps application alive
+func Panicf(format string, v ...interface{}) {
+	ErrLog.Output(2, printfStr(format, v...))
+	panic("Panic triggered ...")
+}
+
+func Critf(format string, v ...interface{}) {
+	CritLog.Output(2, printfStr(format, v...))
+}
+
+// Writes crit log, stops application
 func Fatalf(format string, v ...interface{}) {
-	Errorf(format, v...)
+	CritLog.Output(2, printfStr(format, v...))
 	os.Exit(1)
 }

+/* SPECIAL */
+
+// func Finfof(w io.Writer, format string, v ...interface{}) {
+// 	if w != io.Discard {
+// 		if logDateTime {
+// 			currentTime := time.Now()
+// 			fmt.Fprintf(InfoWriter, currentTime.String()+InfoPrefix+format+"\n", v...)
+// 		} else {
+// 			fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
+// 		}
+// 	}
+// }
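The logger wiring thus moves out of an init() that read the LOGLEVEL environment variable and into an explicit Init(level, logdate) call. A caller now does something like this at startup (the CLI flag names feeding the two arguments are illustrative):

    // e.g. driven by --loglevel / --logdate command line flags
    log.Init("warn", false) // keep warnings and worse, no timestamps
    log.Warnf("loaded %d clusters", len(archive.Clusters))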
@@ -69,7 +69,7 @@ func (c *Cache) Get(key string, computeValue ComputeValue) interface{} {
 		if now.After(entry.expiration) {
 			if !c.evictEntry(entry) {
 				if entry.expiration.IsZero() {
-					panic("cache entry that shoud have been waited for could not be evicted.")
+					panic("LRUCACHE/CACHE > cache entry that shoud have been waited for could not be evicted.")
 				}
 				c.mutex.Unlock()
 				return entry.value
@@ -208,7 +208,7 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
 	size := 0
 	for key, e := range c.entries {
 		if key != e.key {
-			panic("key mismatch")
+			panic("LRUCACHE/CACHE > key mismatch")
 		}

 		if now.After(e.expiration) {
@@ -219,13 +219,13 @@ func (c *Cache) Keys(f func(key string, val interface{})) {

 		if e.prev != nil {
 			if e.prev.next != e {
-				panic("list corrupted")
+				panic("LRUCACHE/CACHE > list corrupted")
 			}
 		}

 		if e.next != nil {
 			if e.next.prev != e {
-				panic("list corrupted")
+				panic("LRUCACHE/CACHE > list corrupted")
 			}
 		}

@@ -234,18 +234,18 @@ func (c *Cache) Keys(f func(key string, val interface{})) {
 	}

 	if size != c.usedmemory {
-		panic("size calculations failed")
+		panic("LRUCACHE/CACHE > size calculations failed")
 	}

 	if c.head != nil {
 		if c.tail == nil || c.head.prev != nil {
-			panic("head/tail corrupted")
+			panic("LRUCACHE/CACHE > head/tail corrupted")
 		}
 	}

 	if c.tail != nil {
 		if c.head == nil || c.tail.next != nil {
-			panic("head/tail corrupted")
+			panic("LRUCACHE/CACHE > head/tail corrupted")
 		}
 	}
 }
@@ -281,7 +281,7 @@ func (c *Cache) unlinkEntry(e *cacheEntry) {

 func (c *Cache) evictEntry(e *cacheEntry) bool {
 	if e.waitingForComputation != 0 {
-		// panic("cannot evict this entry as other goroutines need the value")
+		// panic("LRUCACHE/CACHE > cannot evict this entry as other goroutines need the value")
 		return false
 	}
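As a reminder of the lrucache contract these panics guard: Get computes on miss via a closure that returns the value, its TTL, and a size estimate against the byte budget, and errors can be cached and unwrapped by type assertion, exactly as DecodeJobData does earlier in this commit. Sketch (expensiveLoad is a hypothetical loader returning ([]byte, error)):

    data := cache.Get(key, func() (interface{}, time.Duration, int) {
    	v, err := expensiveLoad(key)
    	if err != nil {
    		return err, 0, 1000 // errors are cached too: TTL 0, nominal size
    	}
    	return v, 24 * time.Hour, len(v)
    })
    if err, ok := data.(error); ok {
    	return nil, err
    }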
@@ -23,6 +23,16 @@ type JWTAuthConfig struct {
 	// Specifies for how long a session or JWT shall be valid
 	// as a string parsable by time.ParseDuration().
 	MaxAge int64 `json:"max-age"`
+
+	// Specifies which cookie should be checked for a JWT token (if no authorization header is present)
+	CookieName string `json:"cookieName"`
+
+	// Deny login for users not in database (but defined in JWT).
+	// Ignore user roles defined in JWTs ('roles' claim), get them from db.
+	ForceJWTValidationViaDatabase bool `json:"forceJWTValidationViaDatabase"`
+
+	// Specifies which issuer should be accepted when validating external JWTs ('iss' claim)
+	TrustedExternalIssuer string `json:"trustedExternalIssuer"`
 }

 type IntRange struct {
@@ -106,6 +116,9 @@ type ProgramConfig struct {
 	// If not zero, automatically mark jobs as stopped running X seconds longer than their walltime.
 	StopJobsExceedingWalltime int `json:"stop-jobs-exceeding-walltime"`

+	// Defines time X in seconds in which jobs are considered to be "short" and will be filtered in specific views.
+	ShortRunningJobsDuration int `json:"short-running-jobs-duration"`
+
 	// Array of Clusters
 	Clusters []*ClusterConfig `json:"clusters"`
 }
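Taken together, the new knobs surface in the JSON configuration roughly like this. This fragment is an assumption-laden sketch: the keys follow the json tags shown above, the nesting of the JWT block under a jwts key is inferred rather than shown in this diff, and all values are examples only:

    {
      "short-running-jobs-duration": 300,
      "jwts": {
        "max-age": 2592000,
        "cookieName": "token",
        "forceJWTValidationViaDatabase": true,
        "trustedExternalIssuer": "https://idp.example.org"
      }
    }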
@@ -9,6 +9,8 @@ import (
 	"io"
 	"math"
 	"strconv"

+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 )

 // A custom float type is used so that (Un)MarshalJSON and
@@ -43,6 +45,7 @@ func (f *Float) UnmarshalJSON(input []byte) error {

 	val, err := strconv.ParseFloat(s, 64)
 	if err != nil {
+		log.Warn("Error while parsing custom float")
 		return err
 	}
 	*f = Float(val)
@ -58,6 +58,15 @@ type Job struct {
|
|||||||
NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64
|
NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64
|
||||||
FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64
|
FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64
|
||||||
FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
|
FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
|
||||||
|
StartTime time.Time `json:"startTime"` // Start time as 'time.Time' data type
|
||||||
|
MemUsedMax float64 `json:"-" db:"mem_used_max"` // MemUsedMax as Float64
|
||||||
|
FlopsAnyAvg float64 `json:"-" db:"flops_any_avg"` // FlopsAnyAvg as Float64
|
||||||
|
MemBwAvg float64 `json:"-" db:"mem_bw_avg"` // MemBwAvg as Float64
|
||||||
|
LoadAvg float64 `json:"-" db:"load_avg"` // LoadAvg as Float64
|
||||||
|
NetBwAvg float64 `json:"-" db:"net_bw_avg"` // NetBwAvg as Float64
|
||||||
|
NetDataVolTotal float64 `json:"-" db:"net_data_vol_total"` // NetDataVolTotal as Float64
|
||||||
|
FileBwAvg float64 `json:"-" db:"file_bw_avg"` // FileBwAvg as Float64
|
||||||
|
FileDataVolTotal float64 `json:"-" db:"file_data_vol_total"` // FileDataVolTotal as Float64
|
||||||
}
|
}
|
||||||
|
|
||||||
// Non-Swaggered Comment: JobMeta
|
// Non-Swaggered Comment: JobMeta
|
||||||
@ -75,6 +84,7 @@ type JobMeta struct {
|
|||||||
BaseJob
|
BaseJob
|
||||||
StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0)
|
StartTime int64 `json:"startTime" db:"start_time" example:"1649723812" minimum:"1"` // Start epoch time stamp in seconds (Min > 0)
|
||||||
Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
|
Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
|
||||||
|
Statistics map[string]JobStatistics `json:"statistics,omitempty"` // Metric statistics of job
|
||||||
}
|
}
|
||||||
|
|
||||||
 const (
@@ -106,15 +116,20 @@ type JobStatistics struct {
 // Tag model
 // @Description Defines a tag using name and type.
 type Tag struct {
+	// The unique DB identifier of a tag
 	// The unique DB identifier of a tag
 	ID   int64  `json:"id" db:"id"`
 	Type string `json:"type" db:"tag_type" example:"Debug"`   // Tag Type
+	Type string `json:"type" db:"tag_type" example:"Debug"`   // Tag Type
 	Name string `json:"name" db:"tag_name" example:"Testjob"` // Tag Name
 }
 
 // Resource model
 // @Description A resource used by a job
 type Resource struct {
+	Hostname     string   `json:"hostname"`               // Name of the host (= node)
+	HWThreads    []int    `json:"hwthreads,omitempty"`    // List of OS processor ids
+	Accelerators []string `json:"accelerators,omitempty"` // List of of accelerator device ids
 	Hostname     string   `json:"hostname"`               // Name of the host (= node)
 	HWThreads    []int    `json:"hwthreads,omitempty"`    // List of OS processor ids
 	Accelerators []string `json:"accelerators,omitempty"` // List of of accelerator device ids
@@ -137,12 +152,12 @@ const (
 func (e *JobState) UnmarshalGQL(v interface{}) error {
 	str, ok := v.(string)
 	if !ok {
-		return fmt.Errorf("enums must be strings")
+		return fmt.Errorf("SCHEMA/JOB > enums must be strings")
 	}
 
 	*e = JobState(str)
 	if !e.Valid() {
-		return errors.New("invalid job state")
+		return errors.New("SCHEMA/JOB > invalid job state")
 	}
 
 	return nil
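
The new "SCHEMA/JOB >" prefixes make log and error origins greppable. The method itself follows gqlgen's enum-unmarshalling contract; a self-contained sketch of the pattern (the value set here is assumed for illustration):

```go
package main

import "fmt"

// JobState mirrors the enum pattern patched above (sketch only).
type JobState string

const (
	JobStateRunning   JobState = "running"
	JobStateCompleted JobState = "completed"
	JobStateFailed    JobState = "failed"
)

func (e JobState) Valid() bool {
	return e == JobStateRunning || e == JobStateCompleted || e == JobStateFailed
}

// UnmarshalGQL receives the raw GraphQL value and must reject non-strings
// before validating the enum value, exactly as in the diff above.
func (e *JobState) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("SCHEMA/JOB > enums must be strings")
	}
	*e = JobState(str)
	if !e.Valid() {
		return fmt.Errorf("SCHEMA/JOB > invalid job state: %s", str)
	}
	return nil
}

func main() {
	var s JobState
	fmt.Println(s.UnmarshalGQL("running"), s) // <nil> running
	fmt.Println(s.UnmarshalGQL(42))           // SCHEMA/JOB > enums must be strings
}
```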
@@ -91,12 +91,12 @@ func (e *MetricScope) Max(other MetricScope) MetricScope {
 func (e *MetricScope) UnmarshalGQL(v interface{}) error {
 	str, ok := v.(string)
 	if !ok {
-		return fmt.Errorf("enums must be strings")
+		return fmt.Errorf("SCHEMA/METRICS > enums must be strings")
 	}
 
 	*e = MetricScope(str)
 	if !e.Valid() {
-		return fmt.Errorf("%s is not a valid MetricScope", str)
+		return fmt.Errorf("SCHEMA/METRICS > %s is not a valid MetricScope", str)
 	}
 	return nil
 }
@@ -297,7 +297,7 @@ func (jm *JobMetric) AddPercentiles(ps []int) bool {
 
 	for _, p := range ps {
 		if p < 1 || p > 99 {
-			panic("invalid percentile")
+			panic("SCHEMA/METRICS > invalid percentile")
 		}
 
 		if _, ok := jm.StatisticsSeries.Percentiles[p]; ok {
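
A self-contained sketch of the bounds check above, applied to a simple nearest-rank percentile (illustrative only; not the package's statistics-series logic):

```go
package main

import (
	"fmt"
	"sort"
)

// percentile returns the nearest-rank p-th percentile of data,
// rejecting out-of-range p the same way AddPercentiles does above.
func percentile(data []float64, p int) float64 {
	if p < 1 || p > 99 {
		panic("SCHEMA/METRICS > invalid percentile")
	}
	sorted := append([]float64(nil), data...)
	sort.Float64s(sorted)
	rank := (p*len(sorted) + 99) / 100 // nearest rank, rounded up
	return sorted[rank-1]
}

func main() {
	series := []float64{3, 1, 4, 1, 5, 9, 2, 6}
	fmt.Println(percentile(series, 50)) // 3
}
```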
@@ -76,6 +76,10 @@
 			"description": "If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job.",
 			"type": "integer"
 		},
+		"short-running-jobs-duration": {
+			"description": "Do not show running jobs shorter than X seconds.",
+			"type": "integer"
+		},
 		"": {
 			"description": "",
 			"type": "string"
@@ -138,7 +142,7 @@
 		"kind": {
 			"type": "string",
 			"enum": [
-				"influxdb-v2",
+				"influxdb",
 				"prometheus",
 				"cc-metric-store",
 				"test"
@@ -241,10 +245,6 @@
 			"description": "Jobs shown per page in job lists",
 			"type": "integer"
 		},
-		"plot_list_hideShortRunningJobs": {
-			"description": "Do not show running jobs shorter than X seconds",
-			"type": "integer"
-		},
 		"plot_view_plotsPerRow": {
 			"description": "Number of plots per row in single job view",
 			"type": "integer"
@@ -342,8 +342,7 @@
 			"job_view_polarPlotMetrics",
 			"job_view_selectedMetrics",
 			"plot_general_colorscheme",
-			"plot_list_selectedMetrics",
-			"plot_list_hideShortRunningJobs"
+			"plot_list_selectedMetrics"
 		]
 	}
 },
@@ -45,21 +45,22 @@ func Validate(k Kind, r io.Reader) (err error) {
 	case Config:
 		s, err = jsonschema.Compile("embedfs://config.schema.json")
 	default:
-		return fmt.Errorf("unkown schema kind ")
+		return fmt.Errorf("SCHEMA/VALIDATE > unkown schema kind: %#v", k)
 	}
 
 	if err != nil {
+		log.Errorf("Error while compiling json schema for kind '%#v'", k)
 		return err
 	}
 
 	var v interface{}
 	if err := json.NewDecoder(r).Decode(&v); err != nil {
-		log.Errorf("schema.Validate() - Failed to decode %v", err)
+		log.Warnf("Error while decoding raw json schema: %#v", err)
 		return err
 	}
 
 	if err = s.Validate(v); err != nil {
-		return fmt.Errorf("%#v", err)
+		return fmt.Errorf("SCHEMA/VALIDATE > %#v", err)
 	}
 
 	return nil
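
A hypothetical caller of Validate, matching the signature and the Config kind shown in the hunk (whether these identifiers are exported exactly like this is an assumption, as is the example payload):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ClusterCockpit/cc-backend/pkg/schema"
)

func main() {
	// Example payload; keys are illustrative, not a complete config.
	raw := []byte(`{"addr": "0.0.0.0:8080"}`)

	// Validate decodes the reader and checks it against the embedded
	// config.schema.json (see the hunk above).
	if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil {
		fmt.Println("config rejected:", err) // e.g. "SCHEMA/VALIDATE > ..."
		return
	}
	fmt.Println("config accepted")
}
```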
@@ -281,7 +281,7 @@ func GetUnitUnitFactor(in Unit, out Unit) (func(value interface{}) interface{},
 	} else if in.getMeasure() == TemperatureF && out.getMeasure() == TemperatureC {
 		return convertTempF2TempC, nil
 	} else if in.getMeasure() != out.getMeasure() || in.getUnitDenominator() != out.getUnitDenominator() {
-		return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("invalid measures in in and out Unit")
+		return func(value interface{}) interface{} { return 1.0 }, fmt.Errorf("UNITS/UNITS > invalid measures in in and out Unit")
	}
 	return GetPrefixPrefixFactor(in.getPrefix(), out.getPrefix()), nil
 }
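
Not the package's code: a self-contained sketch of the prefix-factor idea that GetPrefixPrefixFactor implements (the prefix table and function shape are assumptions for illustration):

```go
package main

import "fmt"

// SI prefixes mapped to their scale factors (subset, assumed).
var prefixFactor = map[string]float64{"": 1, "K": 1e3, "M": 1e6, "G": 1e9}

// getPrefixPrefixFactor returns a closure scaling a value from prefix `in`
// to prefix `out`, mirroring the call at the end of the hunk above.
func getPrefixPrefixFactor(in, out string) func(v float64) float64 {
	ratio := prefixFactor[in] / prefixFactor[out]
	return func(v float64) float64 { return v * ratio }
}

func main() {
	toGB := getPrefixPrefixFactor("M", "G")
	fmt.Println(toGB(2500.0)) // 2.5 (2500 MB/s == 2.5 GB/s)
}
```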
@@ -20,6 +20,7 @@ import (
 	"github.com/ClusterCockpit/cc-backend/internal/metricdata"
 	"github.com/ClusterCockpit/cc-backend/internal/repository"
 	"github.com/ClusterCockpit/cc-backend/pkg/archive"
+	"github.com/ClusterCockpit/cc-backend/pkg/log"
 	"github.com/ClusterCockpit/cc-backend/pkg/schema"
 	"github.com/gorilla/mux"
 
@@ -320,6 +321,7 @@ func setup(t *testing.T) *api.RestApi {
 	]
 }`
 
+	log.Init("info", true)
 	tmpdir := t.TempDir()
 	jobarchive := filepath.Join(tmpdir, "job-archive")
 	if err := os.Mkdir(jobarchive, 0777); err != nil {
@@ -346,11 +348,7 @@ func setup(t *testing.T) *api.RestApi {
 		t.Fatal(err)
 	}
 	dbfilepath := filepath.Join(tmpdir, "test.db")
-	f, err := os.Create(dbfilepath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	f.Close()
+	repository.MigrateDB("sqlite3", dbfilepath)
 
 	cfgFilePath := filepath.Join(tmpdir, "config.json")
 	if err := os.WriteFile(cfgFilePath, []byte(testconfig), 0666); err != nil {
@@ -363,7 +361,7 @@ func setup(t *testing.T) *api.RestApi {
 	repository.Connect("sqlite3", dbfilepath)
 	db := repository.GetConnection()
 
-	if err := archive.Init(json.RawMessage(archiveCfg)); err != nil {
+	if err := archive.Init(json.RawMessage(archiveCfg), config.Keys.DisableArchive); err != nil {
 		t.Fatal(err)
 	}
 
@@ -371,10 +369,6 @@ func setup(t *testing.T) *api.RestApi {
 		t.Fatal(err)
 	}
 
-	if _, err := db.DB.Exec(repository.JobsDBSchema); err != nil {
-		t.Fatal(err)
-	}
-
 	jobRepo := repository.GetJobRepository()
 	resolver := &graph.Resolver{DB: db.DB, Repo: jobRepo}
 
@@ -500,6 +494,7 @@ func TestRestApi(t *testing.T) {
 	}
 
 	const stopJobBody string = `{
+	"jobId": 123,
 	"jobId": 123,
 	"startTime": 123456789,
 	"cluster": "testcluster",
@@ -519,7 +514,7 @@ func TestRestApi(t *testing.T) {
 		t.Fatal(response.Status, recorder.Body.String())
 	}
 
-	restapi.OngoingArchivings.Wait()
+	restapi.JobRepository.WaitForArchiving()
 	job, err := restapi.Resolver.Query().Job(context.Background(), strconv.Itoa(int(dbid)))
 	if err != nil {
 		t.Fatal(err)
@@ -633,15 +628,15 @@ func subtestLetJobFail(t *testing.T, restapi *api.RestApi, r *mux.Router) {
 		t.Fatal(response.Status, recorder.Body.String())
 	}
 
-	restapi.OngoingArchivings.Wait()
+	restapi.JobRepository.WaitForArchiving()
 	jobid, cluster := int64(12345), "testcluster"
 	job, err := restapi.JobRepository.Find(&jobid, &cluster, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if job.State != schema.JobStateCompleted {
-		t.Fatal("expected job to be completed")
+	if job.State != schema.JobStateFailed {
+		t.Fatal("expected job to be failed")
 	}
 })
 if !ok {
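
Condensed, the new test-setup flow reads as below: instead of creating an empty file and executing repository.JobsDBSchema by hand, tests now run the migration and then connect. Names are taken from the diff; anything beyond what the diff shows (exact signatures, package layout of this sketch) is assumed.

```go
package example_test

import (
	"path/filepath"
	"testing"

	"github.com/ClusterCockpit/cc-backend/internal/repository"
)

// setupDB sketches the patched setup() above: migrate first, then connect.
func setupDB(t *testing.T) {
	dbfilepath := filepath.Join(t.TempDir(), "test.db")
	repository.MigrateDB("sqlite3", dbfilepath) // creates/updates the schema
	repository.Connect("sqlite3", dbfilepath)
	_ = repository.GetConnection()
}
```

This also explains why the checked-in binary test/test.db (next entry) is no longer needed.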
BIN test/test.db (binary file not shown)
tools/convert-pem-pubkey-for-cc/Readme.md (new file, 25 lines)
@@ -0,0 +1,25 @@
+# Convert a public Ed25519 key (in PEM format) for use in ClusterCockpit
+
+Imagine you have externally generated JSON Web Tokens (JWT) that should be accepted by CC backend. This external provider shares its public key (used for JWT signing) in PEM format:
+
+```
+-----BEGIN PUBLIC KEY-----
+MCowBQYDK2VwAyEA+51iXX8BdLFocrppRxIw52xCOf8xFSH/eNilN5IHVGc=
+-----END PUBLIC KEY-----
+```
+
+Unfortunately, ClusterCockpit does not handle this format (yet). You can use this tool to convert the public PEM key into a representation for CC:
+
+```
+CROSS_LOGIN_JWT_PUBLIC_KEY="+51iXX8BdLFocrppRxIw52xCOf8xFSH/eNilN5IHVGc="
+```
+
+Instructions
+
+- `cd tools/convert-pem-pubkey-for-cc/`
+- Insert your public ed25519 PEM key into `dummy.pub`
+- `go run . dummy.pub`
+- Copy the result into ClusterCockpit's `.env`
+- (Re)start ClusterCockpit backend
+
+Now CC can validate generated JWTs from the external provider.
tools/convert-pem-pubkey-for-cc/dummy.pub (new file, 3 lines)
@@ -0,0 +1,3 @@
+-----BEGIN PUBLIC KEY-----
+MCowBQYDK2VwAyEA+51iXX8BdLFocrppRxIw52xCOf8xFSH/eNilN5IHVGc=
+-----END PUBLIC KEY-----
tools/convert-pem-pubkey-for-cc/main.go (new file, 81 lines)
@@ -0,0 +1,81 @@
+// Copyright (C) 2022 Paderborn Center for Parallel Computing, Paderborn University
+// This code is released under MIT License:
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package main
+
+import (
+	"crypto/ed25519"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+	"os"
+)
+
+func main() {
+	filepath := ""
+	if len(os.Args) > 1 {
+		filepath = os.Args[1]
+	} else {
+		PrintUsage()
+		os.Exit(1)
+	}
+
+	pubkey, err := LoadEd255519PubkeyFromPEMFile(filepath)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error: %s\n", err.Error())
+		os.Exit(1)
+	}
+
+	fmt.Fprintf(os.Stdout,
+		"CROSS_LOGIN_JWT_PUBLIC_KEY=%#v\n",
+		base64.StdEncoding.EncodeToString(pubkey))
+}
+
+// Loads an ed25519 public key stored in a file in PEM format
+func LoadEd255519PubkeyFromPEMFile(filePath string) (ed25519.PublicKey, error) {
+	buffer, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	block, _ := pem.Decode(buffer)
+	if block == nil {
+		return nil, fmt.Errorf("no pem block found")
+	}
+
+	pubkey, err := x509.ParsePKIXPublicKey(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	ed25519PublicKey, success := pubkey.(ed25519.PublicKey)
+	if !success {
+		return nil, fmt.Errorf("not an ed25519 key")
+	}
+
+	return ed25519PublicKey, nil
+}
+
+func PrintUsage() {
+	fmt.Fprintf(os.Stderr, "Usage: %s <filename>\n", os.Args[0])
+	fmt.Fprintf(os.Stderr, "where <filename> contains an Ed25519 public key in PEM format\n")
+	fmt.Fprintf(os.Stderr, "(starting with '-----BEGIN PUBLIC KEY-----')\n")
+}
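
A sketch of what the consuming side can do with the resulting CROSS_LOGIN_JWT_PUBLIC_KEY value: base64-decode it back into an ed25519.PublicKey and verify a signature. This is not cc-backend's actual verification code, and the JWT plumbing is omitted; only standard-library calls are used.

```go
package main

import (
	"crypto/ed25519"
	"encoding/base64"
	"fmt"
)

func main() {
	// Value as produced by the converter for dummy.pub above.
	const envValue = "+51iXX8BdLFocrppRxIw52xCOf8xFSH/eNilN5IHVGc="

	raw, err := base64.StdEncoding.DecodeString(envValue)
	if err != nil || len(raw) != ed25519.PublicKeySize {
		panic("not a valid ed25519 public key")
	}
	pubkey := ed25519.PublicKey(raw)

	// With a real JWT, the signing input "header.payload" would be verified
	// against the token's signature; this dummy signature yields false.
	ok := ed25519.Verify(pubkey, []byte("header.payload"), make([]byte, ed25519.SignatureSize))
	fmt.Println("signature valid:", ok)
}
```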
@@ -1,4 +1,4 @@
-# cc-svelte-datatable
+# cc-frontend
 
 [![Build](https://github.com/ClusterCockpit/cc-svelte-datatable/actions/workflows/build.yml/badge.svg)](https://github.com/ClusterCockpit/cc-svelte-datatable/actions/workflows/build.yml)
 
@@ -21,11 +21,9 @@ Install the dependencies...
 yarn install
 ```
 
-...then start [Rollup](https://rollupjs.org):
+...then build using [Rollup](https://rollupjs.org):
 
 ```bash
-yarn run dev
+yarn build
 ```
 
-Edit a component file in `src`, save it, and reload the page to see your changes.
-
web/frontend/public/bootstrap-icons.css (vendored, new file, 1704 lines; diff suppressed, file too large)
web/frontend/public/bootstrap.min.css (vendored, new file, 7 lines; diff suppressed, lines too long)
web/frontend/public/bootstrap.min.css.map (new file, 1 line; diff suppressed, lines too long)
BIN web/frontend/public/fonts/bootstrap-icons.woff (new file; binary file not shown)
BIN web/frontend/public/fonts/bootstrap-icons.woff2 (new file; binary file not shown)
@@ -10,11 +10,11 @@
 
 	const ccconfig = getContext('cc-config')
 
-	export let user
+	export let isAdmin
 
 </script>
 
-{#if user.IsAdmin}
+{#if isAdmin == true}
 <Card style="margin-bottom: 1.5em;">
 	<CardHeader>
 		<CardTitle class="mb-1">Admin Options</CardTitle>
@@ -1,26 +1,48 @@
 <script>
 	import { Icon, Button, InputGroup, Input, Collapse,
 		Navbar, NavbarBrand, Nav, NavItem, NavLink, NavbarToggler,
-		Dropdown, DropdownToggle, DropdownMenu, DropdownItem } from 'sveltestrap'
+		Dropdown, DropdownToggle, DropdownMenu, DropdownItem, InputGroupText } from 'sveltestrap'
 
 	export let username // empty string if auth. is disabled, otherwise the username as string
-	export let isAdmin // boolean
+	export let authlevel // Integer
 	export let clusters // array of names
+	export let roles // Role Enum-Like
 
 	let isOpen = false
 
-	const views = [
-		isAdmin
-			? { title: 'Jobs', adminOnly: false, href: '/monitoring/jobs/', icon: 'card-list' }
-			: { title: 'My Jobs', adminOnly: false, href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
-		{ title: 'Users', adminOnly: true, href: '/monitoring/users/', icon: 'people-fill' },
-		{ title: 'Projects', adminOnly: true, href: '/monitoring/projects/', icon: 'folder' },
-		{ title: 'Tags', adminOnly: false, href: '/monitoring/tags/', icon: 'tags' }
+	const userviews = [
+		{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
+		{ title: `Job Search`, href: '/monitoring/jobs/', icon: 'card-list' },
+		{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
 	]
 
+	const managerviews = [
+		{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
+		{ title: `Managed Jobs`, href: '/monitoring/jobs/', icon: 'card-list' },
+		{ title: `Managed Users`, href: '/monitoring/users/', icon: 'people-fill' },
+		{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
+	]
+
+	const supportviews = [
+		{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
+		{ title: 'Jobs', href: '/monitoring/jobs/', icon: 'card-list' },
+		{ title: 'Users', href: '/monitoring/users/', icon: 'people-fill' },
+		{ title: 'Projects', href: '/monitoring/projects/', icon: 'folder' },
+		{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
+	]
+
+	const adminviews = [
+		{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
+		{ title: 'Jobs', href: '/monitoring/jobs/', icon: 'card-list' },
+		{ title: 'Users', href: '/monitoring/users/', icon: 'people-fill' },
+		{ title: 'Projects', href: '/monitoring/projects/', icon: 'folder' },
+		{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
+	]
+
 	const viewsPerCluster = [
-		{ title: 'Analysis', adminOnly: true, href: '/monitoring/analysis/', icon: 'graph-up' },
-		{ title: 'Systems', adminOnly: true, href: '/monitoring/systems/', icon: 'cpu' },
-		{ title: 'Status', adminOnly: true, href: '/monitoring/status/', icon: 'cpu' },
+		{ title: 'Analysis', requiredRole: roles.support, href: '/monitoring/analysis/', icon: 'graph-up' },
+		{ title: 'Systems', requiredRole: roles.admin, href: '/monitoring/systems/', icon: 'cpu' },
+		{ title: 'Status', requiredRole: roles.admin, href: '/monitoring/status/', icon: 'cpu' },
 	]
 </script>
 
@@ -31,10 +53,26 @@
 	<NavbarToggler on:click={() => (isOpen = !isOpen)} />
 	<Collapse {isOpen} navbar expand="lg" on:update={({ detail }) => (isOpen = detail.isOpen)}>
 		<Nav pills>
-			{#each views.filter(item => isAdmin || !item.adminOnly) as item}
-				<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
-			{/each}
-			{#each viewsPerCluster.filter(item => !item.adminOnly || isAdmin) as item}
+			{#if authlevel == roles.admin}
+				{#each adminviews as item}
+					<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
+				{/each}
+			{:else if authlevel == roles.support}
+				{#each supportviews as item}
+					<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
+				{/each}
+			{:else if authlevel == roles.manager}
+				{#each managerviews as item}
+					<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
+				{/each}
+			{:else if authlevel == roles.user}
+				{#each userviews as item}
+					<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
+				{/each}
+			{:else}
+				<p>API User or Unauthorized!</p>
+			{/if}
+			{#each viewsPerCluster.filter(item => item.requiredRole <= authlevel) as item}
 			<NavItem>
 				<Dropdown nav inNavbar>
 					<DropdownToggle nav caret>
@@ -55,8 +93,9 @@
 		<div class="d-flex">
 			<form method="GET" action="/search">
 				<InputGroup>
-					<Input type="text" placeholder={isAdmin ? "Search jobId / username" : "Search jobId"} name="searchId"/>
+					<Input type="text" placeholder="Search 'type:<query>' ..." name="searchId"/>
 					<Button outline type="submit"><Icon name="search"/></Button>
+					<InputGroupText style="cursor:help;" title={(authlevel >= roles.support) ? "Example: 'projectId:a100cd', Types are: jobId | jobName | projectId | username | name" : "Example: 'jobName:myjob', Types are jobId | jobName | projectId"}><Icon name="info-circle"/></InputGroupText>
 				</InputGroup>
 			</form>
 			{#if username}
@@ -14,6 +14,8 @@
 	const ccconfig = getContext('cc-config')
 
 	export let filterPresets = {}
+	export let authlevel
+	export let roles
 
 	let filters = []
 	let jobList, matchedJobs = null
@@ -67,7 +69,7 @@
 	</Col>
 
 	<Col xs="3" style="margin-left: auto;">
-		<UserOrProject on:update={({ detail }) => filters.update(detail)}/>
+		<UserOrProject bind:authlevel={authlevel} bind:roles={roles} on:update={({ detail }) => filters.update(detail)}/>
 	</Col>
 	<Col xs="2">
 		<Refresher on:reload={() => jobList.update()} />
@@ -1,4 +1,4 @@
 <!--
 	@component List of users or projects
 -->
 <script>
@@ -20,6 +20,7 @@
 	const stats = operationStore(`query($filter: [JobFilter!]!) {
 		rows: jobsStatistics(filter: $filter, groupBy: ${type}) {
 			id
+			name
 			totalJobs
 			totalWalltime
 			totalCoreHours
@@ -54,7 +55,7 @@
 			: (sorting.direction == 'up'
 				? (a, b) => a[sorting.field] - b[sorting.field]
 				: (a, b) => b[sorting.field] - a[sorting.field])
 
 		return stats.filter(u => u.id.includes(nameFilter)).sort(cmp)
 	}
 
@@ -93,6 +94,15 @@
 				<Icon name="sort-numeric-down" />
 			</Button>
 		</th>
+		{#if type == 'USER'}
+			<th scope="col">
+				Name
+				<Button color="{sorting.field == 'name' ? 'primary' : 'light'}"
+					size="sm" on:click={e => changeSorting(e, 'name')}>
+					<Icon name="sort-numeric-down" />
+				</Button>
+			</th>
+		{/if}
 		<th scope="col">
 			Total Jobs
 			<Button color="{sorting.field == 'totalJobs' ? 'primary' : 'light'}"
@@ -137,6 +147,9 @@
 				{row.id}
 			{/if}
 		</td>
+		{#if type == 'USER'}
+			<td>{row?.name ? row.name : ''}</td>
+		{/if}
 		<td>{row.totalJobs}</td>
 		<td>{row.totalWalltime}</td>
 		<td>{row.totalCoreHours}</td>
@@ -148,4 +161,4 @@
 		{/each}
 	{/if}
 	</tbody>
 </Table>
@@ -62,7 +62,7 @@
 
 	query(nodesQuery)
 
-	$: console.log($nodesQuery?.data?.nodeMetrics[0].metrics)
+	// $: console.log($nodesQuery?.data?.nodeMetrics[0].metrics)
 </script>
 
 <Row>
@@ -48,7 +48,7 @@
 		if (s1 == null || s2 == null)
 			return -1
 
 		return s.dir != 'up' ? s1[stat] - s2[stat] : s2[stat] - s1[stat]
 	})
 }
 
@@ -61,7 +61,7 @@
 <thead>
 	<tr>
 		<th>
-			<Button outline on:click={() => (isMetricSelectionOpen = true, console.log(isMetricSelectionOpen))}>
+			<Button outline on:click={() => (isMetricSelectionOpen = true)}> <!-- log to click ', console.log(isMetricSelectionOpen)' -->
 				Metrics
 			</Button>
 		</th>
@@ -4,7 +4,7 @@ import Config from './Config.root.svelte'
 new Config({
 	target: document.getElementById('svelte-app'),
 	props: {
-		user: user
+		isAdmin: isAdmin
 	},
 	context: new Map([
 		['cc-config', clusterCockpitConfig]
@@ -2,11 +2,13 @@
 	import { Row, Col } from 'sveltestrap'
 	import { onMount } from 'svelte'
 	import EditRole from './admin/EditRole.svelte'
+	import EditProject from './admin/EditProject.svelte'
 	import AddUser from './admin/AddUser.svelte'
 	import ShowUsers from './admin/ShowUsers.svelte'
 	import Options from './admin/Options.svelte'
 
 	let users = []
+	let roles = []
 
 	function getUserList() {
 		fetch('/api/users/?via-ldap=false&not-just-user=true')
@@ -16,19 +18,35 @@
 		})
 	}
 
-	onMount(() => getUserList())
+	function getValidRoles() {
+		fetch('/api/roles/')
+		.then(res => res.json())
+		.then(rolesRaw => {
+			roles = rolesRaw
+		})
+	}
+
+	function initAdmin() {
+		getUserList()
+		getValidRoles()
+	}
+
+	onMount(() => initAdmin())
 
 </script>
 
 <Row cols={2} class="p-2 g-2" >
 	<Col class="mb-1">
-		<AddUser on:reload={getUserList}/>
+		<AddUser roles={roles} on:reload={getUserList}/>
 	</Col>
 	<Col class="mb-1">
 		<ShowUsers on:reload={getUserList} bind:users={users}/>
 	</Col>
 	<Col>
-		<EditRole on:reload={getUserList}/>
+		<EditRole roles={roles} on:reload={getUserList}/>
+	</Col>
+	<Col>
+		<EditProject on:reload={getUserList}/>
 	</Col>
 	<Col>
 		<Options/>
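
getValidRoles() expects /api/roles/ to return a JSON array of role names. A hypothetical sketch of such an endpoint follows; the concrete role list and its ordering are assumptions, not taken from the actual backend:

```go
package main

import (
	"encoding/json"
	"net/http"
)

// getRoles answers GET /api/roles/ with the valid role names.
// Index 0 is what AddUser.svelte renders as the pre-checked default.
func getRoles(rw http.ResponseWriter, r *http.Request) {
	roles := []string{"user", "manager", "support", "admin", "api"} // assumed
	rw.Header().Set("Content-Type", "application/json")
	json.NewEncoder(rw).Encode(roles)
}

func main() {
	http.HandleFunc("/api/roles/", getRoles)
	http.ListenAndServe(":8080", nil)
}
```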
@@ -8,6 +8,8 @@
 	let message = {msg: '', color: '#d63384'}
 	let displayMessage = false
 
+	export let roles = []
+
 	async function handleUserSubmit() {
 		let form = document.querySelector('#create-user-form')
 		let formData = new FormData(form)
@@ -45,17 +47,7 @@
 	<form id="create-user-form" method="post" action="/api/users/" class="card-body" on:submit|preventDefault={handleUserSubmit}>
 		<CardTitle class="mb-3">Create User</CardTitle>
 		<div class="mb-3">
-			<label for="name" class="form-label">Name</label>
+			<label for="username" class="form-label">Username (ID)</label>
-			<input type="text" class="form-control" id="name" name="name" aria-describedby="nameHelp"/>
-			<div id="nameHelp" class="form-text">Optional, can be blank.</div>
-		</div>
-		<div class="mb-3">
-			<label for="email" class="form-label">Email address</label>
-			<input type="email" class="form-control" id="email" name="email" aria-describedby="emailHelp"/>
-			<div id="emailHelp" class="form-text">Optional, can be blank.</div>
-		</div>
-		<div class="mb-3">
-			<label for="username" class="form-label">Username</label>
 			<input type="text" class="form-control" id="username" name="username" aria-describedby="usernameHelp"/>
 			<div id="usernameHelp" class="form-text">Must be unique.</div>
 		</div>
@@ -64,24 +56,38 @@
 			<input type="password" class="form-control" id="password" name="password" aria-describedby="passwordHelp"/>
 			<div id="passwordHelp" class="form-text">Only API users are allowed to have a blank password. Users with a blank password can only authenticate via Tokens.</div>
 		</div>
+		<div class="mb-3">
+			<label for="name" class="form-label">Project</label>
+			<input type="text" class="form-control" id="project" name="project" aria-describedby="projectHelp"/>
+			<div id="projectHelp" class="form-text">Only Manager users can have a project. Allows to inspect jobs and users of given project.</div>
+		</div>
+		<div class="mb-3">
+			<label for="name" class="form-label">Name</label>
+			<input type="text" class="form-control" id="name" name="name" aria-describedby="nameHelp"/>
+			<div id="nameHelp" class="form-text">Optional, can be blank.</div>
+		</div>
+		<div class="mb-3">
+			<label for="email" class="form-label">Email address</label>
+			<input type="email" class="form-control" id="email" name="email" aria-describedby="emailHelp"/>
+			<div id="emailHelp" class="form-text">Optional, can be blank.</div>
+		</div>
+
+
 		<div class="mb-3">
 			<p>Role:</p>
-			<div>
-				<input type="radio" id="user" name="role" value="user" checked/>
-				<label for="user">User (regular user, same as if created via LDAP sync.)</label>
-			</div>
-			<div>
-				<input type="radio" id="api" name="role" value="api"/>
-				<label for="api">API</label>
-			</div>
-			<div>
-				<input type="radio" id="support" name="role" value="support"/>
-				<label for="support">Support</label>
-			</div>
-			<div>
-				<input type="radio" id="admin" name="role" value="admin"/>
-				<label for="admin">Admin</label>
-			</div>
+			{#each roles as role, i}
+				{#if i == 0}
+					<div>
+						<input type="radio" id={role} name="role" value={role} checked/>
+						<label for={role}>{role.charAt(0).toUpperCase() + role.slice(1)} (regular user, same as if created via LDAP sync.)</label>
+					</div>
+				{:else}
+					<div>
+						<input type="radio" id={role} name="role" value={role}/>
+						<label for={role}>{role.charAt(0).toUpperCase() + role.slice(1)}</label>
+					</div>
+				{/if}
+			{/each}
 		</div>
 		<p style="display: flex; align-items: center;">
 			<Button type="submit" color="primary">Submit</Button>
web/frontend/src/config/admin/EditProject.svelte (new file, 97 lines)
@@ -0,0 +1,97 @@
+<script>
+    import { Card, CardTitle, CardBody } from 'sveltestrap'
+    import { createEventDispatcher } from 'svelte'
+    import { fade } from 'svelte/transition'
+
+    const dispatch = createEventDispatcher()
+
+    let message = {msg: '', color: '#d63384'}
+    let displayMessage = false
+
+    async function handleAddProject() {
+        const username = document.querySelector('#project-username').value
+        const project = document.querySelector('#project-id').value
+
+        if (username == "" || project == "") {
+            alert('Please fill in a username and select a project.')
+            return
+        }
+
+        let formData = new FormData()
+        formData.append('username', username)
+        formData.append('add-project', project)
+
+        try {
+            const res = await fetch(`/api/user/${username}`, { method: 'POST', body: formData })
+            if (res.ok) {
+                let text = await res.text()
+                popMessage(text, '#048109')
+                reloadUserList()
+            } else {
+                let text = await res.text()
+                // console.log(res.statusText)
+                throw new Error('Response Code ' + res.status + '-> ' + text)
+            }
+        } catch (err) {
+            popMessage(err, '#d63384')
+        }
+    }
+
+    async function handleRemoveProject() {
+        const username = document.querySelector('#project-username').value
+        const project = document.querySelector('#project-id').value
+
+        if (username == "" || project == "") {
+            alert('Please fill in a username and select a project.')
+            return
+        }
+
+        let formData = new FormData()
+        formData.append('username', username)
+        formData.append('remove-project', project)
+
+        try {
+            const res = await fetch(`/api/user/${username}`, { method: 'POST', body: formData })
+            if (res.ok) {
+                let text = await res.text()
+                popMessage(text, '#048109')
+                reloadUserList()
+            } else {
+                let text = await res.text()
+                // console.log(res.statusText)
+                throw new Error('Response Code ' + res.status + '-> ' + text)
+            }
+        } catch (err) {
+            popMessage(err, '#d63384')
+        }
+    }
+
+    function popMessage(response, rescolor) {
+        message = {msg: response, color: rescolor}
+        displayMessage = true
+        setTimeout(function() {
+            displayMessage = false
+        }, 3500)
+    }
+
+    function reloadUserList() {
+        dispatch('reload')
+    }
+</script>
+
+<Card>
+    <CardBody>
+        <CardTitle class="mb-3">Edit Project Managed By User (Manager Only)</CardTitle>
+        <div class="input-group mb-3">
+            <input type="text" class="form-control" placeholder="username" id="project-username"/>
+            <input type="text" class="form-control" placeholder="project-id" id="project-id"/>
+            <!-- PreventDefault on Sveltestrap-Button more complex to achieve than just use good ol' html button -->
+            <!-- see: https://stackoverflow.com/questions/69630422/svelte-how-to-use-event-modifiers-in-my-own-components -->
+            <button class="btn btn-primary" type="button" id="add-project-button" on:click|preventDefault={handleAddProject}>Add</button>
+            <button class="btn btn-danger" type="button" id="remove-project-button" on:click|preventDefault={handleRemoveProject}>Remove</button>
+        </div>
+        <p>
+            {#if displayMessage}<b><code style="color: {message.color};" out:fade>Update: {message.msg}</code></b>{/if}
+        </p>
+    </CardBody>
+</Card>
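
EditProject.svelte POSTs multipart form data with add-project or remove-project fields to /api/user/{username}. A hypothetical sketch of the backend counterpart (not cc-backend's actual handler; the route variable name and the idea of a persistence helper are invented for illustration):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// updateUser handles the form fields the Svelte component above sends.
func updateUser(rw http.ResponseWriter, r *http.Request) {
	username := mux.Vars(r)["id"]

	if project := r.FormValue("add-project"); project != "" {
		// A real handler would persist the assignment in the DB here.
		fmt.Fprintf(rw, "Added project %s to user %s", project, username)
		return
	}
	if project := r.FormValue("remove-project"); project != "" {
		fmt.Fprintf(rw, "Removed project %s from user %s", project, username)
		return
	}
	http.Error(rw, "expected add-project or remove-project", http.StatusBadRequest)
}

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/api/user/{id}", updateUser).Methods(http.MethodPost)
	http.ListenAndServe(":8080", r)
}
```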
@@ -8,6 +8,8 @@
 	let message = {msg: '', color: '#d63384'}
 	let displayMessage = false
 
+	export let roles = []
+
 	async function handleAddRole() {
 		const username = document.querySelector('#role-username').value
 		const role = document.querySelector('#role-select').value
@@ -86,10 +88,9 @@
 		<input type="text" class="form-control" placeholder="username" id="role-username"/>
 		<select class="form-select" id="role-select">
 			<option selected value="">Role...</option>
-			<option value="user">User</option>
-			<option value="support">Support</option>
-			<option value="admin">Admin</option>
-			<option value="api">API</option>
+			{#each roles as role}
+				<option value={role}>{role.charAt(0).toUpperCase() + role.slice(1)}</option>
+			{/each}
 		</select>
 		<!-- PreventDefault on Sveltestrap-Button more complex to achieve than just use good ol' html button -->
 		<!-- see: https://stackoverflow.com/questions/69630422/svelte-how-to-use-event-modifiers-in-my-own-components -->
@@ -41,6 +41,7 @@
 	<tr>
 		<th>Username</th>
 		<th>Name</th>
+		<th>Project(s)</th>
 		<th>Email</th>
 		<th>Roles</th>
 		<th>JWT</th>
@@ -16,6 +16,7 @@
 
 	<td>{user.username}</td>
 	<td>{user.name}</td>
+	<td>{user.projects}</td>
 	<td>{user.email}</td>
 	<td><code>{user.roles.join(', ')}</code></td>
 	<td>
@@ -45,6 +45,7 @@
 		arrayJobId: filterPresets.arrayJobId || null,
 		user: filterPresets.user || '',
 		project: filterPresets.project || '',
+		jobName: filterPresets.jobName || '',
 
 		numNodes: filterPresets.numNodes || { from: null, to: null },
 		numHWThreads: filterPresets.numHWThreads || { from: null, to: null },
@@ -94,6 +95,8 @@
 			items.push({ user: { [filters.userMatch]: filters.user } })
 		if (filters.project)
 			items.push({ project: { [filters.projectMatch]: filters.project } })
+		if (filters.jobName)
+			items.push({ jobName: { contains: filters.jobName } })
 		for (let stat of filters.stats)
 			items.push({ [stat.field]: { from: stat.from, to: stat.to } })
 
@@ -115,7 +118,7 @@
 			opts.push(`state=${state}`)
 		if (filters.startTime.from && filters.startTime.to)
 			opts.push(`startTime=${dateToUnixEpoch(filters.startTime.from)}-${dateToUnixEpoch(filters.startTime.to)}`)
 		for (let tag of filters.tags)
 			opts.push(`tag=${tag}`)
 		if (filters.duration.from && filters.duration.to)
 			opts.push(`duration=${filters.duration.from}-${filters.duration.to}`)
@@ -123,12 +126,19 @@
 			opts.push(`numNodes=${filters.numNodes.from}-${filters.numNodes.to}`)
 		if (filters.numAccelerators.from && filters.numAccelerators.to)
 			opts.push(`numAccelerators=${filters.numAccelerators.from}-${filters.numAccelerators.to}`)
-		if (filters.user)
-			opts.push(`user=${filters.user}`)
+		if (filters.user.length != 0)
+			if (filters.userMatch != 'in') {
+				opts.push(`user=${filters.user}`)
+			} else {
+				for (let singleUser of filters.user)
+					opts.push(`user=${singleUser}`)
+			}
 		if (filters.userMatch != 'contains')
 			opts.push(`userMatch=${filters.userMatch}`)
 		if (filters.project)
 			opts.push(`project=${filters.project}`)
+		if (filters.jobName)
+			opts.push(`jobName=${filters.jobName}`)
 		if (filters.projectMatch != 'contains')
 			opts.push(`projectMatch=${filters.projectMatch}`)
@@ -214,7 +224,7 @@
 	on:change={({ detail: { from, to } }) => {
 		filters.startTime.from = from?.toISOString()
 		filters.startTime.to = to?.toISOString()
-		console.log(filters.startTime)
+		// console.log(filters.startTime)
 		update()
 	}}
 />
@@ -1,7 +1,7 @@
 <script>
 	import { createEventDispatcher, getContext } from 'svelte'
 	import { Button, Modal, ModalBody, ModalHeader, ModalFooter } from 'sveltestrap'
 	import Header from '../Header.svelte';
 	import DoubleRangeSlider from './DoubleRangeSlider.svelte'
 
 	const clusters = getContext('clusters'),
@@ -23,7 +23,7 @@ import Header from '../Header.svelte';
 	const findMaxNumAccels = clusters => clusters.reduce((max, cluster) => Math.max(max,
 		cluster.subClusters.reduce((max, sc) => Math.max(max, sc.topology.accelerators?.length || 0), 0)), 0)
 
-	console.log(header)
+	// console.log(header)
 	let minNumNodes = 1, maxNumNodes = 0, minNumHWThreads = 1, maxNumHWThreads = 0, minNumAccelerators = 0, maxNumAccelerators = 0
 	$: {
 		if ($initialized) {
@@ -6,6 +6,8 @@
 
 	export let user = ''
 	export let project = ''
+	export let authlevel
+	export let roles
 	let mode = 'user', term = ''
 	const throttle = 500
 
@@ -22,30 +24,53 @@
 
 	let timeoutId = null
 	function termChanged(sleep = throttle) {
-		if (mode == 'user')
-			user = term
-		else
+		if (authlevel == roles.user) {
 			project = term
 
 			if (timeoutId != null)
 				clearTimeout(timeoutId)
 
 			timeoutId = setTimeout(() => {
 				dispatch('update', {
-					user,
-					project
-				})
-			}, sleep)
+					project
+				})
+			}, sleep)
+		} else if (authlevel >= roles.manager) {
+			if (mode == 'user')
+				user = term
+			else
+				project = term
+
+			if (timeoutId != null)
+				clearTimeout(timeoutId)
+
+			timeoutId = setTimeout(() => {
+				dispatch('update', {
+					user,
+					project
+				})
+			}, sleep)
+		}
 	}
 </script>
 
-<InputGroup>
-	<select style="max-width: 175px;" class="form-select"
-		bind:value={mode} on:change={modeChanged}>
-		<option value={'user'}>Search User</option>
-		<option value={'project'}>Search Project</option>
-	</select>
-	<Input
-		type="text" bind:value={term} on:change={() => termChanged()} on:keyup={(event) => termChanged(event.key == 'Enter' ? 0 : throttle)}
-		placeholder={mode == 'user' ? 'filter username...' : 'filter project...'} />
-</InputGroup>
+{#if authlevel == roles.user}
+	<InputGroup>
+		<Input
+			type="text" bind:value={term} on:change={() => termChanged()} on:keyup={(event) => termChanged(event.key == 'Enter' ? 0 : throttle)} placeholder='filter project...'
+		/>
+	</InputGroup>
+{:else if authlevel >= roles.manager}
+	<InputGroup>
+		<select style="max-width: 175px;" class="form-select"
			bind:value={mode} on:change={modeChanged}>
+			<option value={'user'}>Search User</option>
+			<option value={'project'}>Search Project</option>
+		</select>
+		<Input
+			type="text" bind:value={term} on:change={() => termChanged()} on:keyup={(event) => termChanged(event.key == 'Enter' ? 0 : throttle)}
+			placeholder={mode == 'user' ? 'filter username...' : 'filter project...'} />
+	</InputGroup>
+{:else}
+	Unauthorized
+{/if}
@@ -1,4 +1,4 @@
 <!--
 	@component
 
 	Properties:
@@ -31,7 +31,11 @@
 	<span class="fw-bold"><a href="/monitoring/job/{job.id}" target="_blank">{job.jobId}</a> ({job.cluster})</span>
 	{#if job.metaData?.jobName}
 		<br/>
-		{job.metaData.jobName}
+		{#if job.metaData?.jobName.length <= 25}
+			<div>{job.metaData.jobName}</div>
+		{:else}
+			<div class="truncate" style="cursor:help; width:230px;" title={job.metaData.jobName}>{job.metaData.jobName}</div>
+		{/if}
 	{/if}
 	{#if job.arrayJobId}
 		Array Job: <a href="/monitoring/jobs/?arrayJobId={job.arrayJobId}&cluster={job.cluster}" target="_blank">#{job.arrayJobId}</a>
@@ -48,12 +52,20 @@
 	{/if}
 	{#if job.project && job.project != 'no project'}
 		<br/>
-		<Icon name="people-fill"/> {job.project}
+		<Icon name="people-fill"/>
+		<a class="fst-italic" href="/monitoring/jobs/?project={job.project}&projectMatch=eq" target="_blank">
+			{scrambleNames ? scramble(job.project) : job.project}
+		</a>
 	{/if}
 </p>
 
 <p>
-	{job.numNodes} <Icon name="pc-horizontal"/>
+	{#if job.numNodes == 1}
+		{job.resources[0].hostname}
+	{:else}
+		{job.numNodes}
+	{/if}
+	<Icon name="pc-horizontal"/>
 	{#if job.exclusive != 1}
 		(shared)
 	{/if}
@@ -63,6 +75,8 @@
 	{#if job.numHWThreads > 0}
 		, {job.numHWThreads} <Icon name="cpu"/>
 	{/if}
+	<br/>
+	{job.subCluster}
 </p>
 
 <p>
@@ -86,3 +100,11 @@
 	{/each}
 </p>
 </div>
+
+<style>
+	.truncate {
+		overflow: hidden;
+		text-overflow: ellipsis;
+		white-space: nowrap;
+	}
+</style>
Some files were not shown because too many files have changed in this diff.