Merge branch 'master' into 134-job-tagging

Jan Eitzinger 2023-09-27 05:30:36 +02:00
commit 0e6c6937cd
52 changed files with 4139 additions and 1371 deletions

View File

@ -101,5 +101,6 @@ release:
draft: false
footer: |
Supports job archive version 1 and database version 6.
Please check out the [Release Notes](https://github.com/ClusterCockpit/cc-backend/blob/master/ReleaseNotes.md) for further details on breaking changes.
# vim: set ts=2 sw=2 tw=0 fo=cnqoj

View File

@ -2,7 +2,7 @@ TARGET = ./cc-backend
VAR = ./var
CFG = config.json .env
FRONTEND = ./web/frontend
VERSION = 1.1.0
VERSION = 1.2.2
GIT_HASH := $(shell git rev-parse --short HEAD || echo 'development')
CURRENT_TIME = $(shell date +"%Y-%m-%d:T%H:%M:%S")
LD_FLAGS = '-s -X main.date=${CURRENT_TIME} -X main.version=${VERSION} -X main.commit=${GIT_HASH}'
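As an aside, a minimal sketch of the Go side these `-X` linker flags target; the names follow the `main.date`, `main.version`, and `main.commit` targets in `LD_FLAGS`, though the actual declarations in `cc-backend` may differ:

```go
// Sketch: -X overwrites these package-level strings at link time.
package main

import "fmt"

var (
	version = "dev"     // set by -X main.version=${VERSION}
	commit  = "none"    // set by -X main.commit=${GIT_HASH}
	date    = "unknown" // set by -X main.date=${CURRENT_TIME}
)

func main() {
	fmt.Printf("cc-backend %s (commit %s, built %s)\n", version, commit, date)
}
```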

View File

@ -1,3 +1,6 @@
# NOTE
Please have a look at the [Release Notes](https://github.com/ClusterCockpit/cc-backend/blob/master/ReleaseNotes.md) for breaking changes!
# ClusterCockpit REST and GraphQL API backend
[![Build](https://github.com/ClusterCockpit/cc-backend/actions/workflows/test.yml/badge.svg)](https://github.com/ClusterCockpit/cc-backend/actions/workflows/test.yml)
@ -165,7 +168,7 @@ If you start `cc-backend` with the `-dev` flag, the GraphQL Playground UI is ava
This project integrates [swagger ui](https://swagger.io/tools/swagger-ui/) to document and test its REST API.
The swagger documentation files can be found in `./api/`.
You can generate the swagger-ui configuration by running `go run github.com/swaggo/swag/cmd/swag init -d ./internal/api,./pkg/schema -g rest.go -o ./api`.
You need to move the created `./api/doc.go` to `./internal/api/doc.go`.
You need to move the created `./api/docs.go` to `./internal/api/docs.go`.
If you start `cc-backend` with the `-dev` flag, the Swagger interface is available
at http://localhost:8080/swagger/.
You must provide a JWT for a user with the API role.
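For reference, a hedged sketch of calling the REST API with such a JWT from Go; the `X-Auth-Token` header name comes from the swagger `securityDefinitions` further down in this diff, and the token value is a placeholder:

```go
// Sketch: call a cc-backend REST endpoint with a JWT in X-Auth-Token.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://localhost:8080/api/jobs/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Auth-Token", "<JWT of a user with the API role>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```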

View File

@ -1,4 +1,4 @@
# `cc-backend` version 1.2.0
# `cc-backend` version 1.2.2
Supports job archive version 1 and database version 6.
@ -7,12 +7,12 @@ implementation of ClusterCockpit.
**Breaking changes**
The LDAP configuration option user_filter was changed and now should not include
the wildcard. Example:
* Old: `"user_filter": "(&(objectclass=posixAccount)(uid=*))"`
* New: `"user_filter": "&(objectclass=posixAccount)"`
* The LDAP configuration option `user_filter` was changed and should no longer include
the `uid` wildcard. Example:
- Old: `"user_filter": "(&(objectclass=posixAccount)(uid=*))"`
- New: `"user_filter": "(&(objectclass=posixAccount))"`
The aggregate job statistic core hours is now computed using the job table
* The aggregate job statistic core hours is now computed using the job table
column `num_hwthreads`. In a future release this column will be renamed to
`num_cores`. For correct display of core hours `num_hwthreads` must be correctly
filled on job start. If your existing jobs do not provide the correct value in
@ -21,6 +21,14 @@ if you have exclusive jobs, only. Please be aware that we treat this column as
if it contained the number of cores. In case you have SMT enabled and `num_hwthreads`
is not the number of cores, the core hours will be too high by the SMT factor! (A
concrete sketch follows after this list.)
* The `jwts` key is now mandatory in `config.json`. Its `max-age` option must be
set to define token validity. Some key names have changed; please refer to the
[config documentation](./configs/README.md) for details.
* The following API endpoints are only accessible from IPs registered using the `apiAllowedIPs` configuration option:
- `/users/` [GET, POST, DELETE]
- `/user/{id}` [POST]
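To make the core-hours arithmetic above concrete, an illustrative sketch (this is not the actual SQL aggregation over the job table):

```go
// Illustration of the core-hours aggregate described above. With SMT
// enabled, num_hwthreads overstates the core count and inflates the
// result by the SMT factor.
package main

import "fmt"

func coreHours(numHwthreads int, durationSeconds int64) float64 {
	return float64(numHwthreads) * float64(durationSeconds) / 3600.0
}

func main() {
	// Two jobs: 64 hwthreads for 2 h, 128 hwthreads for 30 min.
	total := coreHours(64, 2*3600) + coreHours(128, 1800)
	fmt.Printf("aggregate core hours: %.1f\n", total) // 128.0 + 64.0 = 192.0
}
```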
**NOTE**
If you are using the sqlite3 backend, the `PRAGMA` option `foreign_keys` must be
explicitly set to ON. If using the sqlite3 console, it is by default set to
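A hedged sketch of enforcing this from Go, assuming the `mattn/go-sqlite3` driver implied by the `sqlite3` driver name; its `_foreign_keys` DSN parameter issues the PRAGMA on each new connection:

```go
// Sketch: enable the foreign_keys PRAGMA per connection via the DSN.
// Assumes the mattn/go-sqlite3 driver; alternatively one could run
// "PRAGMA foreign_keys = ON;" explicitly after opening the database.
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "file:./var/job.db?_foreign_keys=on")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var on int
	if err := db.QueryRow("PRAGMA foreign_keys;").Scan(&on); err != nil {
		panic(err)
	}
	fmt.Println("foreign_keys enabled:", on == 1) // expect: true
}
```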

View File

@ -156,12 +156,18 @@ type MetricFootprints {
}
type Footprints {
nodehours: [NullableFloat!]!
timeWeights: TimeWeights!
metrics: [MetricFootprints!]!
}
type TimeWeights {
nodeHours: [NullableFloat!]!
accHours: [NullableFloat!]!
coreHours: [NullableFloat!]!
}
enum Aggregate { USER, PROJECT, CLUSTER }
enum Weights { NODE_COUNT, NODE_HOURS }
enum SortByAggregate { TOTALWALLTIME, TOTALJOBS, TOTALNODES, TOTALNODEHOURS, TOTALCORES, TOTALCOREHOURS, TOTALACCS, TOTALACCHOURS }
type NodeMetrics {
host: String!
@ -192,8 +198,7 @@ type Query {
jobsFootprints(filter: [JobFilter!], metrics: [String!]!): Footprints
jobs(filter: [JobFilter!], page: PageRequest, order: OrderByInput): JobResultList!
jobsStatistics(filter: [JobFilter!], groupBy: Aggregate): [JobsStatistics!]!
jobsCount(filter: [JobFilter]!, groupBy: Aggregate!, weight: Weights, limit: Int): [Count!]!
jobsStatistics(filter: [JobFilter!], page: PageRequest, sortBy: SortByAggregate, groupBy: Aggregate): [JobsStatistics!]!
rooflineHeatmap(filter: [JobFilter!]!, rows: Int!, cols: Int!, minX: Float!, minY: Float!, maxX: Float!, maxY: Float!): [[Float!]!]!
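A hedged example of the reshaped `jobsStatistics` field as a plain GraphQL-over-HTTP request; the `/query` endpoint path and the `PageRequest` field names (`page`, `itemsPerPage`) are assumptions based on gqlgen defaults, not confirmed by this diff:

```go
// Sketch: query the paginated, sorted jobsStatistics field.
// Endpoint path and PageRequest field names are assumptions.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	query := `{
	  jobsStatistics(groupBy: USER, sortBy: TOTALCOREHOURS,
	                 page: {page: 1, itemsPerPage: 10}) {
	    totalWalltime
	    totalCoreHours
	  }
	}`
	payload, _ := json.Marshal(map[string]string{"query": query})

	resp, err := http.Post("http://localhost:8080/query",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // raw GraphQL JSON response
}
```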
@ -288,11 +293,16 @@ type JobsStatistics {
runningJobs: Int! # Number of running jobs
shortJobs: Int! # Number of jobs shorter than the configured short-running-jobs duration
totalWalltime: Int! # Sum of the duration of all matched jobs in hours
totalNodes: Int! # Sum of the nodes of all matched jobs
totalNodeHours: Int! # Sum of the node hours of all matched jobs
totalCores: Int! # Sum of the cores of all matched jobs
totalCoreHours: Int! # Sum of the core hours of all matched jobs
totalAccs: Int! # Sum of the accs of all matched jobs
totalAccHours: Int! # Sum of the gpu hours of all matched jobs
histDuration: [HistoPoint!]! # value: hour, count: number of jobs with a rounded duration of value
histNumNodes: [HistoPoint!]! # value: number of nodes, count: number of jobs with that number of nodes
histNumCores: [HistoPoint!]! # value: number of cores, count: number of jobs with that number of cores
histNumAccs: [HistoPoint!]! # value: number of accs, count: number of jobs with that number of accs
}
input PageRequest {

View File

@ -12,7 +12,7 @@
"name": "MIT License",
"url": "https://opensource.org/licenses/MIT"
},
"version": "1"
"version": "1.0.0"
},
"host": "localhost:8080",
"basePath": "/api",
@ -29,7 +29,7 @@
"application/json"
],
"tags": [
"query"
"Job query"
],
"summary": "Lists all jobs",
"parameters": [
@ -127,7 +127,7 @@
"application/json"
],
"tags": [
"remove"
"Job remove"
],
"summary": "Remove a job from the sql database",
"parameters": [
@ -199,7 +199,7 @@
"application/json"
],
"tags": [
"remove"
"Job remove"
],
"summary": "Remove a job from the sql database",
"parameters": [
@ -269,7 +269,7 @@
"application/json"
],
"tags": [
"remove"
"Job remove"
],
"summary": "Remove a job from the sql database",
"parameters": [
@ -342,7 +342,7 @@
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Adds a new job as \"running\"",
"parameters": [
@ -408,7 +408,7 @@
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Marks job as completed and triggers archiving",
"parameters": [
@ -483,7 +483,7 @@
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Marks job as completed and triggers archiving",
"parameters": [
@ -565,7 +565,7 @@
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Adds one or more tags to a job",
"parameters": [
@ -638,7 +638,7 @@
"application/json"
],
"tags": [
"query"
"Job query"
],
"summary": "Get complete job meta and metric data",
"parameters": [
@ -707,9 +707,367 @@
}
}
}
},
"/user/{id}": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Modifies user defined by username (id) in one of four possible ways.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Updates an existing user",
"parameters": [
{
"type": "string",
"description": "Database ID of User",
"name": "id",
"in": "path",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 1: Role to add",
"name": "add-role",
"in": "formData"
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 2: Role to remove",
"name": "remove-role",
"in": "formData"
},
{
"type": "string",
"description": "Priority 3: Project to add",
"name": "add-project",
"in": "formData"
},
{
"type": "string",
"description": "Priority 4: Project to remove",
"name": "remove-project",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response Message",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: The user could not be updated",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
},
"/users/": {
"get": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"produces": [
"application/json"
],
"tags": [
"User"
],
"summary": "Returns a list of users",
"parameters": [
{
"type": "boolean",
"description": "If returned list should contain all users or only users with additional special roles",
"name": "not-just-user",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "List of users returned successfully",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/api.ApiReturnedUser"
}
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
},
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "User specified in form data will be saved to database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Adds a new user",
"parameters": [
{
"type": "string",
"description": "Unique user ID",
"name": "username",
"in": "formData",
"required": true
},
{
"type": "string",
"description": "User password",
"name": "password",
"in": "formData",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "User role",
"name": "role",
"in": "formData",
"required": true
},
{
"type": "string",
"description": "Managed project, required for new manager role user",
"name": "project",
"in": "formData"
},
{
"type": "string",
"description": "Users name",
"name": "name",
"in": "formData"
},
{
"type": "string",
"description": "Users email",
"name": "email",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: creating user failed",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
},
"delete": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "User defined by username in form data will be deleted from database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Deletes a user",
"parameters": [
{
"type": "string",
"description": "User ID to delete",
"name": "username",
"in": "formData",
"required": true
}
],
"responses": {
"200": {
"description": "User deleted successfully"
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: deleting user failed",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
}
},
"definitions": {
"api.ApiReturnedUser": {
"type": "object",
"properties": {
"email": {
"type": "string"
},
"name": {
"type": "string"
},
"projects": {
"type": "array",
"items": {
"type": "string"
}
},
"roles": {
"type": "array",
"items": {
"type": "string"
}
},
"username": {
"type": "string"
}
}
},
"api.ApiTag": {
"type": "object",
"properties": {
@ -1366,7 +1724,7 @@
"type": "object",
"properties": {
"id": {
"description": "The unique DB identifier of a tag\nThe unique DB identifier of a tag",
"description": "The unique DB identifier of a tag",
"type": "integer"
},
"name": {
@ -1399,10 +1757,5 @@
"name": "X-Auth-Token",
"in": "header"
}
},
"tags": [
{
"name": "Job API"
}
]
}
}

View File

@ -1,5 +1,22 @@
basePath: /api
definitions:
api.ApiReturnedUser:
properties:
email:
type: string
name:
type: string
projects:
items:
type: string
type: array
roles:
items:
type: string
type: array
username:
type: string
type: object
api.ApiTag:
properties:
name:
@ -495,9 +512,7 @@ definitions:
description: Defines a tag using name and type.
properties:
id:
description: |-
The unique DB identifier of a tag
The unique DB identifier of a tag
description: The unique DB identifier of a tag
type: integer
name:
description: Tag Name
@ -526,7 +541,7 @@ info:
name: MIT License
url: https://opensource.org/licenses/MIT
title: ClusterCockpit REST API
version: "1"
version: 1.0.0
paths:
/jobs/:
get:
@ -592,7 +607,7 @@ paths:
- ApiKeyAuth: []
summary: Lists all jobs
tags:
- query
- Job query
/jobs/{id}:
post:
consumes:
@ -650,7 +665,7 @@ paths:
- ApiKeyAuth: []
summary: Get complete job meta and metric data
tags:
- query
- Job query
/jobs/delete_job/:
delete:
consumes:
@ -700,7 +715,7 @@ paths:
- ApiKeyAuth: []
summary: Remove a job from the sql database
tags:
- remove
- Job remove
/jobs/delete_job/{id}:
delete:
description: Job to remove is specified by database ID. This will not remove
@ -747,7 +762,7 @@ paths:
- ApiKeyAuth: []
summary: Remove a job from the sql database
tags:
- remove
- Job remove
/jobs/delete_job_before/{ts}:
delete:
description: Remove all jobs with start time before timestamp. The jobs will
@ -794,7 +809,7 @@ paths:
- ApiKeyAuth: []
summary: Remove a job from the sql database
tags:
- remove
- Job remove
/jobs/start_job/:
post:
consumes:
@ -841,7 +856,7 @@ paths:
- ApiKeyAuth: []
summary: Adds a new job as "running"
tags:
- add and modify
- Job add and modify
/jobs/stop_job/:
post:
description: |-
@ -890,7 +905,7 @@ paths:
- ApiKeyAuth: []
summary: Marks job as completed and triggers archiving
tags:
- add and modify
- Job add and modify
/jobs/stop_job/{id}:
post:
consumes:
@ -946,7 +961,7 @@ paths:
- ApiKeyAuth: []
summary: Marks job as completed and triggers archiving
tags:
- add and modify
- Job add and modify
/jobs/tag_job/{id}:
post:
consumes:
@ -995,12 +1010,241 @@ paths:
- ApiKeyAuth: []
summary: Adds one or more tags to a job
tags:
- add and modify
- Job add and modify
/user/{id}:
post:
consumes:
- multipart/form-data
description: |-
Modifies user defined by username (id) in one of four possible ways.
If more than one formValue is set then only the highest priority field is used.
Only accessible from IPs registered with apiAllowedIPs configuration option.
parameters:
- description: Database ID of User
in: path
name: id
required: true
type: string
- description: 'Priority 1: Role to add'
enum:
- admin
- support
- manager
- user
- api
in: formData
name: add-role
type: string
- description: 'Priority 2: Role to remove'
enum:
- admin
- support
- manager
- user
- api
in: formData
name: remove-role
type: string
- description: 'Priority 3: Project to add'
in: formData
name: add-project
type: string
- description: 'Priority 4: Project to remove'
in: formData
name: remove-project
type: string
produces:
- text/plain
responses:
"200":
description: Success Response Message
schema:
type: string
"400":
description: Bad Request
schema:
type: string
"401":
description: Unauthorized
schema:
type: string
"403":
description: Forbidden
schema:
type: string
"422":
description: 'Unprocessable Entity: The user could not be updated'
schema:
type: string
"500":
description: Internal Server Error
schema:
type: string
security:
- ApiKeyAuth: []
summary: Updates an existing user
tags:
- User
/users/:
delete:
consumes:
- multipart/form-data
description: |-
User defined by username in form data will be deleted from database.
Only accessible from IPs registered with apiAllowedIPs configuration option.
parameters:
- description: User ID to delete
in: formData
name: username
required: true
type: string
produces:
- text/plain
responses:
"200":
description: User deleted successfully
"400":
description: Bad Request
schema:
type: string
"401":
description: Unauthorized
schema:
type: string
"403":
description: Forbidden
schema:
type: string
"422":
description: 'Unprocessable Entity: deleting user failed'
schema:
type: string
"500":
description: Internal Server Error
schema:
type: string
security:
- ApiKeyAuth: []
summary: Deletes a user
tags:
- User
get:
description: |-
Returns a JSON-encoded list of users.
Required query-parameter defines if all users or only users with additional special roles are returned.
Only accessible from IPs registered with apiAllowedIPs configuration option.
parameters:
- description: Whether the returned list should contain all users or only users
with additional special roles
in: query
name: not-just-user
required: true
type: boolean
produces:
- application/json
responses:
"200":
description: List of users returned successfully
schema:
items:
$ref: '#/definitions/api.ApiReturnedUser'
type: array
"400":
description: Bad Request
schema:
type: string
"401":
description: Unauthorized
schema:
type: string
"403":
description: Forbidden
schema:
type: string
"500":
description: Internal Server Error
schema:
type: string
security:
- ApiKeyAuth: []
summary: Returns a list of users
tags:
- User
post:
consumes:
- multipart/form-data
description: |-
User specified in form data will be saved to database.
Only accessible from IPs registered with apiAllowedIPs configuration option.
parameters:
- description: Unique user ID
in: formData
name: username
required: true
type: string
- description: User password
in: formData
name: password
required: true
type: string
- description: User role
enum:
- admin
- support
- manager
- user
- api
in: formData
name: role
required: true
type: string
- description: Managed project, required for new manager role user
in: formData
name: project
type: string
- description: User's name
in: formData
name: name
type: string
- description: User's email
in: formData
name: email
type: string
produces:
- text/plain
responses:
"200":
description: Success Response
schema:
type: string
"400":
description: Bad Request
schema:
type: string
"401":
description: Unauthorized
schema:
type: string
"403":
description: Forbidden
schema:
type: string
"422":
description: 'Unprocessable Entity: creating user failed'
schema:
type: string
"500":
description: Internal Server Error
schema:
type: string
security:
- ApiKeyAuth: []
summary: Adds a new user
tags:
- User
securityDefinitions:
ApiKeyAuth:
in: header
name: X-Auth-Token
type: apiKey
swagger: "2.0"
tags:
- name: Job API
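A hedged sketch of exercising the new user-management endpoints documented above (host, token, and form values are placeholders; the caller's IP must additionally be listed in `apiAllowedIPs`):

```go
// Sketch: create a user via POST /api/users/ as specified above.
// Form fields mirror the swagger parameters; X-Auth-Token carries a
// JWT placeholder for an admin user with the API role.
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	w.WriteField("username", "jdoe")   // required
	w.WriteField("password", "s3cret") // required
	w.WriteField("role", "user")       // required, one of the role enum values
	w.Close()

	req, _ := http.NewRequest("POST", "http://localhost:8080/api/users/", &buf)
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("X-Auth-Token", "<JWT>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```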

View File

@ -9,6 +9,7 @@ It is supported to set these by means of a `.env` file in the project root.
## Configuration Options
* `addr`: Type string. Address on which the http (or https) server will listen (for example: 'localhost:80'). Default `:8080`.
* `apiAllowedIPs`: Type string array. Addresses from which the secured API endpoints (/users and other auth-related endpoints) can be reached.
* `user`: Type string. Drop root permissions once `.env` has been read and the port has been taken. Only applicable when using a privileged port.
* `group`: Type string. Drop root permissions once `.env` has been read and the port has been taken. Only applicable when using a privileged port.
* `disable-authentication`: Type bool. Disable authentication (for everything: API, Web-UI, ...). Default `false`.
@ -16,25 +17,41 @@ It is supported to set these by means of a `.env` file in the project root.
* `static-files`: Type string. Folder where static assets can be found, if `embed-static-files` is `false`. No default.
* `db-driver`: Type string. 'sqlite3' or 'mysql' (mysql will work for mariadb as well). Default `sqlite3`.
* `db`: Type string. For sqlite3 a filename, for mysql a DSN in this format: https://github.com/go-sql-driver/mysql#dsn-data-source-name (Without query parameters!). Default: `./var/job.db`.
* `job-archive`: Type string. Path to the job-archive. Default: `./var/job-archive`.
* `job-archive`: Type object.
- `kind`: Type string. At the moment only `file` is supported as a value.
- `path`: Type string. Path to the job-archive. Default: `./var/job-archive`.
- `compression`: Type integer. Set up automatic compression for jobs older than the given number of days.
- `retention`: Type object.
- `policy`: Type string (required). Retention policy. Possible values: `none`, `delete`, `move`.
- `includeDB`: Type boolean. Also remove jobs from database.
- `age`: Type integer. Act on jobs with startTime older than age (in days).
- `location`: Type string. The target directory for retention. Only applicable for retention policy `move`.
* `disable-archive`: Type bool. Keep all metric data in the metric data repositories, do not write to the job-archive. Default `false`.
* `validate`: Type bool. Validate all input json documents against json schema.
* `session-max-age`: Type string. Specifies for how long a session shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire! Default `168h`.
* `jwt-max-age`: Type string. Specifies for how long a JWT token shall be valid as a string parsable by time.ParseDuration(). If 0 or empty, the session/token does not expire! Default `0`.
* `https-cert-file` and `https-key-file`: Type string. If both those options are not empty, use HTTPS using those certificates.
* `redirect-http-to`: Type string. If not the empty string and `addr` does not end in ":80", redirect every request incoming at port 80 to that url.
* `machine-state-dir`: Type string. Where to store MachineState files. TODO: Explain in more detail!
* `stop-jobs-exceeding-walltime`: Type int. If not zero, automatically mark jobs as stopped running X seconds longer than their walltime. Only applies if walltime is set for job. Default `0`.
* `short-running-jobs-duration`: Type int. Do not show running jobs shorter than X seconds. Default `300`.
* `jwts`: Type object (required). For JWT Authentication.
- `max-age`: Type string (required). Configure how long a token is valid. As string parsable by time.ParseDuration().
- `cookieName`: Type string. Cookie that should be checked for a JWT token.
- `validateUser`: Type boolean. Deny login for users not in database (but defined in JWT). Overwrites roles in JWT with database roles.
- `trustedIssuer`: Type string. Issuer that should be accepted when validating external JWTs.
- `syncUserOnLogin`: Type boolean. Add non-existent user to DB at login attempt with values provided in JWT.
* `ldap`: Type object. For LDAP Authentication and user synchronisation. Default `nil`.
- `url`: Type string. URL of LDAP directory server.
- `user_base`: Type string. Base DN of user tree root.
- `search_dn`: Type string. DN for authenticating LDAP admin account with general read rights.
- `user_bind`: Type string. Expression used to authenticate users via LDAP bind. Must contain `uid={username}`.
- `user_filter`: Type string. Filter to extract users for syncing.
- `url`: Type string (required). URL of LDAP directory server.
- `user_base`: Type string (required). Base DN of user tree root.
- `search_dn`: Type string (required). DN for authenticating LDAP admin account with general read rights.
- `user_bind`: Type string (required). Expression used to authenticate users via LDAP bind. Must contain `uid={username}`.
- `user_filter`: Type string (required). Filter to extract users for syncing.
- `username_attr`: Type string. Attribute with full user name. Defaults to `gecos` if not provided.
- `sync_interval`: Type string. Interval used for syncing local user table with LDAP directory. Parsed using time.ParseDuration.
- `sync_del_old_users`: Type bool. Delete obsolete users in database.
* `clusters`: Type array of objects
- `sync_del_old_users`: Type boolean. Delete obsolete users in database.
- `syncUserOnLogin`: Type boolean. Add non-existent user to DB at login attempt if the user exists in the LDAP directory.
* `clusters`: Type array of objects (required)
- `name`: Type string. The name of the cluster.
- `metricDataRepository`: Type object with properties: `kind` (Type string, can be one of `cc-metric-store`, `influxdb` ), `url` (Type string), `token` (Type string)
- `filterRanges`: Type object. This option controls the slider ranges for the UI controls of numNodes, duration, and startTime. Example:

View File

@ -5,7 +5,7 @@
"path": "./var/job-archive"
},
"jwts": {
"max-age": "2m"
"max-age": "2000h"
},
"clusters": [
{

View File

@ -5,7 +5,7 @@
"user_base": "ou=people,ou=hpc,dc=test,dc=de",
"search_dn": "cn=hpcmonitoring,ou=roadm,ou=profile,ou=hpc,dc=test,dc=de",
"user_bind": "uid={username},ou=people,ou=hpc,dc=test,dc=de",
"user_filter": "(&(objectclass=posixAccount)(uid=*))"
"user_filter": "(&(objectclass=posixAccount))"
},
"https-cert-file": "/etc/letsencrypt/live/url/fullchain.pem",
"https-key-file": "/etc/letsencrypt/live/url/privkey.pem",
@ -43,7 +43,7 @@
"jwts": {
"cookieName": "",
"validateUser": false,
"max-age": "2m",
"max-age": "2000h",
"trustedIssuer": ""
},
"short-running-jobs-duration": 300

View File

@ -1,4 +1,5 @@
// Code generated by swaggo/swag. DO NOT EDIT
// Code generated by swaggo/swag. DO NOT EDIT.
package api
import "github.com/swaggo/swag"
@ -35,7 +36,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"query"
"Job query"
],
"summary": "Lists all jobs",
"parameters": [
@ -133,7 +134,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"remove"
"Job remove"
],
"summary": "Remove a job from the sql database",
"parameters": [
@ -205,7 +206,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"remove"
"Job remove"
],
"summary": "Remove a job from the sql database",
"parameters": [
@ -275,7 +276,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"remove"
"Job remove"
],
"summary": "Remove a job from the sql database",
"parameters": [
@ -348,7 +349,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Adds a new job as \"running\"",
"parameters": [
@ -414,7 +415,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Marks job as completed and triggers archiving",
"parameters": [
@ -489,7 +490,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Marks job as completed and triggers archiving",
"parameters": [
@ -571,7 +572,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"add and modify"
"Job add and modify"
],
"summary": "Adds one or more tags to a job",
"parameters": [
@ -644,7 +645,7 @@ const docTemplate = `{
"application/json"
],
"tags": [
"query"
"Job query"
],
"summary": "Get complete job meta and metric data",
"parameters": [
@ -713,9 +714,367 @@ const docTemplate = `{
}
}
}
},
"/user/{id}": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Modifies user defined by username (id) in one of four possible ways.\nIf more than one formValue is set then only the highest priority field is used.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Updates an existing user",
"parameters": [
{
"type": "string",
"description": "Database ID of User",
"name": "id",
"in": "path",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 1: Role to add",
"name": "add-role",
"in": "formData"
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "Priority 2: Role to remove",
"name": "remove-role",
"in": "formData"
},
{
"type": "string",
"description": "Priority 3: Project to add",
"name": "add-project",
"in": "formData"
},
{
"type": "string",
"description": "Priority 4: Project to remove",
"name": "remove-project",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response Message",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: The user could not be updated",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
},
"/users/": {
"get": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Returns a JSON-encoded list of users.\nRequired query-parameter defines if all users or only users with additional special roles are returned.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"produces": [
"application/json"
],
"tags": [
"User"
],
"summary": "Returns a list of users",
"parameters": [
{
"type": "boolean",
"description": "If returned list should contain all users or only users with additional special roles",
"name": "not-just-user",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "List of users returned successfully",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/api.ApiReturnedUser"
}
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
},
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "User specified in form data will be saved to database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Adds a new user",
"parameters": [
{
"type": "string",
"description": "Unique user ID",
"name": "username",
"in": "formData",
"required": true
},
{
"type": "string",
"description": "User password",
"name": "password",
"in": "formData",
"required": true
},
{
"enum": [
"admin",
"support",
"manager",
"user",
"api"
],
"type": "string",
"description": "User role",
"name": "role",
"in": "formData",
"required": true
},
{
"type": "string",
"description": "Managed project, required for new manager role user",
"name": "project",
"in": "formData"
},
{
"type": "string",
"description": "Users name",
"name": "name",
"in": "formData"
},
{
"type": "string",
"description": "Users email",
"name": "email",
"in": "formData"
}
],
"responses": {
"200": {
"description": "Success Response",
"schema": {
"type": "string"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: creating user failed",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
},
"delete": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "User defined by username in form data will be deleted from database.\nOnly accessible from IPs registered with apiAllowedIPs configuration option.",
"consumes": [
"multipart/form-data"
],
"produces": [
"text/plain"
],
"tags": [
"User"
],
"summary": "Deletes a user",
"parameters": [
{
"type": "string",
"description": "User ID to delete",
"name": "username",
"in": "formData",
"required": true
}
],
"responses": {
"200": {
"description": "User deleted successfully"
},
"400": {
"description": "Bad Request",
"schema": {
"type": "string"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
},
"403": {
"description": "Forbidden",
"schema": {
"type": "string"
}
},
"422": {
"description": "Unprocessable Entity: deleting user failed",
"schema": {
"type": "string"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "string"
}
}
}
}
}
},
"definitions": {
"api.ApiReturnedUser": {
"type": "object",
"properties": {
"email": {
"type": "string"
},
"name": {
"type": "string"
},
"projects": {
"type": "array",
"items": {
"type": "string"
}
},
"roles": {
"type": "array",
"items": {
"type": "string"
}
},
"username": {
"type": "string"
}
}
},
"api.ApiTag": {
"type": "object",
"properties": {
@ -1372,7 +1731,7 @@ const docTemplate = `{
"type": "object",
"properties": {
"id": {
"description": "The unique DB identifier of a tag\nThe unique DB identifier of a tag",
"description": "The unique DB identifier of a tag",
"type": "integer"
},
"name": {
@ -1405,17 +1764,12 @@ const docTemplate = `{
"name": "X-Auth-Token",
"in": "header"
}
},
"tags": [
{
"name": "Job API"
}
]
}
}`
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = &swag.Spec{
Version: "1",
Version: "1.0.0",
Host: "localhost:8080",
BasePath: "/api",
Schemes: []string{},
@ -1423,6 +1777,8 @@ var SwaggerInfo = &swag.Spec{
Description: "API for batch job control.",
InfoInstanceName: "swagger",
SwaggerTemplate: docTemplate,
LeftDelim: "{{",
RightDelim: "}}",
}
func init() {

View File

@ -37,8 +37,6 @@ import (
// @version 1.0.0
// @description API for batch job control.
// @tag.name Job API
// @contact.name ClusterCockpit Project
// @contact.url https://github.com/ClusterCockpit
// @contact.email support@clustercockpit.org
@ -77,8 +75,6 @@ func (api *RestApi) MountRoutes(r *mux.Router) {
r.HandleFunc("/jobs/delete_job/", api.deleteJobByRequest).Methods(http.MethodDelete)
r.HandleFunc("/jobs/delete_job/{id}", api.deleteJobById).Methods(http.MethodDelete)
r.HandleFunc("/jobs/delete_job_before/{ts}", api.deleteJobBefore).Methods(http.MethodDelete)
// r.HandleFunc("/secured/addProject/{id}/{project}", api.secureUpdateUser).Methods(http.MethodPost)
// r.HandleFunc("/secured/addRole/{id}/{role}", api.secureUpdateUser).Methods(http.MethodPost)
if api.MachineStateDir != "" {
r.HandleFunc("/machine_state/{cluster}/{host}", api.getMachineState).Methods(http.MethodGet)
@ -165,6 +161,14 @@ type JobMetricWithName struct {
Metric *schema.JobMetric `json:"metric"`
}
type ApiReturnedUser struct {
Username string `json:"username"`
Name string `json:"name"`
Roles []string `json:"roles"`
Email string `json:"email"`
Projects []string `json:"projects"`
}
func handleError(err error, statusCode int, rw http.ResponseWriter) {
log.Warnf("REST ERROR : %s", err.Error())
rw.Header().Add("Content-Type", "application/json")
@ -193,6 +197,10 @@ func securedCheck(r *http.Request) error {
return fmt.Errorf("missing configuration key ApiAllowedIPs")
}
if config.Keys.ApiAllowedIPs[0] == "*" {
return nil
}
// extract IP address
IPAddress := r.Header.Get("X-Real-Ip")
if IPAddress == "" {
@ -202,6 +210,10 @@ func securedCheck(r *http.Request) error {
IPAddress = r.RemoteAddr
}
if strings.Contains(IPAddress, ":") {
IPAddress = strings.Split(IPAddress, ":")[0]
}
// check if IP is allowed
if !util.Contains(config.Keys.ApiAllowedIPs, IPAddress) {
return fmt.Errorf("unknown ip: %v", IPAddress)
@ -213,7 +225,7 @@ func securedCheck(r *http.Request) error {
// getJobs godoc
// @summary Lists all jobs
// @tags query
// @tags Job query
// @description Get a list of all jobs. Filters can be applied using query parameters.
// @description Number of results can be limited by page. Results are sorted by descending startTime.
// @produce json
@ -359,7 +371,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) {
// getJobById godoc
// @summary Get complete job meta and metric data
// @tags query
// @tags Job query
// @description Job to get is specified by database ID
// @description Returns full job resource information according to 'JobMeta' scheme and all metrics according to 'JobData'.
// @accept json
@ -454,7 +466,7 @@ func (api *RestApi) getJobById(rw http.ResponseWriter, r *http.Request) {
// tagJob godoc
// @summary Adds one or more tags to a job
// @tags add and modify
// @tags Job add and modify
// @description Adds tag(s) to a job specified by DB ID. Name and Type of Tag(s) can be chosen freely.
// @description If tagged job is already finished: Tag will be written directly to respective archive files.
// @accept json
@ -521,7 +533,7 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) {
// startJob godoc
// @summary Adds a new job as "running"
// @tags add and modify
// @tags Job add and modify
// @description Job specified in request body will be saved to database as "running" with new DB ID.
// @description Job specifications follow the 'JobMeta' scheme, API will fail to execute if requirements are not met.
// @accept json
@ -602,7 +614,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) {
// stopJobById godoc
// @summary Marks job as completed and triggers archiving
// @tags add and modify
// @tags Job add and modify
// @description Job to stop is specified by database ID. Only stopTime and final state are required in request body.
// @description Returns full job resource information according to 'JobMeta' scheme.
// @accept json
@ -659,7 +671,7 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) {
// stopJobByRequest godoc
// @summary Marks job as completed and triggers archiving
// @tags add and modify
// @tags Job add and modify
// @description Job to stop is specified by request body. All fields are required in this case.
// @description Returns full job resource information according to 'JobMeta' scheme.
// @produce json
@ -708,7 +720,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) {
// deleteJobById godoc
// @summary Remove a job from the sql database
// @tags remove
// @tags Job remove
// @description Job to remove is specified by database ID. This will not remove the job from the job archive.
// @produce json
// @param id path int true "Database ID of Job"
@ -755,7 +767,7 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) {
// deleteJobByRequest godoc
// @summary Remove a job from the sql database
// @tags remove
// @tags Job remove
// @description Job to delete is specified by request body. All fields are required in this case.
// @accept json
// @produce json
@ -813,7 +825,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request)
// deleteJobBefore godoc
// @summary Remove a job from the sql database
// @tags remove
// @tags Job remove
// @description Remove all jobs with start time before timestamp. The jobs will not be removed from the job archive.
// @produce json
// @param ts path int true "Unix epoch timestamp"
@ -943,43 +955,32 @@ func (api *RestApi) getJobMetrics(rw http.ResponseWriter, r *http.Request) {
})
}
func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
}
rw.Header().Set("Content-Type", "text/plain")
username := r.FormValue("username")
me := repository.GetUserFromContext(r.Context())
if !me.HasRole(schema.RoleAdmin) {
if username != me.Username {
http.Error(rw, "Only admins are allowed to sign JWTs not for themselves",
http.StatusForbidden)
return
}
}
user, err := repository.GetUserRepository().GetUser(username)
if err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
}
jwt, err := api.Authentication.JwtAuth.ProvideJWT(user)
if err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
}
rw.WriteHeader(http.StatusOK)
rw.Write([]byte(jwt))
}
// createUser godoc
// @summary Adds a new user
// @tags User
// @description User specified in form data will be saved to database.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @accept mpfd
// @produce plain
// @param username formData string true "Unique user ID"
// @param password formData string true "User password"
// @param role formData string true "User role" Enums(admin, support, manager, user, api)
// @param project formData string false "Managed project, required for new manager role user"
// @param name formData string false "User's name"
// @param email formData string false "User's email"
// @success 200 {string} string "Success Response"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 422 {string} string "Unprocessable Entity: creating user failed"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /users/ [post]
func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
rw.Header().Set("Content-Type", "text/plain")
@ -1022,10 +1023,27 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte(fmt.Sprintf("User %v successfully created!\n", username)))
}
// deleteUser godoc
// @summary Deletes a user
// @tags User
// @description User defined by username in form data will be deleted from database.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @accept mpfd
// @produce plain
// @param username formData string true "User ID to delete"
// @success 200 "User deleted successfully"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 422 {string} string "Unprocessable Entity: deleting user failed"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /users/ [delete]
func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
@ -1042,10 +1060,26 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(http.StatusOK)
}
// getUsers godoc
// @summary Returns a list of users
// @tags User
// @description Returns a JSON-encoded list of users.
// @description Required query-parameter defines if all users or only users with additional special roles are returned.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @produce json
// @param not-just-user query bool true "Whether the returned list should contain all users or only users with additional special roles"
// @success 200 {array} api.ApiReturnedUser "List of users returned successfully"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /users/ [get]
func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
@ -1062,31 +1096,32 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) {
json.NewEncoder(rw).Encode(users)
}
func (api *RestApi) getRoles(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
}
user := repository.GetUserFromContext(r.Context())
if !user.HasRole(schema.RoleAdmin) {
http.Error(rw, "only admins are allowed to fetch a list of roles", http.StatusForbidden)
return
}
roles, err := schema.GetValidRoles(user)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
json.NewEncoder(rw).Encode(roles)
}
// updateUser godoc
// @summary Updates an existing user
// @tags User
// @description Modifies user defined by username (id) in one of four possible ways.
// @description If more than one formValue is set then only the highest priority field is used.
// @description Only accessible from IPs registered with apiAllowedIPs configuration option.
// @accept mpfd
// @produce plain
// @param id path string true "Database ID of User"
// @param add-role formData string false "Priority 1: Role to add" Enums(admin, support, manager, user, api)
// @param remove-role formData string false "Priority 2: Role to remove" Enums(admin, support, manager, user, api)
// @param add-project formData string false "Priority 3: Project to add"
// @param remove-project formData string false "Priority 4: Project to remove"
// @success 200 {string} string "Success Response Message"
// @failure 400 {string} string "Bad Request"
// @failure 401 {string} string "Unauthorized"
// @failure 403 {string} string "Forbidden"
// @failure 422 {string} string "Unprocessable Entity: The user could not be updated"
// @failure 500 {string} string "Internal Server Error"
// @security ApiKeyAuth
// @router /user/{id} [post]
func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
if user := repository.GetUserFromContext(r.Context()); !user.HasRole(schema.RoleAdmin) {
@ -1130,70 +1165,61 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) {
}
}
// func (api *RestApi) secureUpdateUser(rw http.ResponseWriter, r *http.Request) {
// if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) {
// handleError(fmt.Errorf("missing role: %v", auth.GetRoleString(auth.RoleApi)), http.StatusForbidden, rw)
// return
// }
//
// // IP CHECK HERE (WIP)
// // Probably better as private routine
// IPAddress := r.Header.Get("X-Real-Ip")
// if IPAddress == "" {
// IPAddress = r.Header.Get("X-Forwarded-For")
// }
// if IPAddress == "" {
// IPAddress = r.RemoteAddr
// }
//
// // Also This
// ipOk := false
// for _, a := range config.Keys.ApiAllowedAddrs {
// if a == IPAddress {
// ipOk = true
// }
// }
//
// if IPAddress == "" || ipOk == false {
// handleError(fmt.Errorf("unknown ip: %v", IPAddress), http.StatusForbidden, rw)
// return
// }
// // IP CHECK END
//
// // Get Values
// id := mux.Vars(r)["id"]
// newproj := mux.Vars(r)["project"]
// newrole := mux.Vars(r)["role"]
//
// // TODO: Handle anything but roles...
// if newrole != "" {
// if err := api.Authentication.AddRole(r.Context(), id, newrole); err != nil {
// handleError(errors.New(err.Error()), http.StatusUnprocessableEntity, rw)
// return
// }
//
// rw.Header().Add("Content-Type", "application/json")
// rw.WriteHeader(http.StatusOK)
// json.NewEncoder(rw).Encode(UpdateUserApiResponse{
// Message: fmt.Sprintf("Successfully added role %s to %s", newrole, id),
// })
//
// } else if newproj != "" {
// if err := api.Authentication.AddProject(r.Context(), id, newproj); err != nil {
// handleError(errors.New(err.Error()), http.StatusUnprocessableEntity, rw)
// return
// }
//
// rw.Header().Add("Content-Type", "application/json")
// rw.WriteHeader(http.StatusOK)
// json.NewEncoder(rw).Encode(UpdateUserApiResponse{
// Message: fmt.Sprintf("Successfully added project %s to %s", newproj, id),
// })
//
// } else {
// handleError(errors.New("Not Add [role|project]?"), http.StatusBadRequest, rw)
// }
// }
func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
rw.Header().Set("Content-Type", "text/plain")
username := r.FormValue("username")
me := repository.GetUserFromContext(r.Context())
if !me.HasRole(schema.RoleAdmin) {
if username != me.Username {
http.Error(rw, "Only admins are allowed to sign JWTs not for themselves",
http.StatusForbidden)
return
}
}
user, err := repository.GetUserRepository().GetUser(username)
if err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
}
jwt, err := api.Authentication.JwtAuth.ProvideJWT(user)
if err != nil {
http.Error(rw, err.Error(), http.StatusUnprocessableEntity)
return
}
rw.WriteHeader(http.StatusOK)
rw.Write([]byte(jwt))
}
func (api *RestApi) getRoles(rw http.ResponseWriter, r *http.Request) {
err := securedCheck(r)
if err != nil {
http.Error(rw, err.Error(), http.StatusForbidden)
return
}
user := repository.GetUserFromContext(r.Context())
if !user.HasRole(schema.RoleAdmin) {
http.Error(rw, "only admins are allowed to fetch a list of roles", http.StatusForbidden)
return
}
roles, err := schema.GetValidRoles(user)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
json.NewEncoder(rw).Encode(roles)
}
func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "text/plain")

View File

@ -6,6 +6,7 @@ package auth
import (
"crypto/ed25519"
"database/sql"
"encoding/base64"
"errors"
"fmt"
@ -152,31 +153,35 @@ func (ja *JWTCookieSessionAuthenticator) Login(
claims := token.Claims.(jwt.MapClaims)
sub, _ := claims["sub"].(string)
var name string
if wrap, ok := claims["name"].(map[string]interface{}); ok {
if vals, ok := wrap["values"].([]interface{}); ok {
if len(vals) != 0 {
name = fmt.Sprintf("%v", vals[0])
for i := 1; i < len(vals); i++ {
name += fmt.Sprintf(" %v", vals[i])
}
}
}
}
var roles []string
projects := make([]string, 0)
if jc.ValidateUser {
var err error
user, err = repository.GetUserRepository().GetUser(sub)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%v'", sub)
}
// Deny any logins for unknown usernames
if user == nil {
log.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
// Take user roles from database instead of trusting the JWT
roles = user.Roles
} else {
var name string
if wrap, ok := claims["name"].(map[string]interface{}); ok {
if vals, ok := wrap["values"].([]interface{}); ok {
if len(vals) != 0 {
name = fmt.Sprintf("%v", vals[0])
for i := 1; i < len(vals); i++ {
name += fmt.Sprintf(" %v", vals[i])
}
}
}
}
// Extract roles from JWT (if present)
if rawroles, ok := claims["roles"].([]interface{}); ok {
for _, rr := range rawroles {
@ -185,20 +190,6 @@ func (ja *JWTCookieSessionAuthenticator) Login(
}
}
}
}
// (Ask browser to) Delete JWT cookie
deletedCookie := &http.Cookie{
Name: jc.CookieName,
Value: "",
Path: "/",
MaxAge: -1,
HttpOnly: true,
}
http.SetCookie(rw, deletedCookie)
if user == nil {
projects := make([]string, 0)
user = &schema.User{
Username: sub,
Name: name,
@ -215,5 +206,15 @@ func (ja *JWTCookieSessionAuthenticator) Login(
}
}
// (Ask browser to) Delete JWT cookie
deletedCookie := &http.Cookie{
Name: jc.CookieName,
Value: "",
Path: "/",
MaxAge: -1,
HttpOnly: true,
}
http.SetCookie(rw, deletedCookie)
return user, nil
}

View File

@ -5,6 +5,7 @@
package auth
import (
"database/sql"
"encoding/base64"
"errors"
"fmt"
@ -78,31 +79,35 @@ func (ja *JWTSessionAuthenticator) Login(
claims := token.Claims.(jwt.MapClaims)
sub, _ := claims["sub"].(string)
var name string
if wrap, ok := claims["name"].(map[string]interface{}); ok {
if vals, ok := wrap["values"].([]interface{}); ok {
if len(vals) != 0 {
name = fmt.Sprintf("%v", vals[0])
for i := 1; i < len(vals); i++ {
name += fmt.Sprintf(" %v", vals[i])
}
}
}
}
var roles []string
projects := make([]string, 0)
if config.Keys.JwtConfig.ValidateUser {
var err error
user, err = repository.GetUserRepository().GetUser(sub)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Error while loading user '%v'", sub)
}
// Deny any logins for unknown usernames
if user == nil {
log.Warn("Could not find user from JWT in internal database.")
return nil, errors.New("unknown user")
}
// Take user roles from database instead of trusting the JWT
roles = user.Roles
} else {
var name string
if wrap, ok := claims["name"].(map[string]interface{}); ok {
if vals, ok := wrap["values"].([]interface{}); ok {
if len(vals) != 0 {
name = fmt.Sprintf("%v", vals[0])
for i := 1; i < len(vals); i++ {
name += fmt.Sprintf(" %v", vals[i])
}
}
}
}
// Extract roles from JWT (if present)
if rawroles, ok := claims["roles"].([]interface{}); ok {
for _, rr := range rawroles {
@ -113,23 +118,17 @@ func (ja *JWTSessionAuthenticator) Login(
}
}
}
}
projects := make([]string, 0)
// Java/Grails Issued Token
// if rawprojs, ok := claims["projects"].([]interface{}); ok {
// for _, pp := range rawprojs {
// if p, ok := pp.(string); ok {
// projects = append(projects, p)
// }
// }
// } else if rawprojs, ok := claims["projects"]; ok {
// for _, p := range rawprojs.([]string) {
// projects = append(projects, p)
// }
// }
if rawprojs, ok := claims["projects"].([]interface{}); ok {
for _, pp := range rawprojs {
if p, ok := pp.(string); ok {
projects = append(projects, p)
}
}
} else if rawprojs, ok := claims["projects"]; ok {
projects = append(projects, rawprojs.([]string)...)
}
if user == nil {
user = &schema.User{
Username: sub,
Name: name,

View File

@ -21,6 +21,7 @@ import (
type LdapAuthenticator struct {
syncPassword string
UserAttr string
}
var _ Authenticator = (*LdapAuthenticator)(nil)
@ -31,11 +32,13 @@ func (la *LdapAuthenticator) Init() error {
log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)")
}
if config.Keys.LdapConfig.SyncInterval != "" {
interval, err := time.ParseDuration(config.Keys.LdapConfig.SyncInterval)
lc := config.Keys.LdapConfig
if lc.SyncInterval != "" {
interval, err := time.ParseDuration(lc.SyncInterval)
if err != nil {
log.Warnf("Could not parse duration for sync interval: %v",
config.Keys.LdapConfig.SyncInterval)
lc.SyncInterval)
return err
}
@ -58,6 +61,12 @@ func (la *LdapAuthenticator) Init() error {
log.Info("LDAP configuration key sync_interval invalid")
}
if lc.UserAttr != "" {
la.UserAttr = lc.UserAttr
} else {
la.UserAttr = "gecos"
}
return nil
}
@ -86,7 +95,7 @@ func (la *LdapAuthenticator) CanLogin(
lc.UserBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
fmt.Sprintf("(&%s(uid=%s))", lc.UserFilter, username),
[]string{"dn", "uid", "gecos"}, nil)
[]string{"dn", "uid", la.UserAttr}, nil)
sr, err := l.Search(searchRequest)
if err != nil {
@ -100,7 +109,7 @@ func (la *LdapAuthenticator) CanLogin(
}
entry := sr.Entries[0]
name := entry.GetAttributeValue("gecos")
name := entry.GetAttributeValue(la.UserAttr)
var roles []string
roles = append(roles, schema.GetRoleString(schema.RoleUser))
projects := make([]string, 0)
@ -176,7 +185,7 @@ func (la *LdapAuthenticator) Sync() error {
lc.UserBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
lc.UserFilter,
[]string{"dn", "uid", "gecos"}, nil))
[]string{"dn", "uid", la.UserAttr}, nil))
if err != nil {
log.Warn("LDAP search error")
return err
@ -192,7 +201,7 @@ func (la *LdapAuthenticator) Sync() error {
_, ok := users[username]
if !ok {
users[username] = IN_LDAP
newnames[username] = entry.GetAttributeValue("gecos")
newnames[username] = entry.GetAttributeValue(la.UserAttr)
} else {
users[username] = IN_BOTH
}

View File

@ -26,21 +26,25 @@ var Keys schema.ProgramConfig = schema.ProgramConfig{
StopJobsExceedingWalltime: 0,
ShortRunningJobsDuration: 5 * 60,
UiDefaults: map[string]interface{}{
"analysis_view_histogramMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"analysis_view_scatterPlotMetrics": [][]string{{"flops_any", "mem_bw"}, {"flops_any", "cpu_load"}, {"cpu_load", "mem_bw"}},
"job_view_nodestats_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"plot_general_colorBackground": true,
"plot_general_colorscheme": []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
"plot_general_lineWidth": 3,
"plot_list_jobsPerPage": 50,
"plot_list_selectedMetrics": []string{"cpu_load", "mem_used", "flops_any", "mem_bw"},
"plot_view_plotsPerRow": 3,
"plot_view_showPolarplot": true,
"plot_view_showRoofline": true,
"plot_view_showStatTable": true,
"system_view_selectedMetric": "cpu_load",
"analysis_view_histogramMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"analysis_view_scatterPlotMetrics": [][]string{{"flops_any", "mem_bw"}, {"flops_any", "cpu_load"}, {"cpu_load", "mem_bw"}},
"job_view_nodestats_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"job_view_polarPlotMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"job_view_selectedMetrics": []string{"flops_any", "mem_bw", "mem_used"},
"plot_general_colorBackground": true,
"plot_general_colorscheme": []string{"#00bfff", "#0000ff", "#ff00ff", "#ff0000", "#ff8000", "#ffff00", "#80ff00"},
"plot_general_lineWidth": 3,
"plot_list_jobsPerPage": 50,
"plot_list_selectedMetrics": []string{"cpu_load", "mem_used", "flops_any", "mem_bw"},
"plot_view_plotsPerRow": 3,
"plot_view_showPolarplot": true,
"plot_view_showRoofline": true,
"plot_view_showStatTable": true,
"system_view_selectedMetric": "cpu_load",
"analysis_view_selectedTopEntity": "user",
"analysis_view_selectedTopCategory": "totalWalltime",
"status_view_selectedTopUserCategory": "totalJobs",
"status_view_selectedTopProjectCategory": "totalJobs",
},
}

File diff suppressed because it is too large

View File

@ -22,8 +22,8 @@ type FloatRange struct {
}
type Footprints struct {
Nodehours []schema.Float `json:"nodehours"`
Metrics []*MetricFootprints `json:"metrics"`
TimeWeights *TimeWeights `json:"timeWeights"`
Metrics []*MetricFootprints `json:"metrics"`
}
type HistoPoint struct {
@ -91,11 +91,16 @@ type JobsStatistics struct {
RunningJobs int `json:"runningJobs"`
ShortJobs int `json:"shortJobs"`
TotalWalltime int `json:"totalWalltime"`
TotalNodes int `json:"totalNodes"`
TotalNodeHours int `json:"totalNodeHours"`
TotalCores int `json:"totalCores"`
TotalCoreHours int `json:"totalCoreHours"`
TotalAccs int `json:"totalAccs"`
TotalAccHours int `json:"totalAccHours"`
HistDuration []*HistoPoint `json:"histDuration"`
HistNumNodes []*HistoPoint `json:"histNumNodes"`
HistNumCores []*HistoPoint `json:"histNumCores"`
HistNumAccs []*HistoPoint `json:"histNumAccs"`
}
type MetricFootprints struct {
@ -133,6 +138,12 @@ type TimeRangeOutput struct {
To time.Time `json:"to"`
}
type TimeWeights struct {
NodeHours []schema.Float `json:"nodeHours"`
AccHours []schema.Float `json:"accHours"`
CoreHours []schema.Float `json:"coreHours"`
}
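A quick sketch (standalone mirror struct, not part of this commit) of the wire shape these json tags produce:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the TimeWeights model above, kept in sync with its json tags.
type timeWeights struct {
	NodeHours []float64 `json:"nodeHours"`
	AccHours  []float64 `json:"accHours"`
	CoreHours []float64 `json:"coreHours"`
}

func main() {
	b, _ := json.Marshal(timeWeights{
		NodeHours: []float64{1.5},
		AccHours:  []float64{1},
		CoreHours: []float64{96},
	})
	fmt.Println(string(b)) // {"nodeHours":[1.5],"accHours":[1],"coreHours":[96]}
}
```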
type User struct {
Username string `json:"username"`
Name string `json:"name"`
@ -182,6 +193,59 @@ func (e Aggregate) MarshalGQL(w io.Writer) {
fmt.Fprint(w, strconv.Quote(e.String()))
}
type SortByAggregate string
const (
SortByAggregateTotalwalltime SortByAggregate = "TOTALWALLTIME"
SortByAggregateTotaljobs SortByAggregate = "TOTALJOBS"
SortByAggregateTotalnodes SortByAggregate = "TOTALNODES"
SortByAggregateTotalnodehours SortByAggregate = "TOTALNODEHOURS"
SortByAggregateTotalcores SortByAggregate = "TOTALCORES"
SortByAggregateTotalcorehours SortByAggregate = "TOTALCOREHOURS"
SortByAggregateTotalaccs SortByAggregate = "TOTALACCS"
SortByAggregateTotalacchours SortByAggregate = "TOTALACCHOURS"
)
var AllSortByAggregate = []SortByAggregate{
SortByAggregateTotalwalltime,
SortByAggregateTotaljobs,
SortByAggregateTotalnodes,
SortByAggregateTotalnodehours,
SortByAggregateTotalcores,
SortByAggregateTotalcorehours,
SortByAggregateTotalaccs,
SortByAggregateTotalacchours,
}
func (e SortByAggregate) IsValid() bool {
switch e {
case SortByAggregateTotalwalltime, SortByAggregateTotaljobs, SortByAggregateTotalnodes, SortByAggregateTotalnodehours, SortByAggregateTotalcores, SortByAggregateTotalcorehours, SortByAggregateTotalaccs, SortByAggregateTotalacchours:
return true
}
return false
}
func (e SortByAggregate) String() string {
return string(e)
}
func (e *SortByAggregate) UnmarshalGQL(v interface{}) error {
str, ok := v.(string)
if !ok {
return fmt.Errorf("enums must be strings")
}
*e = SortByAggregate(str)
if !e.IsValid() {
return fmt.Errorf("%s is not a valid SortByAggregate", str)
}
return nil
}
func (e SortByAggregate) MarshalGQL(w io.Writer) {
fmt.Fprint(w, strconv.Quote(e.String()))
}
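Runnable inside this repository, a small sketch of the gqlgen round-trip for the new enum; values arrive as the upper-case strings declared above:

```go
package main

import (
	"fmt"

	"github.com/ClusterCockpit/cc-backend/internal/graph/model"
)

// Round-trip of the new enum through its gqlgen hooks; any input other than
// one of the declared upper-case strings returns an error from UnmarshalGQL.
func main() {
	var s model.SortByAggregate
	if err := s.UnmarshalGQL("TOTALCOREHOURS"); err != nil {
		panic(err)
	}
	fmt.Println(s.IsValid(), s) // true TOTALCOREHOURS
}
```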
type SortDirectionEnum string
const (
@ -222,44 +286,3 @@ func (e *SortDirectionEnum) UnmarshalGQL(v interface{}) error {
func (e SortDirectionEnum) MarshalGQL(w io.Writer) {
fmt.Fprint(w, strconv.Quote(e.String()))
}
type Weights string
const (
WeightsNodeCount Weights = "NODE_COUNT"
WeightsNodeHours Weights = "NODE_HOURS"
)
var AllWeights = []Weights{
WeightsNodeCount,
WeightsNodeHours,
}
func (e Weights) IsValid() bool {
switch e {
case WeightsNodeCount, WeightsNodeHours:
return true
}
return false
}
func (e Weights) String() string {
return string(e)
}
func (e *Weights) UnmarshalGQL(v interface{}) error {
str, ok := v.(string)
if !ok {
return fmt.Errorf("enums must be strings")
}
*e = Weights(str)
if !e.IsValid() {
return fmt.Errorf("%s is not a valid Weights", str)
}
return nil
}
func (e Weights) MarshalGQL(w io.Writer) {
fmt.Fprint(w, strconv.Quote(e.String()))
}

View File

@ -244,34 +244,34 @@ func (r *queryResolver) Jobs(ctx context.Context, filter []*model.JobFilter, pag
}
// JobsStatistics is the resolver for the jobsStatistics field.
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobFilter, page *model.PageRequest, sortBy *model.SortByAggregate, groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
var err error
var stats []*model.JobsStatistics
if requireField(ctx, "totalJobs") {
if requireField(ctx, "totalJobs") || requireField(ctx, "totalWalltime") || requireField(ctx, "totalNodes") || requireField(ctx, "totalCores") ||
requireField(ctx, "totalAccs") || requireField(ctx, "totalNodeHours") || requireField(ctx, "totalCoreHours") || requireField(ctx, "totalAccHours") {
if groupBy == nil {
stats, err = r.Repo.JobsStats(ctx, filter)
} else {
stats, err = r.Repo.JobsStatsGrouped(ctx, filter, groupBy)
stats, err = r.Repo.JobsStatsGrouped(ctx, filter, page, sortBy, groupBy)
}
} else {
stats = make([]*model.JobsStatistics, 0, 1)
stats = append(stats,
&model.JobsStatistics{})
stats = append(stats, &model.JobsStatistics{})
}
if groupBy != nil {
if requireField(ctx, "shortJobs") {
stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "short")
}
if requireField(ctx, "RunningJobs") {
if requireField(ctx, "runningJobs") {
stats, err = r.Repo.AddJobCountGrouped(ctx, filter, groupBy, stats, "running")
}
} else {
if requireField(ctx, "shortJobs") {
stats, err = r.Repo.AddJobCount(ctx, filter, stats, "short")
}
if requireField(ctx, "RunningJobs") {
if requireField(ctx, "runningJobs") {
stats, err = r.Repo.AddJobCount(ctx, filter, stats, "running")
}
}
@ -280,7 +280,7 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
return nil, err
}
if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") {
if requireField(ctx, "histDuration") || requireField(ctx, "histNumNodes") || requireField(ctx, "histNumCores") || requireField(ctx, "histNumAccs") {
if groupBy == nil {
stats[0], err = r.Repo.AddHistograms(ctx, filter, stats[0])
if err != nil {
@ -294,24 +294,6 @@ func (r *queryResolver) JobsStatistics(ctx context.Context, filter []*model.JobF
return stats, nil
}
// JobsCount is the resolver for the jobsCount field.
func (r *queryResolver) JobsCount(ctx context.Context, filter []*model.JobFilter, groupBy model.Aggregate, weight *model.Weights, limit *int) ([]*model.Count, error) {
counts, err := r.Repo.CountGroupedJobs(ctx, groupBy, filter, weight, limit)
if err != nil {
log.Warn("Error while counting grouped jobs")
return nil, err
}
res := make([]*model.Count, 0, len(counts))
for name, count := range counts {
res = append(res, &model.Count{
Name: name,
Count: count,
})
}
return res, nil
}
// RooflineHeatmap is the resolver for the rooflineHeatmap field.
func (r *queryResolver) RooflineHeatmap(ctx context.Context, filter []*model.JobFilter, rows int, cols int, minX float64, minY float64, maxX float64, maxY float64) ([][]float64, error) {
return r.rooflineHeatmap(ctx, filter, rows, cols, minX, minY, maxX, maxY)

View File

@ -6,7 +6,6 @@ package graph
import (
"context"
"errors"
"fmt"
"math"
@ -15,6 +14,7 @@ import (
"github.com/ClusterCockpit/cc-backend/internal/metricdata"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
// "github.com/ClusterCockpit/cc-backend/pkg/archive"
)
const MAX_JOBS_FOR_ANALYSIS = 500
@ -32,7 +32,7 @@ func (r *queryResolver) rooflineHeatmap(
return nil, err
}
if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
return nil, fmt.Errorf("GRAPH/STATS > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
return nil, fmt.Errorf("GRAPH/UTIL > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
}
fcols, frows := float64(cols), float64(rows)
@ -49,20 +49,24 @@ func (r *queryResolver) rooflineHeatmap(
jobdata, err := metricdata.LoadData(job, []string{"flops_any", "mem_bw"}, []schema.MetricScope{schema.MetricScopeNode}, ctx)
if err != nil {
log.Error("Error while loading metrics for roofline")
log.Errorf("Error while loading roofline metrics for job %d", job.ID)
return nil, err
}
flops_, membw_ := jobdata["flops_any"], jobdata["mem_bw"]
if flops_ == nil && membw_ == nil {
return nil, fmt.Errorf("GRAPH/STATS > 'flops_any' or 'mem_bw' missing for job %d", job.ID)
log.Infof("rooflineHeatmap(): 'flops_any' or 'mem_bw' missing for job %d", job.ID)
continue
// return nil, fmt.Errorf("GRAPH/UTIL > 'flops_any' or 'mem_bw' missing for job %d", job.ID)
}
flops, ok1 := flops_["node"]
membw, ok2 := membw_["node"]
if !ok1 || !ok2 {
log.Info("rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
continue
// TODO/FIXME:
return nil, errors.New("GRAPH/STATS > todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
// return nil, errors.New("GRAPH/UTIL > todo: rooflineHeatmap() query not implemented for where flops_any or mem_bw not available at 'node' level")
}
for n := 0; n < len(flops.Series); n++ {
@ -98,7 +102,7 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
return nil, err
}
if len(jobs) > MAX_JOBS_FOR_ANALYSIS {
return nil, fmt.Errorf("GRAPH/STATS > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
return nil, fmt.Errorf("GRAPH/UTIL > too many jobs matched (max: %d)", MAX_JOBS_FOR_ANALYSIS)
}
avgs := make([][]schema.Float, len(metrics))
@ -106,7 +110,11 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
avgs[i] = make([]schema.Float, 0, len(jobs))
}
nodehours := make([]schema.Float, 0, len(jobs))
timeweights := new(model.TimeWeights)
timeweights.NodeHours = make([]schema.Float, 0, len(jobs))
timeweights.AccHours = make([]schema.Float, 0, len(jobs))
timeweights.CoreHours = make([]schema.Float, 0, len(jobs))
for _, job := range jobs {
if job.MonitoringStatus == schema.MonitoringStatusDisabled || job.MonitoringStatus == schema.MonitoringStatusArchivingFailed {
continue
@ -117,7 +125,18 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
return nil, err
}
nodehours = append(nodehours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
// #166 collect arrays: Null values or no null values?
timeweights.NodeHours = append(timeweights.NodeHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumNodes)))
if job.NumAcc > 0 {
timeweights.AccHours = append(timeweights.AccHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumAcc)))
} else {
timeweights.AccHours = append(timeweights.AccHours, schema.Float(1.0))
}
if job.NumHWThreads > 0 {
timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(float64(job.Duration)/60.0*float64(job.NumHWThreads))) // SQLite HWThreads == Cores; numCoresForJob(job)
} else {
timeweights.CoreHours = append(timeweights.CoreHours, schema.Float(1.0))
}
}
res := make([]*model.MetricFootprints, len(avgs))
@ -129,11 +148,34 @@ func (r *queryResolver) jobsFootprints(ctx context.Context, filter []*model.JobF
}
return &model.Footprints{
Nodehours: nodehours,
Metrics: res,
TimeWeights: timeweights,
Metrics: res,
}, nil
}
// func numCoresForJob(job *schema.Job) (numCores int) {
// subcluster, scerr := archive.GetSubCluster(job.Cluster, job.SubCluster)
// if scerr != nil {
// return 1
// }
// totalJobCores := 0
// topology := subcluster.Topology
// for _, host := range job.Resources {
// hwthreads := host.HWThreads
// if hwthreads == nil {
// hwthreads = topology.Node
// }
// hostCores, _ := topology.GetCoresFromHWThreads(hwthreads)
// totalJobCores += len(hostCores)
// }
// return totalJobCores
// }
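The placeholder weight 1.0 appended above keeps the weight arrays index-aligned with the job list. A minimal sketch (not part of this commit) of how a consumer would apply such weights as a weighted mean:

```go
package main

import "fmt"

// weightedMean applies per-job weights (node, core, or accelerator hours) to
// per-job metric averages; jobs without accelerators or hwthread counts carry
// the placeholder weight 1.0 so indices stay aligned.
func weightedMean(avgs, weights []float64) float64 {
	var sum, wsum float64
	for i := range avgs {
		sum += avgs[i] * weights[i]
		wsum += weights[i]
	}
	if wsum == 0 {
		return 0
	}
	return sum / wsum
}

func main() {
	// Two jobs: average 2.0 weighted 1h vs. average 4.0 weighted 3h.
	fmt.Println(weightedMean([]float64{2.0, 4.0}, []float64{1.0, 3.0})) // 3.5
}
```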
func requireField(ctx context.Context, name string) bool {
fields := graphql.CollectAllFields(ctx)

View File

@ -506,7 +506,7 @@ func (ccms *CCMetricStore) LoadStats(
metrics []string,
ctx context.Context) (map[string]map[string]schema.MetricStatistics, error) {
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode})
queries, _, err := ccms.buildQueries(job, metrics, []schema.MetricScope{schema.MetricScopeNode}) // #166 Add scope here for analysis view accelerator normalization?
if err != nil {
log.Warn("Error while building query")
return nil, err
@ -533,7 +533,9 @@ func (ccms *CCMetricStore) LoadStats(
metric := ccms.toLocalName(query.Metric)
data := res[0]
if data.Error != nil {
return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
log.Infof("fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
continue
// return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, *data.Error)
}
metricdata, ok := stats[metric]
@ -543,7 +545,9 @@ func (ccms *CCMetricStore) LoadStats(
}
if data.Avg.IsNaN() || data.Min.IsNaN() || data.Max.IsNaN() {
return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
log.Infof("fetching %s for node %s failed: one of avg/min/max is NaN", metric, query.Hostname)
continue
// return nil, fmt.Errorf("METRICDATA/CCMS > fetching %s for node %s failed: %s", metric, query.Hostname, "avg/min/max is NaN")
}
metricdata[query.Hostname] = schema.MetricStatistics{

View File

@ -182,7 +182,7 @@ func LoadAverages(
ctx context.Context) error {
if job.State != schema.JobStateRunning && useArchive {
return archive.LoadAveragesFromArchive(job, metrics, data)
return archive.LoadAveragesFromArchive(job, metrics, data) // #166 also change here?
}
repo, ok := metricDataRepos[job.Cluster]
@ -190,7 +190,7 @@ func LoadAverages(
return fmt.Errorf("METRICDATA/METRICDATA > no metric data repository configured for '%s'", job.Cluster)
}
stats, err := repo.LoadStats(job, metrics, ctx)
stats, err := repo.LoadStats(job, metrics, ctx) // #166 how to handle stats for acc normalization?
if err != nil {
log.Errorf("Error while loading statistics for job %v (User %v, Project %v)", job.JobID, job.User, job.Project)
return err

View File

@ -455,69 +455,6 @@ func (r *JobRepository) DeleteJobById(id int64) error {
return err
}
// TODO: Use node hours instead: SELECT job.user, sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN CAST(strftime('%s', 'now') AS INTEGER) - job.start_time ELSE job.duration END)) as x FROM job GROUP BY user ORDER BY x DESC;
func (r *JobRepository) CountGroupedJobs(
ctx context.Context,
aggreg model.Aggregate,
filters []*model.JobFilter,
weight *model.Weights,
limit *int) (map[string]int, error) {
start := time.Now()
if !aggreg.IsValid() {
return nil, errors.New("invalid aggregate")
}
runner := (sq.BaseRunner)(r.stmtCache)
count := "count(*) as count"
if weight != nil {
switch *weight {
case model.WeightsNodeCount:
count = "sum(job.num_nodes) as count"
case model.WeightsNodeHours:
now := time.Now().Unix()
count = fmt.Sprintf(`sum(job.num_nodes * (CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) as count`, now)
runner = r.DB
default:
log.Debugf("CountGroupedJobs() Weight %v unknown.", *weight)
}
}
q, qerr := SecurityCheck(ctx, sq.Select("job."+string(aggreg), count).From("job").GroupBy("job."+string(aggreg)).OrderBy("count DESC"))
if qerr != nil {
return nil, qerr
}
for _, f := range filters {
q = BuildWhereClause(f, q)
}
if limit != nil {
q = q.Limit(uint64(*limit))
}
counts := map[string]int{}
rows, err := q.RunWith(runner).Query()
if err != nil {
log.Error("Error while running query")
return nil, err
}
for rows.Next() {
var group string
var count int
if err := rows.Scan(&group, &count); err != nil {
log.Warn("Error while scanning rows")
return nil, err
}
counts[group] = count
}
log.Debugf("Timer CountGroupedJobs %s", time.Since(start))
return counts, nil
}
func (r *JobRepository) UpdateMonitoringStatus(job int64, monitoringStatus int32) (err error) {
stmt := sq.Update("job").
Set("monitoring_status", monitoringStatus).

View File

@ -18,13 +18,17 @@ import (
sq "github.com/Masterminds/squirrel"
)
// SecurityCheck-less, private: Returns a list of jobs matching the provided filters. page and order are optional-
func (r *JobRepository) queryJobs(
query sq.SelectBuilder,
func (r *JobRepository) QueryJobs(
ctx context.Context,
filters []*model.JobFilter,
page *model.PageRequest,
order *model.OrderByInput) ([]*schema.Job, error) {
query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("job"))
if qerr != nil {
return nil, qerr
}
if order != nil {
field := toSnakeCase(order.Field)
@ -67,34 +71,15 @@ func (r *JobRepository) queryJobs(
return jobs, nil
}
// testFunction for queryJobs
func (r *JobRepository) testQueryJobs(
filters []*model.JobFilter,
page *model.PageRequest,
order *model.OrderByInput) ([]*schema.Job, error) {
return r.queryJobs(sq.Select(jobColumns...).From("job"), filters, page, order)
}
// Public function with added securityCheck, calls private queryJobs function above
func (r *JobRepository) QueryJobs(
func (r *JobRepository) CountJobs(
ctx context.Context,
filters []*model.JobFilter,
page *model.PageRequest,
order *model.OrderByInput) ([]*schema.Job, error) {
query, qerr := SecurityCheck(ctx, sq.Select(jobColumns...).From("job"))
if qerr != nil {
return nil, qerr
}
return r.queryJobs(query, filters, page, order)
}
// SecurityCheck-less, private: Returns the number of jobs matching the filters
func (r *JobRepository) countJobs(query sq.SelectBuilder,
filters []*model.JobFilter) (int, error) {
query, qerr := SecurityCheck(ctx, sq.Select("count(*)").From("job"))
if qerr != nil {
return 0, qerr
}
for _, f := range filters {
query = BuildWhereClause(f, query)
}
@ -107,27 +92,6 @@ func (r *JobRepository) countJobs(query sq.SelectBuilder,
return count, nil
}
// testFunction for countJobs
func (r *JobRepository) testCountJobs(
filters []*model.JobFilter) (int, error) {
return r.countJobs(sq.Select("count(*)").From("job"), filters)
}
// Public function with added securityCheck, calls private countJobs function above
func (r *JobRepository) CountJobs(
ctx context.Context,
filters []*model.JobFilter) (int, error) {
query, qerr := SecurityCheck(ctx, sq.Select("count(*)").From("job"))
if qerr != nil {
return 0, qerr
}
return r.countJobs(query, filters)
}
func SecurityCheck(ctx context.Context, query sq.SelectBuilder) (sq.SelectBuilder, error) {
user := GetUserFromContext(ctx)
if user == nil {

View File

@ -5,10 +5,12 @@
package repository
import (
"context"
"testing"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
"github.com/ClusterCockpit/cc-backend/pkg/log"
"github.com/ClusterCockpit/cc-backend/pkg/schema"
_ "github.com/mattn/go-sqlite3"
)
@ -94,7 +96,7 @@ func BenchmarkDB_CountJobs(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := db.testCountJobs([]*model.JobFilter{filter})
_, err := db.CountJobs(getContext(b), []*model.JobFilter{filter})
noErr(b, err)
}
})
@ -118,20 +120,37 @@ func BenchmarkDB_QueryJobs(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := db.testQueryJobs([]*model.JobFilter{filter}, page, order)
_, err := db.QueryJobs(getContext(b), []*model.JobFilter{filter}, page, order)
noErr(b, err)
}
})
})
}
func getContext(tb testing.TB) context.Context {
tb.Helper()
var roles []string
roles = append(roles, schema.GetRoleString(schema.RoleAdmin))
projects := make([]string, 0)
user := &schema.User{
Username: "demo",
Name: "The man",
Roles: roles,
Projects: projects,
AuthSource: schema.AuthViaLDAP,
}
ctx := context.Background()
return context.WithValue(ctx, ContextUserKey, user)
}
func setup(tb testing.TB) *JobRepository {
tb.Helper()
log.Init("warn", true)
dbfile := "testdata/job.db"
err := MigrateDB("sqlite3", dbfile)
noErr(tb, err)
Connect("sqlite3", dbfile)
return GetJobRepository()
}

View File

@ -23,6 +23,17 @@ var groupBy2column = map[model.Aggregate]string{
model.AggregateCluster: "job.cluster",
}
var sortBy2column = map[model.SortByAggregate]string{
model.SortByAggregateTotaljobs: "totalJobs",
model.SortByAggregateTotalwalltime: "totalWalltime",
model.SortByAggregateTotalnodes: "totalNodes",
model.SortByAggregateTotalnodehours: "totalNodeHours",
model.SortByAggregateTotalcores: "totalCores",
model.SortByAggregateTotalcorehours: "totalCoreHours",
model.SortByAggregateTotalaccs: "totalAccs",
model.SortByAggregateTotalacchours: "totalAccHours",
}
func (r *JobRepository) buildCountQuery(
filter []*model.JobFilter,
kind string,
@ -59,21 +70,30 @@ func (r *JobRepository) buildStatsQuery(
var query sq.SelectBuilder
castType := r.getCastType()
// fmt.Sprintf(`CAST(ROUND((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) / 3600) as %s) as value`, time.Now().Unix(), castType)
if col != "" {
// Scan columns: id, totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours
query = sq.Select(col, "COUNT(job.id)",
fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType),
fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType),
fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType),
// Scan columns: id, totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
query = sq.Select(col, "COUNT(job.id) as totalJobs",
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s) as totalWalltime`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s) as totalNodes`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s) as totalNodeHours`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_hwthreads) as %s) as totalCores`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s) as totalCoreHours`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_acc) as %s) as totalAccs`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s) as totalAccHours`, time.Now().Unix(), castType),
).From("job").GroupBy(col)
} else {
// Scan columns: totalJobs, totalWalltime, totalNodeHours, totalCoreHours, totalAccHours
// Scan columns: totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours
query = sq.Select("COUNT(job.id)",
fmt.Sprintf("CAST(ROUND(SUM(job.duration) / 3600) as %s)", castType),
fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_nodes) / 3600) as %s)", castType),
fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_hwthreads) / 3600) as %s)", castType),
fmt.Sprintf("CAST(ROUND(SUM(job.duration * job.num_acc) / 3600) as %s)", castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END)) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_nodes) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_hwthreads) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_hwthreads) / 3600) as %s)`, time.Now().Unix(), castType),
fmt.Sprintf(`CAST(SUM(job.num_acc) as %s)`, castType),
fmt.Sprintf(`CAST(ROUND(SUM((CASE WHEN job.job_state = "running" THEN %d - job.start_time ELSE job.duration END) * job.num_acc) / 3600) as %s)`, time.Now().Unix(), castType),
).From("job")
}
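For reference, a hedged sketch of roughly the SQL the grouped branch renders on SQLite (abbreviated; `%d` marks where `time.Now().Unix()` is substituted at build time):

```go
package main

import "fmt"

// Abbreviated sketch of the grouped query for SQLite; the core and
// accelerator hour columns follow the same CASE pattern as totalNodeHours.
const groupedStatsSQLSketch = `
SELECT job.user, COUNT(job.id) as totalJobs,
  CAST(ROUND(SUM((CASE WHEN job.job_state = "running"
       THEN %d - job.start_time ELSE job.duration END)) / 3600) as INTEGER) as totalWalltime,
  CAST(SUM(job.num_nodes) as INTEGER) as totalNodes,
  CAST(ROUND(SUM((CASE WHEN job.job_state = "running"
       THEN %d - job.start_time ELSE job.duration END) * job.num_nodes) / 3600) as INTEGER) as totalNodeHours
FROM job
GROUP BY job.user`

func main() { fmt.Println(groupedStatsSQLSketch) }
```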
@ -112,16 +132,28 @@ func (r *JobRepository) getCastType() string {
func (r *JobRepository) JobsStatsGrouped(
ctx context.Context,
filter []*model.JobFilter,
page *model.PageRequest,
sortBy *model.SortByAggregate,
groupBy *model.Aggregate) ([]*model.JobsStatistics, error) {
start := time.Now()
col := groupBy2column[*groupBy]
query := r.buildStatsQuery(filter, col)
query, err := SecurityCheck(ctx, query)
if err != nil {
return nil, err
}
if sortBy != nil {
sortBy := sortBy2column[*sortBy]
query = query.OrderBy(fmt.Sprintf("%s DESC", sortBy))
}
if page != nil && page.ItemsPerPage != -1 {
limit := uint64(page.ItemsPerPage)
query = query.Offset((uint64(page.Page) - 1) * limit).Limit(limit)
}
rows, err := query.RunWith(r.DB).Query()
if err != nil {
log.Warn("Error while querying DB for job statistics")
@ -132,15 +164,36 @@ func (r *JobRepository) JobsStatsGrouped(
for rows.Next() {
var id sql.NullString
var jobs, walltime, nodeHours, coreHours, accHours sql.NullInt64
if err := rows.Scan(&id, &jobs, &walltime, &nodeHours, &coreHours, &accHours); err != nil {
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := rows.Scan(&id, &jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
log.Warn("Error while scanning rows")
return nil, err
}
if id.Valid {
var totalCoreHours, totalAccHours int
var totalJobs, totalWalltime, totalNodes, totalNodeHours, totalCores, totalCoreHours, totalAccs, totalAccHours int
if jobs.Valid {
totalJobs = int(jobs.Int64)
}
if walltime.Valid {
totalWalltime = int(walltime.Int64)
}
if nodes.Valid {
totalNodes = int(nodes.Int64)
}
if cores.Valid {
totalCores = int(cores.Int64)
}
if accs.Valid {
totalAccs = int(accs.Int64)
}
if nodeHours.Valid {
totalNodeHours = int(nodeHours.Int64)
}
if coreHours.Valid {
totalCoreHours = int(coreHours.Int64)
}
@ -154,9 +207,13 @@ func (r *JobRepository) JobsStatsGrouped(
&model.JobsStatistics{
ID: id.String,
Name: name,
TotalJobs: int(jobs.Int64),
TotalWalltime: int(walltime.Int64),
TotalJobs: totalJobs,
TotalWalltime: totalWalltime,
TotalNodes: totalNodes,
TotalNodeHours: totalNodeHours,
TotalCores: totalCores,
TotalCoreHours: totalCoreHours,
TotalAccs: totalAccs,
TotalAccHours: totalAccHours})
} else {
stats = append(stats,
@ -164,7 +221,11 @@ func (r *JobRepository) JobsStatsGrouped(
ID: id.String,
TotalJobs: int(jobs.Int64),
TotalWalltime: int(walltime.Int64),
TotalNodes: totalNodes,
TotalNodeHours: totalNodeHours,
TotalCores: totalCores,
TotalCoreHours: totalCoreHours,
TotalAccs: totalAccs,
TotalAccHours: totalAccHours})
}
}
@ -188,15 +249,18 @@ func (r *JobRepository) JobsStats(
row := query.RunWith(r.DB).QueryRow()
stats := make([]*model.JobsStatistics, 0, 1)
var jobs, walltime, nodeHours, coreHours, accHours sql.NullInt64
if err := row.Scan(&jobs, &walltime, &nodeHours, &coreHours, &accHours); err != nil {
var jobs, walltime, nodes, nodeHours, cores, coreHours, accs, accHours sql.NullInt64
if err := row.Scan(&jobs, &walltime, &nodes, &nodeHours, &cores, &coreHours, &accs, &accHours); err != nil {
log.Warn("Error while scanning rows")
return nil, err
}
if jobs.Valid {
var totalCoreHours, totalAccHours int
var totalNodeHours, totalCoreHours, totalAccHours int
if nodeHours.Valid {
totalNodeHours = int(nodeHours.Int64)
}
if coreHours.Valid {
totalCoreHours = int(coreHours.Int64)
}
@ -207,6 +271,7 @@ func (r *JobRepository) JobsStats(
&model.JobsStatistics{
TotalJobs: int(jobs.Int64),
TotalWalltime: int(walltime.Int64),
TotalNodeHours: totalNodeHours,
TotalCoreHours: totalCoreHours,
TotalAccHours: totalAccHours})
}
@ -321,7 +386,7 @@ func (r *JobRepository) AddJobCount(
return nil, err
}
counts := make(map[string]int)
var count int
for rows.Next() {
var cnt sql.NullInt64
@ -329,20 +394,22 @@ func (r *JobRepository) AddJobCount(
log.Warn("Error while scanning rows")
return nil, err
}
count = int(cnt.Int64)
}
switch kind {
case "running":
for _, s := range stats {
s.RunningJobs = counts[s.ID]
s.RunningJobs = count
}
case "short":
for _, s := range stats {
s.ShortJobs = counts[s.ID]
s.ShortJobs = count
}
}
log.Debugf("Timer JobJobCount %s", time.Since(start))
log.Debugf("Timer AddJobCount %s", time.Since(start))
return stats, nil
}
@ -367,6 +434,18 @@ func (r *JobRepository) AddHistograms(
return nil, err
}
stat.HistNumCores, err = r.jobsStatisticsHistogram(ctx, "job.num_hwthreads as value", filter)
if err != nil {
log.Warn("Error while loading job statistics histogram: num hwthreads")
return nil, err
}
stat.HistNumAccs, err = r.jobsStatisticsHistogram(ctx, "job.num_acc as value", filter)
if err != nil {
log.Warn("Error while loading job statistics histogram: num acc")
return nil, err
}
log.Debugf("Timer AddHistograms %s", time.Since(start))
return stat, nil
}

View File

@ -7,6 +7,8 @@ package repository
import (
"fmt"
"testing"
"github.com/ClusterCockpit/cc-backend/internal/graph/model"
)
func TestBuildJobStatsQuery(t *testing.T) {
@ -19,3 +21,15 @@ func TestBuildJobStatsQuery(t *testing.T) {
fmt.Printf("SQL: %s\n", sql)
}
func TestJobStats(t *testing.T) {
r := setup(t)
filter := &model.JobFilter{}
stats, err := r.JobsStats(getContext(t), []*model.JobFilter{filter})
noErr(t, err)
if stats[0].TotalJobs != 6 {
t.Fatalf("Want 98, Got %d", stats[0].TotalJobs)
}
}

View File

@ -134,8 +134,12 @@ func (r *UserRepository) AddUser(user *schema.User) error {
func (r *UserRepository) DelUser(username string) error {
_, err := r.DB.Exec(`DELETE FROM user WHERE user.username = ?`, username)
log.Errorf("Error while deleting user '%s' from DB", username)
return err
if err != nil {
log.Errorf("Error while deleting user '%s' from DB", username)
return err
}
log.Infof("deleted user '%s' from DB", username)
return nil
}
func (r *UserRepository) ListUsers(specialsOnly bool) ([]*schema.User, error) {

View File

@ -15,6 +15,7 @@ type LdapConfig struct {
SearchDN string `json:"search_dn"`
UserBind string `json:"user_bind"`
UserFilter string `json:"user_filter"`
UserAttr string `json:"username_attr"`
SyncInterval string `json:"sync_interval"` // Parsed using time.ParseDuration.
SyncDelOldUsers bool `json:"sync_del_old_users"`
@ -75,7 +76,7 @@ type ProgramConfig struct {
// Address where the http (or https) server will listen on (for example: 'localhost:80').
Addr string `json:"addr"`
// Addresses from which the /api/secured/* API endpoints can be reached
// Addresses from which secured API endpoints can be reached
ApiAllowedIPs []string `json:"apiAllowedIPs"`
// Drop root permissions once .env was read and the port was taken.

View File

@ -180,6 +180,10 @@
"description": "Filter to extract users for syncing.",
"type": "string"
},
"username_attr": {
"description": "Attribute with full username. Default: gecos",
"type": "string"
},
"sync_interval": {
"description": "Interval used for syncing local user table with LDAP directory. Parsed using time.ParseDuration.",
"type": "string"

View File

@ -14,7 +14,7 @@
"chart.js": "^4.3.3",
"graphql": "^16.6.0",
"svelte-chartjs": "^3.1.2",
"sveltestrap": "^5.10.0",
"sveltestrap": "^5.11.1",
"uplot": "^1.6.24",
"wonka": "^6.3.2"
},
@ -22,6 +22,7 @@
"@rollup/plugin-commonjs": "^24.1.0",
"@rollup/plugin-node-resolve": "^15.0.2",
"@rollup/plugin-terser": "^0.4.1",
"@timohausmann/quadtree-js": "^1.2.5",
"rollup": "^3.21.0",
"rollup-plugin-css-only": "^4.3.0",
"rollup-plugin-svelte": "^7.1.4",
@ -138,9 +139,9 @@
}
},
"node_modules/@rollup/plugin-node-resolve": {
"version": "15.2.0",
"resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.0.tgz",
"integrity": "sha512-mKur03xNGT8O9ODO6FtT43ITGqHWZbKPdVJHZb+iV9QYcdlhUUB0wgknvA4KCUmC5oHJF6O2W1EgmyOQyVUI4Q==",
"version": "15.2.1",
"resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.1.tgz",
"integrity": "sha512-nsbUg588+GDSu8/NS8T4UAshO6xeaOfINNuXeVHcKV02LJtoRaM1SiOacClw4kws1SFiNhdLGxlbMY9ga/zs/w==",
"dev": true,
"dependencies": {
"@rollup/pluginutils": "^5.0.1",
@ -225,6 +226,12 @@
}
}
},
"node_modules/@timohausmann/quadtree-js": {
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/@timohausmann/quadtree-js/-/quadtree-js-1.2.5.tgz",
"integrity": "sha512-WcH3pouYtpyLjTCRvNP0WuSV4m7mRyYhLzW44egveFryT7pJhpDsdIJASEe37iCFNA0vmEpqTYGoG0siyXEthA==",
"dev": true
},
"node_modules/@types/estree": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz",
@ -346,9 +353,9 @@
"dev": true
},
"node_modules/fsevents": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
"dev": true,
"hasInstallScript": true,
"optional": true,
@ -547,9 +554,9 @@
}
},
"node_modules/rollup": {
"version": "3.28.0",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-3.28.0.tgz",
"integrity": "sha512-d7zhvo1OUY2SXSM6pfNjgD5+d0Nz87CUp4mt8l/GgVP3oBsPwzNvSzyu1me6BSG9JIgWNTVcafIXBIyM8yQ3yw==",
"version": "3.28.1",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-3.28.1.tgz",
"integrity": "sha512-R9OMQmIHJm9znrU3m3cpE8uhN0fGdXiawME7aZIpQqvpS/85+Vt1Hq1/yVIcYfOmaQiHjvXkQAoJukvLpau6Yw==",
"devOptional": true,
"bin": {
"rollup": "dist/bin/rollup"

View File

@ -10,6 +10,7 @@
"@rollup/plugin-commonjs": "^24.1.0",
"@rollup/plugin-node-resolve": "^15.0.2",
"@rollup/plugin-terser": "^0.4.1",
"@timohausmann/quadtree-js": "^1.2.5",
"rollup": "^3.21.0",
"rollup-plugin-css-only": "^4.3.0",
"rollup-plugin-svelte": "^7.1.4",
@ -21,7 +22,7 @@
"chart.js": "^4.3.3",
"graphql": "^16.6.0",
"svelte-chartjs": "^3.1.2",
"sveltestrap": "^5.10.0",
"sveltestrap": "^5.11.1",
"uplot": "^1.6.24",
"wonka": "^6.3.2"
}

View File

@ -1,7 +1,14 @@
/*!
* Bootstrap Icons v1.10.5 (https://icons.getbootstrap.com/)
* Copyright 2019-2023 The Bootstrap Authors
* Licensed under MIT (https://github.com/twbs/icons/blob/main/LICENSE)
*/
@font-face {
font-display: block;
font-family: "bootstrap-icons";
src: url("./fonts/bootstrap-icons.woff2?524846017b983fc8ded9325d94ed40f3") format("woff2"),
url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("woff");
src: url("./fonts/bootstrap-icons.woff2?1fa40e8900654d2863d011707b9fb6f2") format("woff2"),
url("./fonts/bootstrap-icons.woff?1fa40e8900654d2863d011707b9fb6f2") format("woff");
}
.bi::before,
@ -440,7 +447,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-cloud-fog2::before { content: "\f2a2"; }
.bi-cloud-hail-fill::before { content: "\f2a3"; }
.bi-cloud-hail::before { content: "\f2a4"; }
.bi-cloud-haze-1::before { content: "\f2a5"; }
.bi-cloud-haze-fill::before { content: "\f2a6"; }
.bi-cloud-haze::before { content: "\f2a7"; }
.bi-cloud-haze2-fill::before { content: "\f2a8"; }
@ -1436,21 +1442,16 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-dpad::before { content: "\f687"; }
.bi-ear-fill::before { content: "\f688"; }
.bi-ear::before { content: "\f689"; }
.bi-envelope-check-1::before { content: "\f68a"; }
.bi-envelope-check-fill::before { content: "\f68b"; }
.bi-envelope-check::before { content: "\f68c"; }
.bi-envelope-dash-1::before { content: "\f68d"; }
.bi-envelope-dash-fill::before { content: "\f68e"; }
.bi-envelope-dash::before { content: "\f68f"; }
.bi-envelope-exclamation-1::before { content: "\f690"; }
.bi-envelope-exclamation-fill::before { content: "\f691"; }
.bi-envelope-exclamation::before { content: "\f692"; }
.bi-envelope-plus-fill::before { content: "\f693"; }
.bi-envelope-plus::before { content: "\f694"; }
.bi-envelope-slash-1::before { content: "\f695"; }
.bi-envelope-slash-fill::before { content: "\f696"; }
.bi-envelope-slash::before { content: "\f697"; }
.bi-envelope-x-1::before { content: "\f698"; }
.bi-envelope-x-fill::before { content: "\f699"; }
.bi-envelope-x::before { content: "\f69a"; }
.bi-explicit-fill::before { content: "\f69b"; }
@ -1460,8 +1461,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-list-columns-reverse::before { content: "\f69f"; }
.bi-list-columns::before { content: "\f6a0"; }
.bi-meta::before { content: "\f6a1"; }
.bi-mortorboard-fill::before { content: "\f6a2"; }
.bi-mortorboard::before { content: "\f6a3"; }
.bi-nintendo-switch::before { content: "\f6a4"; }
.bi-pc-display-horizontal::before { content: "\f6a5"; }
.bi-pc-display::before { content: "\f6a6"; }
@ -1480,7 +1479,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-send-check::before { content: "\f6b3"; }
.bi-send-dash-fill::before { content: "\f6b4"; }
.bi-send-dash::before { content: "\f6b5"; }
.bi-send-exclamation-1::before { content: "\f6b6"; }
.bi-send-exclamation-fill::before { content: "\f6b7"; }
.bi-send-exclamation::before { content: "\f6b8"; }
.bi-send-fill::before { content: "\f6b9"; }
@ -1492,7 +1490,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-send-x::before { content: "\f6bf"; }
.bi-send::before { content: "\f6c0"; }
.bi-steam::before { content: "\f6c1"; }
.bi-terminal-dash-1::before { content: "\f6c2"; }
.bi-terminal-dash::before { content: "\f6c3"; }
.bi-terminal-plus::before { content: "\f6c4"; }
.bi-terminal-split::before { content: "\f6c5"; }
@ -1522,7 +1519,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-usb-symbol::before { content: "\f6dd"; }
.bi-usb::before { content: "\f6de"; }
.bi-boombox-fill::before { content: "\f6df"; }
.bi-displayport-1::before { content: "\f6e0"; }
.bi-displayport::before { content: "\f6e1"; }
.bi-gpu-card::before { content: "\f6e2"; }
.bi-memory::before { content: "\f6e3"; }
@ -1535,8 +1531,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-pci-card::before { content: "\f6ea"; }
.bi-router-fill::before { content: "\f6eb"; }
.bi-router::before { content: "\f6ec"; }
.bi-ssd-fill::before { content: "\f6ed"; }
.bi-ssd::before { content: "\f6ee"; }
.bi-thunderbolt-fill::before { content: "\f6ef"; }
.bi-thunderbolt::before { content: "\f6f0"; }
.bi-usb-drive-fill::before { content: "\f6f1"; }
@ -1643,7 +1637,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-filetype-pdf::before { content: "\f756"; }
.bi-filetype-php::before { content: "\f757"; }
.bi-filetype-png::before { content: "\f758"; }
.bi-filetype-ppt-1::before { content: "\f759"; }
.bi-filetype-ppt::before { content: "\f75a"; }
.bi-filetype-psd::before { content: "\f75b"; }
.bi-filetype-py::before { content: "\f75c"; }
@ -1659,7 +1652,6 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-filetype-txt::before { content: "\f766"; }
.bi-filetype-wav::before { content: "\f767"; }
.bi-filetype-woff::before { content: "\f768"; }
.bi-filetype-xls-1::before { content: "\f769"; }
.bi-filetype-xls::before { content: "\f76a"; }
.bi-filetype-xml::before { content: "\f76b"; }
.bi-filetype-yml::before { content: "\f76c"; }
@ -1702,3 +1694,288 @@ url("./fonts/bootstrap-icons.woff?524846017b983fc8ded9325d94ed40f3") format("wof
.bi-filetype-json::before { content: "\f791"; }
.bi-filetype-pptx::before { content: "\f792"; }
.bi-filetype-xlsx::before { content: "\f793"; }
.bi-1-circle-fill::before { content: "\f796"; }
.bi-1-circle::before { content: "\f797"; }
.bi-1-square-fill::before { content: "\f798"; }
.bi-1-square::before { content: "\f799"; }
.bi-2-circle-fill::before { content: "\f79c"; }
.bi-2-circle::before { content: "\f79d"; }
.bi-2-square-fill::before { content: "\f79e"; }
.bi-2-square::before { content: "\f79f"; }
.bi-3-circle-fill::before { content: "\f7a2"; }
.bi-3-circle::before { content: "\f7a3"; }
.bi-3-square-fill::before { content: "\f7a4"; }
.bi-3-square::before { content: "\f7a5"; }
.bi-4-circle-fill::before { content: "\f7a8"; }
.bi-4-circle::before { content: "\f7a9"; }
.bi-4-square-fill::before { content: "\f7aa"; }
.bi-4-square::before { content: "\f7ab"; }
.bi-5-circle-fill::before { content: "\f7ae"; }
.bi-5-circle::before { content: "\f7af"; }
.bi-5-square-fill::before { content: "\f7b0"; }
.bi-5-square::before { content: "\f7b1"; }
.bi-6-circle-fill::before { content: "\f7b4"; }
.bi-6-circle::before { content: "\f7b5"; }
.bi-6-square-fill::before { content: "\f7b6"; }
.bi-6-square::before { content: "\f7b7"; }
.bi-7-circle-fill::before { content: "\f7ba"; }
.bi-7-circle::before { content: "\f7bb"; }
.bi-7-square-fill::before { content: "\f7bc"; }
.bi-7-square::before { content: "\f7bd"; }
.bi-8-circle-fill::before { content: "\f7c0"; }
.bi-8-circle::before { content: "\f7c1"; }
.bi-8-square-fill::before { content: "\f7c2"; }
.bi-8-square::before { content: "\f7c3"; }
.bi-9-circle-fill::before { content: "\f7c6"; }
.bi-9-circle::before { content: "\f7c7"; }
.bi-9-square-fill::before { content: "\f7c8"; }
.bi-9-square::before { content: "\f7c9"; }
.bi-airplane-engines-fill::before { content: "\f7ca"; }
.bi-airplane-engines::before { content: "\f7cb"; }
.bi-airplane-fill::before { content: "\f7cc"; }
.bi-airplane::before { content: "\f7cd"; }
.bi-alexa::before { content: "\f7ce"; }
.bi-alipay::before { content: "\f7cf"; }
.bi-android::before { content: "\f7d0"; }
.bi-android2::before { content: "\f7d1"; }
.bi-box-fill::before { content: "\f7d2"; }
.bi-box-seam-fill::before { content: "\f7d3"; }
.bi-browser-chrome::before { content: "\f7d4"; }
.bi-browser-edge::before { content: "\f7d5"; }
.bi-browser-firefox::before { content: "\f7d6"; }
.bi-browser-safari::before { content: "\f7d7"; }
.bi-c-circle-fill::before { content: "\f7da"; }
.bi-c-circle::before { content: "\f7db"; }
.bi-c-square-fill::before { content: "\f7dc"; }
.bi-c-square::before { content: "\f7dd"; }
.bi-capsule-pill::before { content: "\f7de"; }
.bi-capsule::before { content: "\f7df"; }
.bi-car-front-fill::before { content: "\f7e0"; }
.bi-car-front::before { content: "\f7e1"; }
.bi-cassette-fill::before { content: "\f7e2"; }
.bi-cassette::before { content: "\f7e3"; }
.bi-cc-circle-fill::before { content: "\f7e6"; }
.bi-cc-circle::before { content: "\f7e7"; }
.bi-cc-square-fill::before { content: "\f7e8"; }
.bi-cc-square::before { content: "\f7e9"; }
.bi-cup-hot-fill::before { content: "\f7ea"; }
.bi-cup-hot::before { content: "\f7eb"; }
.bi-currency-rupee::before { content: "\f7ec"; }
.bi-dropbox::before { content: "\f7ed"; }
.bi-escape::before { content: "\f7ee"; }
.bi-fast-forward-btn-fill::before { content: "\f7ef"; }
.bi-fast-forward-btn::before { content: "\f7f0"; }
.bi-fast-forward-circle-fill::before { content: "\f7f1"; }
.bi-fast-forward-circle::before { content: "\f7f2"; }
.bi-fast-forward-fill::before { content: "\f7f3"; }
.bi-fast-forward::before { content: "\f7f4"; }
.bi-filetype-sql::before { content: "\f7f5"; }
.bi-fire::before { content: "\f7f6"; }
.bi-google-play::before { content: "\f7f7"; }
.bi-h-circle-fill::before { content: "\f7fa"; }
.bi-h-circle::before { content: "\f7fb"; }
.bi-h-square-fill::before { content: "\f7fc"; }
.bi-h-square::before { content: "\f7fd"; }
.bi-indent::before { content: "\f7fe"; }
.bi-lungs-fill::before { content: "\f7ff"; }
.bi-lungs::before { content: "\f800"; }
.bi-microsoft-teams::before { content: "\f801"; }
.bi-p-circle-fill::before { content: "\f804"; }
.bi-p-circle::before { content: "\f805"; }
.bi-p-square-fill::before { content: "\f806"; }
.bi-p-square::before { content: "\f807"; }
.bi-pass-fill::before { content: "\f808"; }
.bi-pass::before { content: "\f809"; }
.bi-prescription::before { content: "\f80a"; }
.bi-prescription2::before { content: "\f80b"; }
.bi-r-circle-fill::before { content: "\f80e"; }
.bi-r-circle::before { content: "\f80f"; }
.bi-r-square-fill::before { content: "\f810"; }
.bi-r-square::before { content: "\f811"; }
.bi-repeat-1::before { content: "\f812"; }
.bi-repeat::before { content: "\f813"; }
.bi-rewind-btn-fill::before { content: "\f814"; }
.bi-rewind-btn::before { content: "\f815"; }
.bi-rewind-circle-fill::before { content: "\f816"; }
.bi-rewind-circle::before { content: "\f817"; }
.bi-rewind-fill::before { content: "\f818"; }
.bi-rewind::before { content: "\f819"; }
.bi-train-freight-front-fill::before { content: "\f81a"; }
.bi-train-freight-front::before { content: "\f81b"; }
.bi-train-front-fill::before { content: "\f81c"; }
.bi-train-front::before { content: "\f81d"; }
.bi-train-lightrail-front-fill::before { content: "\f81e"; }
.bi-train-lightrail-front::before { content: "\f81f"; }
.bi-truck-front-fill::before { content: "\f820"; }
.bi-truck-front::before { content: "\f821"; }
.bi-ubuntu::before { content: "\f822"; }
.bi-unindent::before { content: "\f823"; }
.bi-unity::before { content: "\f824"; }
.bi-universal-access-circle::before { content: "\f825"; }
.bi-universal-access::before { content: "\f826"; }
.bi-virus::before { content: "\f827"; }
.bi-virus2::before { content: "\f828"; }
.bi-wechat::before { content: "\f829"; }
.bi-yelp::before { content: "\f82a"; }
.bi-sign-stop-fill::before { content: "\f82b"; }
.bi-sign-stop-lights-fill::before { content: "\f82c"; }
.bi-sign-stop-lights::before { content: "\f82d"; }
.bi-sign-stop::before { content: "\f82e"; }
.bi-sign-turn-left-fill::before { content: "\f82f"; }
.bi-sign-turn-left::before { content: "\f830"; }
.bi-sign-turn-right-fill::before { content: "\f831"; }
.bi-sign-turn-right::before { content: "\f832"; }
.bi-sign-turn-slight-left-fill::before { content: "\f833"; }
.bi-sign-turn-slight-left::before { content: "\f834"; }
.bi-sign-turn-slight-right-fill::before { content: "\f835"; }
.bi-sign-turn-slight-right::before { content: "\f836"; }
.bi-sign-yield-fill::before { content: "\f837"; }
.bi-sign-yield::before { content: "\f838"; }
.bi-ev-station-fill::before { content: "\f839"; }
.bi-ev-station::before { content: "\f83a"; }
.bi-fuel-pump-diesel-fill::before { content: "\f83b"; }
.bi-fuel-pump-diesel::before { content: "\f83c"; }
.bi-fuel-pump-fill::before { content: "\f83d"; }
.bi-fuel-pump::before { content: "\f83e"; }
.bi-0-circle-fill::before { content: "\f83f"; }
.bi-0-circle::before { content: "\f840"; }
.bi-0-square-fill::before { content: "\f841"; }
.bi-0-square::before { content: "\f842"; }
.bi-rocket-fill::before { content: "\f843"; }
.bi-rocket-takeoff-fill::before { content: "\f844"; }
.bi-rocket-takeoff::before { content: "\f845"; }
.bi-rocket::before { content: "\f846"; }
.bi-stripe::before { content: "\f847"; }
.bi-subscript::before { content: "\f848"; }
.bi-superscript::before { content: "\f849"; }
.bi-trello::before { content: "\f84a"; }
.bi-envelope-at-fill::before { content: "\f84b"; }
.bi-envelope-at::before { content: "\f84c"; }
.bi-regex::before { content: "\f84d"; }
.bi-text-wrap::before { content: "\f84e"; }
.bi-sign-dead-end-fill::before { content: "\f84f"; }
.bi-sign-dead-end::before { content: "\f850"; }
.bi-sign-do-not-enter-fill::before { content: "\f851"; }
.bi-sign-do-not-enter::before { content: "\f852"; }
.bi-sign-intersection-fill::before { content: "\f853"; }
.bi-sign-intersection-side-fill::before { content: "\f854"; }
.bi-sign-intersection-side::before { content: "\f855"; }
.bi-sign-intersection-t-fill::before { content: "\f856"; }
.bi-sign-intersection-t::before { content: "\f857"; }
.bi-sign-intersection-y-fill::before { content: "\f858"; }
.bi-sign-intersection-y::before { content: "\f859"; }
.bi-sign-intersection::before { content: "\f85a"; }
.bi-sign-merge-left-fill::before { content: "\f85b"; }
.bi-sign-merge-left::before { content: "\f85c"; }
.bi-sign-merge-right-fill::before { content: "\f85d"; }
.bi-sign-merge-right::before { content: "\f85e"; }
.bi-sign-no-left-turn-fill::before { content: "\f85f"; }
.bi-sign-no-left-turn::before { content: "\f860"; }
.bi-sign-no-parking-fill::before { content: "\f861"; }
.bi-sign-no-parking::before { content: "\f862"; }
.bi-sign-no-right-turn-fill::before { content: "\f863"; }
.bi-sign-no-right-turn::before { content: "\f864"; }
.bi-sign-railroad-fill::before { content: "\f865"; }
.bi-sign-railroad::before { content: "\f866"; }
.bi-building-add::before { content: "\f867"; }
.bi-building-check::before { content: "\f868"; }
.bi-building-dash::before { content: "\f869"; }
.bi-building-down::before { content: "\f86a"; }
.bi-building-exclamation::before { content: "\f86b"; }
.bi-building-fill-add::before { content: "\f86c"; }
.bi-building-fill-check::before { content: "\f86d"; }
.bi-building-fill-dash::before { content: "\f86e"; }
.bi-building-fill-down::before { content: "\f86f"; }
.bi-building-fill-exclamation::before { content: "\f870"; }
.bi-building-fill-gear::before { content: "\f871"; }
.bi-building-fill-lock::before { content: "\f872"; }
.bi-building-fill-slash::before { content: "\f873"; }
.bi-building-fill-up::before { content: "\f874"; }
.bi-building-fill-x::before { content: "\f875"; }
.bi-building-fill::before { content: "\f876"; }
.bi-building-gear::before { content: "\f877"; }
.bi-building-lock::before { content: "\f878"; }
.bi-building-slash::before { content: "\f879"; }
.bi-building-up::before { content: "\f87a"; }
.bi-building-x::before { content: "\f87b"; }
.bi-buildings-fill::before { content: "\f87c"; }
.bi-buildings::before { content: "\f87d"; }
.bi-bus-front-fill::before { content: "\f87e"; }
.bi-bus-front::before { content: "\f87f"; }
.bi-ev-front-fill::before { content: "\f880"; }
.bi-ev-front::before { content: "\f881"; }
.bi-globe-americas::before { content: "\f882"; }
.bi-globe-asia-australia::before { content: "\f883"; }
.bi-globe-central-south-asia::before { content: "\f884"; }
.bi-globe-europe-africa::before { content: "\f885"; }
.bi-house-add-fill::before { content: "\f886"; }
.bi-house-add::before { content: "\f887"; }
.bi-house-check-fill::before { content: "\f888"; }
.bi-house-check::before { content: "\f889"; }
.bi-house-dash-fill::before { content: "\f88a"; }
.bi-house-dash::before { content: "\f88b"; }
.bi-house-down-fill::before { content: "\f88c"; }
.bi-house-down::before { content: "\f88d"; }
.bi-house-exclamation-fill::before { content: "\f88e"; }
.bi-house-exclamation::before { content: "\f88f"; }
.bi-house-gear-fill::before { content: "\f890"; }
.bi-house-gear::before { content: "\f891"; }
.bi-house-lock-fill::before { content: "\f892"; }
.bi-house-lock::before { content: "\f893"; }
.bi-house-slash-fill::before { content: "\f894"; }
.bi-house-slash::before { content: "\f895"; }
.bi-house-up-fill::before { content: "\f896"; }
.bi-house-up::before { content: "\f897"; }
.bi-house-x-fill::before { content: "\f898"; }
.bi-house-x::before { content: "\f899"; }
.bi-person-add::before { content: "\f89a"; }
.bi-person-down::before { content: "\f89b"; }
.bi-person-exclamation::before { content: "\f89c"; }
.bi-person-fill-add::before { content: "\f89d"; }
.bi-person-fill-check::before { content: "\f89e"; }
.bi-person-fill-dash::before { content: "\f89f"; }
.bi-person-fill-down::before { content: "\f8a0"; }
.bi-person-fill-exclamation::before { content: "\f8a1"; }
.bi-person-fill-gear::before { content: "\f8a2"; }
.bi-person-fill-lock::before { content: "\f8a3"; }
.bi-person-fill-slash::before { content: "\f8a4"; }
.bi-person-fill-up::before { content: "\f8a5"; }
.bi-person-fill-x::before { content: "\f8a6"; }
.bi-person-gear::before { content: "\f8a7"; }
.bi-person-lock::before { content: "\f8a8"; }
.bi-person-slash::before { content: "\f8a9"; }
.bi-person-up::before { content: "\f8aa"; }
.bi-scooter::before { content: "\f8ab"; }
.bi-taxi-front-fill::before { content: "\f8ac"; }
.bi-taxi-front::before { content: "\f8ad"; }
.bi-amd::before { content: "\f8ae"; }
.bi-database-add::before { content: "\f8af"; }
.bi-database-check::before { content: "\f8b0"; }
.bi-database-dash::before { content: "\f8b1"; }
.bi-database-down::before { content: "\f8b2"; }
.bi-database-exclamation::before { content: "\f8b3"; }
.bi-database-fill-add::before { content: "\f8b4"; }
.bi-database-fill-check::before { content: "\f8b5"; }
.bi-database-fill-dash::before { content: "\f8b6"; }
.bi-database-fill-down::before { content: "\f8b7"; }
.bi-database-fill-exclamation::before { content: "\f8b8"; }
.bi-database-fill-gear::before { content: "\f8b9"; }
.bi-database-fill-lock::before { content: "\f8ba"; }
.bi-database-fill-slash::before { content: "\f8bb"; }
.bi-database-fill-up::before { content: "\f8bc"; }
.bi-database-fill-x::before { content: "\f8bd"; }
.bi-database-fill::before { content: "\f8be"; }
.bi-database-gear::before { content: "\f8bf"; }
.bi-database-lock::before { content: "\f8c0"; }
.bi-database-slash::before { content: "\f8c1"; }
.bi-database-up::before { content: "\f8c2"; }
.bi-database-x::before { content: "\f8c3"; }
.bi-database::before { content: "\f8c4"; }
.bi-houses-fill::before { content: "\f8c5"; }
.bi-houses::before { content: "\f8c6"; }
.bi-nvidia::before { content: "\f8c7"; }
.bi-person-vcard-fill::before { content: "\f8c8"; }
.bi-person-vcard::before { content: "\f8c9"; }
.bi-sina-weibo::before { content: "\f8ca"; }
.bi-tencent-qq::before { content: "\f8cb"; }
.bi-wikipedia::before { content: "\f8cc"; }

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,7 +1,7 @@
<script>
import { init, convert2uplot } from './utils.js'
import { getContext, onMount } from 'svelte'
import { queryStore, gql, getContextClient } from '@urql/svelte'
import { queryStore, gql, getContextClient, mutationStore } from '@urql/svelte'
import { Row, Col, Spinner, Card, Table, Icon } from 'sveltestrap'
import Filters from './filters/Filters.svelte'
import PlotSelection from './PlotSelection.svelte'
@ -10,7 +10,7 @@
import { binsFromFootprint } from './utils.js'
import ScatterPlot from './plots/Scatter.svelte'
import PlotTable from './PlotTable.svelte'
import Roofline from './plots/Roofline.svelte'
import RooflineHeatmap from './plots/RooflineHeatmap.svelte'
const { query: initq } = init()
@ -42,6 +42,20 @@
$: metrics = [...new Set([...metricsInHistograms, ...metricsInScatterplots.flat()])]
const sortOptions = [
{key: 'totalWalltime', label: 'Walltime'},
{key: 'totalNodeHours', label: 'Node Hours'},
{key: 'totalCoreHours', label: 'Core Hours'},
{key: 'totalAccHours', label: 'Accelerator Hours'}
]
const groupOptions = [
{key: 'user', label: 'User Name'},
{key: 'project', label: 'Project ID'}
]
let sortSelection = sortOptions.find((option) => option.key == ccconfig[`analysis_view_selectedTopCategory:${filterPresets.cluster}`]) || sortOptions.find((option) => option.key == ccconfig.analysis_view_selectedTopCategory)
let groupSelection = groupOptions.find((option) => option.key == ccconfig[`analysis_view_selectedTopEntity:${filterPresets.cluster}`]) || groupOptions.find((option) => option.key == ccconfig.analysis_view_selectedTopEntity)
getContext('on-init')(({ data }) => {
if (data != null) {
cluster = data.clusters.find(c => c.name == filterPresets.cluster)
@ -62,23 +76,39 @@
totalJobs
shortJobs
totalWalltime
totalNodeHours
totalCoreHours
totalAccHours
histDuration { count, value }
histNumNodes { count, value }
histNumCores { count, value }
}
topUsers: jobsCount(filter: $jobFilters, groupBy: USER, weight: NODE_HOURS, limit: 5) { name, count }
}
`,
variables: { jobFilters }
})
$: topQuery = queryStore({
client: client,
query: gql`
query($jobFilters: [JobFilter!]!, $paging: PageRequest!, $sortBy: SortByAggregate!, $groupBy: Aggregate!) {
topList: jobsStatistics(filter: $jobFilters, page: $paging, sortBy: $sortBy, groupBy: $groupBy) {
id
totalWalltime
totalNodeHours
totalCoreHours
totalAccHours
}
}
`,
variables: { jobFilters, paging: { itemsPerPage: 10, page: 1 }, sortBy: sortSelection.key.toUpperCase(), groupBy: groupSelection.key.toUpperCase() }
})
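// Hedged example of the variables the query above receives; the values are
// illustrative. `sortBy`/`groupBy` are the upper-cased option keys, so the
// GraphQL enums SortByAggregate/Aggregate are expected to define members such
// as TOTALCOREHOURS and USER:
//   {
//       jobFilters: [{ cluster: { eq: "fritz" } }],  // "fritz" is a made-up cluster name
//       paging: { itemsPerPage: 10, page: 1 },
//       sortBy: "TOTALCOREHOURS",
//       groupBy: "USER"
//   }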
$: footprintsQuery = queryStore({
client: client,
query: gql`
query($jobFilters: [JobFilter!]!, $metrics: [String!]!) {
footprints: jobsFootprints(filter: $jobFilters, metrics: $metrics) {
nodehours,
timeWeights { nodeHours, accHours, coreHours },
metrics { metric, data }
}
}`,
@ -97,6 +127,53 @@
variables: { jobFilters, rows: 50, cols: 50, minX: 0.01, minY: 1., maxX: 1000., maxY }
})
const updateConfigurationMutation = ({ name, value }) => {
return mutationStore({
client: client,
query: gql`
mutation ($name: String!, $value: String!) {
updateConfiguration(name: $name, value: $value)
}
`,
variables: { name, value }
});
}
function updateEntityConfiguration(select) {
if (ccconfig[`analysis_view_selectedTopEntity:${filterPresets.cluster}`] != select) {
updateConfigurationMutation({ name: `analysis_view_selectedTopEntity:${filterPresets.cluster}`, value: JSON.stringify(select) })
.subscribe(res => {
if (res.fetching === false && !res.error) {
// console.log(`analysis_view_selectedTopEntity:${filterPresets.cluster}` + ' -> Updated!')
} else if (res.fetching === false && res.error) {
throw res.error
}
})
} else {
// console.log('No Mutation Required: Entity')
}
};
function updateCategoryConfiguration(select) {
if (ccconfig[`analysis_view_selectedTopCategory:${filterPresets.cluster}`] != select) {
updateConfigurationMutation({ name: `analysis_view_selectedTopCategory:${filterPresets.cluster}`, value: JSON.stringify(select) })
.subscribe(res => {
if (res.fetching === false && !res.error) {
// console.log(`analysis_view_selectedTopCategory:${filterPresets.cluster}` + ' -> Updated!')
} else if (res.fetching === false && res.error) {
throw res.error
}
})
} else {
// console.log('No Mutation Required: Category')
}
};
$: updateEntityConfiguration(groupSelection.key)
$: updateCategoryConfiguration(sortSelection.key)
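// Pattern used by the two reactive statements above: whenever the bound
// <select> changes, the new key is persisted server-side through the
// updateConfiguration mutation, but only if it differs from the stored
// ccconfig value. A condensed sketch (illustrative only):
//   $: if (ccconfig[someKey] != selection.key)
//          updateConfigurationMutation({ name: someKey, value: JSON.stringify(selection.key) })
//              .subscribe((res) => { if (res.fetching === false && res.error) throw res.error })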
onMount(() => filterComponent.update())
</script>
@ -151,36 +228,82 @@
<th scope="col">Total Walltime</th>
<td>{$statsQuery.data.stats[0].totalWalltime}</td>
</tr>
<tr>
<th scope="col">Total Node Hours</th>
<td>{$statsQuery.data.stats[0].totalNodeHours}</td>
</tr>
<tr>
<th scope="col">Total Core Hours</th>
<td>{$statsQuery.data.stats[0].totalCoreHours}</td>
</tr>
<tr>
<th scope="col">Total Accelerator Hours</th>
<td>{$statsQuery.data.stats[0].totalAccHours}</td>
</tr>
</Table>
</Col>
<Col>
<div bind:clientWidth={colWidth1}>
<h5>Top Users</h5>
{#key $statsQuery.data.topUsers}
<Pie
size={colWidth1}
sliceLabel='Hours'
quantities={$statsQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.count)}
entities={$statsQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.name)}
/>
<h5>Top
<select class="p-0" bind:value={groupSelection}>
{#each groupOptions as option}
<option value={option}>
{option.key.charAt(0).toUpperCase() + option.key.slice(1)}s
</option>
{/each}
</select>
</h5>
{#key $topQuery.data}
{#if $topQuery.fetching}
<Spinner/>
{:else if $topQuery.error}
<Card body color="danger">{$topQuery.error.message}</Card>
{:else}
<Pie
size={colWidth1}
sliceLabel={sortSelection.label}
quantities={$topQuery.data.topList.map((t) => t[sortSelection.key])}
entities={$topQuery.data.topList.map((t) => t.id)}
/>
{/if}
{/key}
</div>
</Col>
<Col>
<Table>
<tr class="mb-2"><th>Legend</th><th>User Name</th><th>Node Hours</th></tr>
{#each $statsQuery.data.topUsers.sort((a, b) => b.count - a.count) as { name, count }, i}
<tr>
<td><Icon name="circle-fill" style="color: {colors[i]};"/></td>
<th scope="col"><a href="/monitoring/user/{name}?cluster={cluster.name}">{name}</a></th>
<td>{count}</td>
</tr>
{/each}
</Table>
{#key $topQuery.data}
{#if $topQuery.fetching}
<Spinner/>
{:else if $topQuery.error}
<Card body color="danger">{$topQuery.error.message}</Card>
{:else}
<Table>
<tr class="mb-2">
<th>Legend</th>
<th>{groupSelection.label}</th>
<th>
<select class="p-0" bind:value={sortSelection}>
{#each sortOptions as option}
<option value={option}>
{option.label}
</option>
{/each}
</select>
</th>
</tr>
{#each $topQuery.data.topList as te, i}
<tr>
<td><Icon name="circle-fill" style="color: {colors[i]};"/></td>
{#if groupSelection.key == 'user'}
<th scope="col"><a href="/monitoring/user/{te.id}?cluster={cluster.name}">{te.id}</a></th>
{:else}
<th scope="col"><a href="/monitoring/jobs/?cluster={cluster.name}&project={te.id}&projectMatch=eq">{te.id}</a></th>
{/if}
<td>{te[sortSelection.key]}</td>
</tr>
{/each}
</Table>
{/if}
{/key}
</Col>
</Row>
<Row cols={3} class="mb-2">
@ -192,7 +315,7 @@
{:else if $rooflineQuery.data && cluster}
<div bind:clientWidth={colWidth2}>
{#key $rooflineQuery.data}
<Roofline
<RooflineHeatmap
width={colWidth2} height={300}
tiles={$rooflineQuery.data.rooflineHeatmap}
cluster={cluster.subClusters.length == 1 ? cluster.subClusters[0] : null}
@ -217,13 +340,13 @@
</Col>
<Col>
<div bind:clientWidth={colWidth4}>
{#key $statsQuery.data.stats[0].histNumNodes}
{#key $statsQuery.data.stats[0].histNumCores}
<Histogram
width={colWidth4} height={300}
data={convert2uplot($statsQuery.data.stats[0].histNumNodes)}
title="Number of Nodes Distribution"
xlabel="Allocated Nodes"
xunit="Nodes"
data={convert2uplot($statsQuery.data.stats[0].histNumCores)}
title="Number of Cores Distribution"
xlabel="Allocated Cores"
xunit="Cores"
ylabel="Number of Jobs"
yunit="Jobs"/>
{/key}
@ -244,8 +367,9 @@
<Row>
<Col>
<Card body>
These histograms show the distribution of the averages of all jobs matching the filters. Each job/average is weighted by its node hours.
Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values.
These histograms show the distribution of the averages of all jobs matching the filters. Each job average is weighted by its node hours by default
(accelerator hours for metrics with native accelerator scope, core hours for metrics with native core scope).
Note that some metrics may be disabled for specific subclusters as per metricConfig, which can affect the displayed average values.
</Card>
<br/>
</Col>
@ -257,7 +381,8 @@
let:width
renderFor="analysis"
items={metricsInHistograms.map(metric => ({ metric, ...binsFromFootprint(
$footprintsQuery.data.footprints.nodehours,
$footprintsQuery.data.footprints.timeWeights,
metricConfig(cluster.name, metric)?.scope,
$footprintsQuery.data.footprints.metrics.find(f => f.metric == metric).data, numBins) }))}
itemsPerRow={ccconfig.plot_view_plotsPerRow}>
@ -265,11 +390,11 @@
data={convert2uplot(item.bins)}
width={width} height={250}
title="Average Distribution of '{item.metric}'"
xlabel={`${item.metric} average [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
xlabel={`${item.metric} bin maximum [${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
(metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}]`}
xunit={`${(metricConfig(cluster.name, item.metric)?.unit?.prefix ? metricConfig(cluster.name, item.metric)?.unit?.prefix : '') +
(metricConfig(cluster.name, item.metric)?.unit?.base ? metricConfig(cluster.name, item.metric)?.unit?.base : '')}`}
ylabel="Node Hours"
ylabel="Normalized Hours"
yunit="Hours"/>
</PlotTable>
</Col>
@ -279,7 +404,7 @@
<Col>
<Card body>
Each circle represents one job. The size of a circle is proportional to its node hours. Darker circles mean multiple jobs have the same averages for the respective metrics.
Note that some metrics could be disabled for specific subclusters as per metriConfig and thus could affect shown average values.
Note that some metrics could be disabled for specific subclusters as per metricConfig and thus could affect shown average values.
</Card>
<br/>
</Col>
@ -301,7 +426,7 @@
(metricConfig(cluster.name, item.m1)?.unit?.base ? metricConfig(cluster.name, item.m1)?.unit?.base : '')}]`}
yLabel={`${item.m2} [${(metricConfig(cluster.name, item.m2)?.unit?.prefix ? metricConfig(cluster.name, item.m2)?.unit?.prefix : '') +
(metricConfig(cluster.name, item.m2)?.unit?.base ? metricConfig(cluster.name, item.m2)?.unit?.base : '')}]`}
X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.nodehours} />
X={item.f1} Y={item.f2} S={$footprintsQuery.data.footprints.timeWeights.nodeHours} />
</PlotTable>
</Col>
</Row>
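The card text above explains that each job average is weighted by node, core,
or accelerator hours when binned. A hedged sketch of such weighted binning
(function name and signature are illustrative; the actual implementation lives
in utils.js as binsFromFootprint):

function weightedBins(averages, weights, numBins) {
    const max = Math.max(...averages.filter((a) => !Number.isNaN(a)))
    const bins = new Array(numBins).fill(0)
    for (let i = 0; i < averages.length; i++) {
        if (Number.isNaN(averages[i])) continue
        const b = Math.min(Math.floor((averages[i] / max) * numBins), numBins - 1)
        bins[b] += weights ? weights[i] : 1 // accumulate hours, not plain job counts
    }
    return bins
}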

View File

@ -1,110 +1,178 @@
<script>
import { Icon, Button, InputGroup, Input, Collapse,
Navbar, NavbarBrand, Nav, NavItem, NavLink, NavbarToggler,
Dropdown, DropdownToggle, DropdownMenu, DropdownItem, InputGroupText } from 'sveltestrap'
import {
Icon,
Collapse,
Navbar,
NavbarBrand,
Nav,
NavbarToggler,
Dropdown,
DropdownToggle,
DropdownMenu,
} from "sveltestrap";
import NavbarLinks from "./NavbarLinks.svelte";
import NavbarTools from "./NavbarTools.svelte";
export let username // empty string if auth. is disabled, otherwise the username as string
export let authlevel // Integer
export let clusters // array of names
export let roles // Role Enum-Like
export let username; // empty string if auth. is disabled, otherwise the username as string
export let authlevel; // Integer
export let clusters; // array of names
export let roles; // Role Enum-Like
let isOpen = false
let isOpen = false;
let screenSize;
const userviews = [
{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
{ title: `Job Search`, href: '/monitoring/jobs/', icon: 'card-list' },
{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
]
const jobsTitle = new Map();
jobsTitle.set(2, "Job Search");
jobsTitle.set(3, "Managed Jobs");
jobsTitle.set(4, "Jobs");
jobsTitle.set(5, "Jobs");
const usersTitle = new Map();
usersTitle.set(3, "Managed Users");
usersTitle.set(4, "Users");
usersTitle.set(5, "Users");
const managerviews = [
{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
{ title: `Managed Jobs`, href: '/monitoring/jobs/', icon: 'card-list' },
{ title: `Managed Users`, href: '/monitoring/users/', icon: 'people-fill' },
{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
]
const supportviews = [
{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
{ title: 'Jobs', href: '/monitoring/jobs/', icon: 'card-list' },
{ title: 'Users', href: '/monitoring/users/', icon: 'people-fill' },
{ title: 'Projects', href: '/monitoring/projects/', icon: 'folder' },
{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
]
const adminviews = [
{ title: 'My Jobs', href: `/monitoring/user/${username}`, icon: 'bar-chart-line-fill' },
{ title: 'Jobs', href: '/monitoring/jobs/', icon: 'card-list' },
{ title: 'Users', href: '/monitoring/users/', icon: 'people-fill' },
{ title: 'Projects', href: '/monitoring/projects/', icon: 'folder' },
{ title: 'Tags', href: '/monitoring/tags/', icon: 'tags' }
]
const viewsPerCluster = [
{ title: 'Analysis', requiredRole: roles.support, href: '/monitoring/analysis/', icon: 'graph-up' },
{ title: 'Systems', requiredRole: roles.admin, href: '/monitoring/systems/', icon: 'cpu' },
{ title: 'Status', requiredRole: roles.admin, href: '/monitoring/status/', icon: 'cpu' },
]
const views = [
{
title: "My Jobs",
requiredRole: roles.user,
href: `/monitoring/user/${username}`,
icon: "bar-chart-line-fill",
perCluster: false,
menu: "none",
},
{
title: jobsTitle.get(authlevel),
requiredRole: roles.user,
href: `/monitoring/jobs/`,
icon: "card-list",
perCluster: false,
menu: "none",
},
{
title: usersTitle.get(authlevel),
requiredRole: roles.manager,
href: "/monitoring/users/",
icon: "people-fill",
perCluster: false,
menu: "Groups",
},
{
title: "Projects",
requiredRole: roles.support,
href: "/monitoring/projects/",
icon: "folder",
perCluster: false,
menu: "Groups",
},
{
title: "Tags",
requiredRole: roles.user,
href: "/monitoring/tags/",
icon: "tags",
perCluster: false,
menu: "Groups",
},
{
title: "Analysis",
requiredRole: roles.support,
href: "/monitoring/analysis/",
icon: "graph-up",
perCluster: true,
menu: "Stats",
},
{
title: "Nodes",
requiredRole: roles.admin,
href: "/monitoring/systems/",
icon: "cpu",
perCluster: true,
menu: "Groups",
},
{
title: "Status",
requiredRole: roles.admin,
href: "/monitoring/status/",
icon: "cpu",
perCluster: true,
menu: "Stats",
},
];
</script>
<Navbar color="light" light expand="lg" fixed="top">
<svelte:window bind:innerWidth={screenSize} />
<Navbar color="light" light expand="md" fixed="top">
<NavbarBrand href="/">
<img alt="ClusterCockpit Logo" src="/img/logo.png" height="25rem">
<img alt="ClusterCockpit Logo" src="/img/logo.png" height="25rem" />
</NavbarBrand>
<NavbarToggler on:click={() => (isOpen = !isOpen)} />
<Collapse {isOpen} navbar expand="lg" on:update={({ detail }) => (isOpen = detail.isOpen)}>
<Nav pills>
{#if authlevel == roles.admin}
{#each adminviews as item}
<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
{/each}
{:else if authlevel == roles.support}
{#each supportviews as item}
<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
{/each}
{:else if authlevel == roles.manager}
{#each managerviews as item}
<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
{/each}
{:else} <!-- Compatibility: Handle "user role" or "no role" as identical-->
{#each userviews as item}
<NavLink href={item.href} active={window.location.pathname == item.href}><Icon name={item.icon}/> {item.title}</NavLink>
{/each}
{/if}
{#each viewsPerCluster.filter(item => item.requiredRole <= authlevel) as item}
<NavItem>
<Dropdown nav inNavbar>
<Collapse
style="justify-content: space-between"
{isOpen}
navbar
expand="md"
on:update={({ detail }) => (isOpen = detail.isOpen)}
>
<Nav navbar>
{#if screenSize > 1500 || screenSize < 768}
<NavbarLinks
{clusters}
links={views.filter(
(item) => item.requiredRole <= authlevel
)}
/>
{:else if screenSize > 1300}
<NavbarLinks
{clusters}
links={views.filter(
(item) =>
item.requiredRole <= authlevel &&
item.menu != "Stats"
)}
/>
<Dropdown nav>
<DropdownToggle nav caret>
<Icon name="graph-up" />
Stats
</DropdownToggle>
<DropdownMenu class="dropdown-menu-lg-end">
<NavbarLinks
{clusters}
links={views.filter(
(item) =>
item.requiredRole <= authlevel &&
item.menu == "Stats"
)}
/>
</DropdownMenu>
</Dropdown>
{:else}
<NavbarLinks
{clusters}
links={views.filter(
(item) =>
item.requiredRole <= authlevel &&
item.menu == "none"
)}
/>
{#each Array("Groups", "Stats") as menu}
<Dropdown nav>
<DropdownToggle nav caret>
<Icon name={item.icon}/> {item.title}
{menu}
</DropdownToggle>
<DropdownMenu>
{#each clusters as cluster}
<DropdownItem href={item.href + cluster.name} active={window.location.pathname == item.href + cluster.name}>
{cluster.name}
</DropdownItem>
{/each}
<DropdownMenu class="dropdown-menu-lg-end">
<NavbarLinks
{clusters}
links={views.filter(
(item) =>
item.requiredRole <= authlevel &&
item.menu == menu
)}
/>
</DropdownMenu>
</Dropdown>
</NavItem>
{/each}
{/each}
{/if}
</Nav>
<NavbarTools {username} {authlevel} {roles} {screenSize} />
</Collapse>
<div class="d-flex">
<form method="GET" action="/search">
<InputGroup>
<Input type="text" placeholder="Search 'type:<query>' ..." name="searchId"/>
<Button outline type="submit"><Icon name="search"/></Button>
<InputGroupText style="cursor:help;" title={(authlevel >= roles.support) ? "Example: 'projectId:a100cd', Types are: jobId | jobName | projectId | arrayJobId | username | name" : "Example: 'jobName:myjob', Types are jobId | jobName | projectId | arrayJobId "}><Icon name="info-circle"/></InputGroupText>
</InputGroup>
</form>
{#if username}
<form method="POST" action="/logout">
<Button outline color="success" type="submit" style="margin-left: 10px;">
<Icon name="box-arrow-right"/> Logout {username}
</Button>
</form>
{/if}
<Button outline on:click={() => window.location.href = '/config'} style="margin-left: 10px;">
<Icon name="gear"/>
</Button>
</div>
</Navbar>
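The navigation above is now built from a single `views` array: every entry
carries a `requiredRole`, and each breakpoint branch renders
`views.filter((item) => item.requiredRole <= authlevel)` plus a menu filter.
A hedged illustration (the numeric role values are assumptions; the real
enum-like mapping arrives via the `roles` prop):

const roles = { user: 2, manager: 3, support: 4, admin: 5 } // illustrative mapping
const views = [
    { title: "Projects", requiredRole: roles.support, menu: "Groups" },
    { title: "Status", requiredRole: roles.admin, menu: "Stats" },
]
const authlevel = roles.support
// Links a support user gets outside the "Stats" dropdown on medium screens:
const visible = views.filter(
    (item) => item.requiredRole <= authlevel && item.menu != "Stats"
) // -> [{ title: "Projects", ... }]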

View File

@ -4,6 +4,7 @@
groupByScope,
fetchMetricsStore,
checkMetricDisabled,
transformDataForRoofline
} from "./utils.js";
import {
Row,
@ -131,7 +132,6 @@
let plots = {},
jobTags,
fullWidth,
statsTable;
$: document.title = $initq.fetching
? "Loading..."
@ -190,7 +190,6 @@
}));
</script>
<div class="row" bind:clientWidth={fullWidth} />
<Row>
<Col>
{#if $initq.error}
@ -245,7 +244,6 @@
{/if}
<Col>
<Polar
size={fullWidth / 4.1}
metrics={ccconfig[
`job_view_polarPlotMetrics:${$initq.data.job.cluster}`
] || ccconfig[`job_view_polarPlotMetrics`]}
@ -255,19 +253,18 @@
</Col>
<Col>
<Roofline
width={fullWidth / 3 - 10}
height={fullWidth / 5}
renderTime={true}
cluster={clusters
.find((c) => c.name == $initq.data.job.cluster)
.subClusters.find(
(sc) => sc.name == $initq.data.job.subCluster
)}
flopsAny={$jobMetrics.data.jobMetrics.find(
(m) => m.name == "flops_any" && m.scope == "node"
)}
memBw={$jobMetrics.data.jobMetrics.find(
(m) => m.name == "mem_bw" && m.scope == "node"
)}
data={
transformDataForRoofline (
$jobMetrics.data.jobMetrics.find((m) => m.name == "flops_any" && m.scope == "node").metric,
$jobMetrics.data.jobMetrics.find((m) => m.name == "mem_bw" && m.scope == "node").metric
)
}
/>
</Col>
{:else}
@ -275,8 +272,7 @@
<Col />
{/if}
</Row>
<br />
<Row>
<Row class="mb-3">
<Col xs="auto">
{#if $initq.data}
<TagManagement job={$initq.data.job} bind:jobTags />
@ -293,7 +289,6 @@
<Zoom timeseriesPlots={plots} />
</Col> -->
</Row>
<br />
<Row>
<Col>
{#if $jobMetrics.error}
@ -340,8 +335,7 @@
{/if}
</Col>
</Row>
<br />
<Row>
<Row class="mt-2">
<Col>
{#if $initq.data}
<TabContent>

View File

@ -0,0 +1,39 @@
<script>
import {
Icon,
NavLink,
Dropdown,
DropdownToggle,
DropdownMenu,
DropdownItem,
} from "sveltestrap";
export let clusters; // array of names
export let links; // array of nav links
</script>
{#each links as item}
{#if !item.perCluster}
<NavLink href={item.href} active={window.location.pathname == item.href}
><Icon name={item.icon} /> {item.title}</NavLink
>
{:else}
<Dropdown nav inNavbar>
<DropdownToggle nav caret>
<Icon name={item.icon} />
{item.title}
</DropdownToggle>
<DropdownMenu class="dropdown-menu-lg-end">
{#each clusters as cluster}
<DropdownItem
href={item.href + cluster.name}
active={window.location.pathname ==
item.href + cluster.name}
>
{cluster.name}
</DropdownItem>
{/each}
</DropdownMenu>
</Dropdown>
{/if}
{/each}
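A hedged usage sketch of this component (cluster and link objects are
illustrative): entries with `perCluster: false` render as plain NavLinks,
while `perCluster: true` entries expand into one dropdown item per cluster,
e.g. linking to /monitoring/systems/fritz.

<NavbarLinks
    clusters={[{ name: "fritz" }, { name: "alex" }]}
    links={[
        { title: "Tags", href: "/monitoring/tags/", icon: "tags", perCluster: false },
        { title: "Nodes", href: "/monitoring/systems/", icon: "cpu", perCluster: true },
    ]}
/>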

View File

@ -0,0 +1,127 @@
<script>
import {
Icon,
Nav,
NavItem,
InputGroup,
Input,
Button,
InputGroupText,
Container,
Row,
Col,
} from "sveltestrap";
export let username; // empty string if auth. is disabled, otherwise the username as string
export let authlevel; // Integer
export let roles; // Role Enum-Like
export let screenSize; // screensize
</script>
<Nav navbar>
{#if screenSize >= 768}
<NavItem>
<form method="GET" action="/search">
<InputGroup>
<Input
type="text"
placeholder="Search 'type:<query>' ..."
name="searchId"
style="margin-left: 10px;"
/>
<!-- bootstrap classes w/o effect -->
<Button outline type="submit"><Icon name="search" /></Button
>
<InputGroupText
style="cursor:help;"
title={authlevel >= roles.support
? "Example: 'projectId:a100cd', Types are: jobId | jobName | projectId | arrayJobId | username | name"
: "Example: 'jobName:myjob', Types are jobId | jobName | projectId | arrayJobId "}
><Icon name="info-circle" /></InputGroupText
>
</InputGroup>
</form>
</NavItem>
{#if username}
<NavItem>
<form method="POST" action="/logout">
<Button
outline
color="success"
type="submit"
style="margin-left: 10px;"
>
{#if screenSize > 1630}
<Icon name="box-arrow-right" /> Logout {username}
{:else}
<Icon name="box-arrow-right" />
{/if}
</Button>
</form>
</NavItem>
{/if}
<NavItem>
<Button
outline
on:click={() => (window.location.href = "/config")}
style="margin-left: 10px;"
>
<Icon name="gear" />
</Button>
</NavItem>
{:else}
<NavItem>
<Container>
<Row cols={2}>
<Col xs="6">
<form method="POST" action="/logout">
<Button
outline
color="success"
type="submit"
size="sm"
class="my-2 w-100"
>
<Icon name="box-arrow-right" /> Logout {username}
</Button>
</form>
</Col>
<Col xs="6">
<Button
outline
on:click={() => (window.location.href = "/config")}
size="sm"
class="my-2 w-100"
>
{#if authlevel >= roles.admin}
<Icon name="gear" /> Admin Settings
{:else}
<Icon name="gear" /> Plotting Options
{/if}
</Button>
</Col>
</Row>
</Container>
</NavItem>
<NavItem style="margin-left: 10px; margin-right:10px;">
<form method="GET" action="/search">
<InputGroup>
<Input
type="text"
placeholder="Search 'type:<query>' ..."
name="searchId"
/>
<Button outline type="submit"><Icon name="search" /></Button
>
<InputGroupText
style="cursor:help;"
title={authlevel >= roles.support
? "Example: 'projectId:a100cd', Types are: jobId | jobName | projectId | arrayJobId | username | name"
: "Example: 'jobName:myjob', Types are jobId | jobName | projectId | arrayJobId "}
><Icon name="info-circle" /></InputGroupText
>
</InputGroup>
</form>
</NavItem>
{/if}
</Nav>

View File

@ -212,6 +212,7 @@
.subCluster}
series={item.metric.series}
resources={[{hostname: hostname}]}
forNode={true}
/>
{:else if item.disabled === true && item.metric}
<Card

View File

@ -1,82 +1,319 @@
<script>
import Refresher from './joblist/Refresher.svelte'
import Roofline, { transformPerNodeData } from './plots/Roofline.svelte'
import Pie, { colors } from './plots/Pie.svelte'
import Histogram from './plots/Histogram.svelte'
import { Row, Col, Spinner, Card, CardHeader, CardTitle, CardBody, Table, Progress, Icon } from 'sveltestrap'
import { init, convert2uplot } from './utils.js'
import { scaleNumbers } from './units.js'
import { queryStore, gql, getContextClient } from '@urql/svelte'
import { getContext } from "svelte";
import Refresher from "./joblist/Refresher.svelte";
import Roofline from "./plots/Roofline.svelte";
import Pie, { colors } from "./plots/Pie.svelte";
import Histogram from "./plots/Histogram.svelte";
import {
Row,
Col,
Spinner,
Card,
CardHeader,
CardTitle,
CardBody,
Table,
Progress,
Icon,
} from "sveltestrap";
import { init, convert2uplot, transformPerNodeDataForRoofline } from "./utils.js";
import { scaleNumbers } from "./units.js";
import {
queryStore,
gql,
getContextClient,
mutationStore,
} from "@urql/svelte";
const { query: initq } = init()
const { query: initq } = init();
const ccconfig = getContext("cc-config");
export let cluster
export let cluster;
let plotWidths = [], colWidth1 = 0, colWidth2
let from = new Date(Date.now() - 5 * 60 * 1000), to = new Date(Date.now())
let plotWidths = [],
colWidth1,
colWidth2
let from = new Date(Date.now() - 5 * 60 * 1000),
to = new Date(Date.now());
const topOptions = [
{ key: "totalJobs", label: "Jobs" },
{ key: "totalNodes", label: "Nodes" },
{ key: "totalCores", label: "Cores" },
{ key: "totalAccs", label: "Accelerators" },
];
let topProjectSelection =
topOptions.find(
(option) =>
option.key ==
ccconfig[`status_view_selectedTopProjectCategory:${cluster}`]
) ||
topOptions.find(
(option) =>
option.key == ccconfig.status_view_selectedTopProjectCategory
);
let topUserSelection =
topOptions.find(
(option) =>
option.key ==
ccconfig[`status_view_selectedTopUserCategory:${cluster}`]
) ||
topOptions.find(
(option) =>
option.key == ccconfig.status_view_selectedTopUserCategory
);
const client = getContextClient();
$: mainQuery = queryStore({
client: client,
query: gql`query($cluster: String!, $filter: [JobFilter!]!, $metrics: [String!], $from: Time!, $to: Time!) {
nodeMetrics(cluster: $cluster, metrics: $metrics, from: $from, to: $to) {
host
subCluster
metrics {
name
scope
metric {
timestep
unit { base, prefix }
series { data }
query: gql`
query (
$cluster: String!
$filter: [JobFilter!]!
$metrics: [String!]
$from: Time!
$to: Time!
) {
nodeMetrics(
cluster: $cluster
metrics: $metrics
from: $from
to: $to
) {
host
subCluster
metrics {
name
scope
metric {
timestep
unit {
base
prefix
}
series {
data
}
}
}
}
stats: jobsStatistics(filter: $filter) {
histDuration {
count
value
}
histNumNodes {
count
value
}
histNumCores {
count
value
}
histNumAccs {
count
value
}
}
allocatedNodes(cluster: $cluster) {
name
count
}
}
}
`,
variables: {
cluster: cluster,
metrics: ["flops_any", "mem_bw"],
from: from.toISOString(),
to: to.toISOString(),
filter: [{ state: ["running"] }, { cluster: { eq: cluster } }],
},
});
stats: jobsStatistics(filter: $filter) {
histDuration { count, value }
histNumNodes { count, value }
}
const paging = { itemsPerPage: 10, page: 1 }; // Top 10
$: topUserQuery = queryStore({
client: client,
query: gql`
query (
$filter: [JobFilter!]!
$paging: PageRequest!
$sortBy: SortByAggregate!
) {
topUser: jobsStatistics(
filter: $filter
page: $paging
sortBy: $sortBy
groupBy: USER
) {
id
totalJobs
totalNodes
totalCores
totalAccs
}
}
`,
variables: {
filter: [{ state: ["running"] }, { cluster: { eq: cluster } }],
paging,
sortBy: topUserSelection.key.toUpperCase(),
},
});
allocatedNodes(cluster: $cluster) { name, count }
topUsers: jobsCount(filter: $filter, groupBy: USER, weight: NODE_COUNT, limit: 10) { name, count }
topProjects: jobsCount(filter: $filter, groupBy: PROJECT, weight: NODE_COUNT, limit: 10) { name, count }
}`,
variables: {
cluster: cluster, metrics: ['flops_any', 'mem_bw'], from: from.toISOString(), to: to.toISOString(),
filter: [{ state: ['running'] }, { cluster: { eq: cluster } }]
}
})
$: topProjectQuery = queryStore({
client: client,
query: gql`
query (
$filter: [JobFilter!]!
$paging: PageRequest!
$sortBy: SortByAggregate!
) {
topProjects: jobsStatistics(
filter: $filter
page: $paging
sortBy: $sortBy
groupBy: PROJECT
) {
id
totalJobs
totalNodes
totalCores
totalAccs
}
}
`,
variables: {
filter: [{ state: ["running"] }, { cluster: { eq: cluster } }],
paging,
sortBy: topProjectSelection.key.toUpperCase(),
},
});
const sumUp = (data, subcluster, metric) => data.reduce((sum, node) => node.subCluster == subcluster
? sum + (node.metrics.find(m => m.name == metric)?.metric.series.reduce((sum, series) => sum + series.data[series.data.length - 1], 0) || 0)
: sum, 0)
const sumUp = (data, subcluster, metric) =>
data.reduce(
(sum, node) =>
node.subCluster == subcluster
? sum +
(node.metrics
.find((m) => m.name == metric)
?.metric.series.reduce(
(sum, series) =>
sum + series.data[series.data.length - 1],
0
) || 0)
: sum,
0
);
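    // sumUp takes the *last* sample of every matching node series and adds
    // them up per subcluster, i.e. a current-utilization snapshot.
    // Illustrative data (names made up):
    //   const data = [{ subCluster: "main", metrics: [{ name: "flops_any",
    //       metric: { series: [{ data: [1.0, 2.0] }, { data: [3.0, 4.0] }] } }] }]
    //   sumUp(data, "main", "flops_any") // -> 6.0 (2.0 + 4.0, last entries only)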
let allocatedNodes = {}, flopRate = {}, flopRateUnitPrefix = {}, flopRateUnitBase = {}, memBwRate = {}, memBwRateUnitPrefix = {}, memBwRateUnitBase = {}
let allocatedNodes = {},
flopRate = {},
flopRateUnitPrefix = {},
flopRateUnitBase = {},
memBwRate = {},
memBwRateUnitPrefix = {},
memBwRateUnitBase = {};
$: if ($initq.data && $mainQuery.data) {
let subClusters = $initq.data.clusters.find(c => c.name == cluster).subClusters
let subClusters = $initq.data.clusters.find(
(c) => c.name == cluster
).subClusters;
for (let subCluster of subClusters) {
allocatedNodes[subCluster.name] = $mainQuery.data.allocatedNodes.find(({ name }) => name == subCluster.name)?.count || 0
flopRate[subCluster.name] = Math.floor(sumUp($mainQuery.data.nodeMetrics, subCluster.name, 'flops_any') * 100) / 100
flopRateUnitPrefix[subCluster.name] = subCluster.flopRateSimd.unit.prefix
flopRateUnitBase[subCluster.name] = subCluster.flopRateSimd.unit.base
memBwRate[subCluster.name] = Math.floor(sumUp($mainQuery.data.nodeMetrics, subCluster.name, 'mem_bw') * 100) / 100
memBwRateUnitPrefix[subCluster.name] = subCluster.memoryBandwidth.unit.prefix
memBwRateUnitBase[subCluster.name] = subCluster.memoryBandwidth.unit.base
allocatedNodes[subCluster.name] =
$mainQuery.data.allocatedNodes.find(
({ name }) => name == subCluster.name
)?.count || 0;
flopRate[subCluster.name] =
Math.floor(
sumUp(
$mainQuery.data.nodeMetrics,
subCluster.name,
"flops_any"
) * 100
) / 100;
flopRateUnitPrefix[subCluster.name] =
subCluster.flopRateSimd.unit.prefix;
flopRateUnitBase[subCluster.name] =
subCluster.flopRateSimd.unit.base;
memBwRate[subCluster.name] =
Math.floor(
sumUp(
$mainQuery.data.nodeMetrics,
subCluster.name,
"mem_bw"
) * 100
) / 100;
memBwRateUnitPrefix[subCluster.name] =
subCluster.memoryBandwidth.unit.prefix;
memBwRateUnitBase[subCluster.name] =
subCluster.memoryBandwidth.unit.base;
}
}
const updateConfigurationMutation = ({ name, value }) => {
return mutationStore({
client: client,
query: gql`
mutation ($name: String!, $value: String!) {
updateConfiguration(name: $name, value: $value)
}
`,
variables: { name, value },
});
};
function updateTopUserConfiguration(select) {
if (
ccconfig[`status_view_selectedTopUserCategory:${cluster}`] != select
) {
updateConfigurationMutation({
name: `status_view_selectedTopUserCategory:${cluster}`,
value: JSON.stringify(select),
}).subscribe((res) => {
if (res.fetching === false && !res.error) {
// console.log(`status_view_selectedTopUserCategory:${cluster}` + ' -> Updated!')
} else if (res.fetching === false && res.error) {
throw res.error;
}
});
} else {
// console.log('No Mutation Required: Top User')
}
}
function updateTopProjectConfiguration(select) {
if (
ccconfig[`status_view_selectedTopProjectCategory:${cluster}`] !=
select
) {
updateConfigurationMutation({
name: `status_view_selectedTopProjectCategory:${cluster}`,
value: JSON.stringify(select),
}).subscribe((res) => {
if (res.fetching === false && !res.error) {
// console.log(`status_view_selectedTopProjectCategory:${cluster}` + ' -> Updated!')
} else if (res.fetching === false && res.error) {
throw res.error;
}
});
} else {
// console.log('No Mutation Required: Top Project')
}
}
$: updateTopUserConfiguration(topUserSelection.key);
$: updateTopProjectConfiguration(topProjectSelection.key);
</script>
<!-- Loading indicator & Refresh -->
<Row>
<Col xs="auto" style="align-self: flex-end;">
<h4 class="mb-0" >Current utilization of cluster "{cluster}"</h4>
<h4 class="mb-0">Current utilization of cluster "{cluster}"</h4>
</Col>
<Col xs="auto">
{#if $initq.fetching || $mainQuery.fetching}
<Spinner/>
<Spinner />
{:else if $initq.error}
<Card body color="danger">{$initq.error.message}</Card>
{:else}
@ -84,10 +321,13 @@
{/if}
</Col>
<Col xs="auto" style="margin-left: auto;">
<Refresher initially={120} on:reload={() => {
from = new Date(Date.now() - 5 * 60 * 1000)
to = new Date(Date.now())
}} />
<Refresher
initially={120}
on:reload={() => {
from = new Date(Date.now() - 5 * 60 * 1000);
to = new Date(Date.now());
}}
/>
</Col>
</Row>
{#if $mainQuery.error}
@ -98,43 +338,85 @@
</Row>
{/if}
<hr>
<hr />
<!-- Gauges & Roofline per Subcluster-->
{#if $initq.data && $mainQuery.data}
{#each $initq.data.clusters.find(c => c.name == cluster).subClusters as subCluster, i}
<Row cols={2} class="mb-3 justify-content-center">
<Col xs="4" class="px-3">
{#each $initq.data.clusters.find((c) => c.name == cluster).subClusters as subCluster, i}
<Row class="mb-3 justify-content-center">
<Col md="4" class="px-3">
<Card class="h-auto mt-1">
<CardHeader>
<CardTitle class="mb-0">SubCluster "{subCluster.name}"</CardTitle>
<CardTitle class="mb-0"
>SubCluster "{subCluster.name}"</CardTitle
>
</CardHeader>
<CardBody>
<Table borderless>
<tr class="py-2">
<th scope="col">Allocated Nodes</th>
<td style="min-width: 100px;"><div class="col"><Progress value={allocatedNodes[subCluster.name]} max={subCluster.numberOfNodes}/></div></td>
<td>{allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes} Nodes</td>
<td style="min-width: 100px;"
><div class="col">
<Progress
value={allocatedNodes[
subCluster.name
]}
max={subCluster.numberOfNodes}
/>
</div></td
>
<td
>{allocatedNodes[subCluster.name]} / {subCluster.numberOfNodes}
Nodes</td
>
</tr>
<tr class="py-2">
<th scope="col">Flop Rate (Any) <Icon name="info-circle" class="p-1" style="cursor: help;" title="Flops[Any] = (Flops[Double] x 2) + Flops[Single]"/></th>
<td style="min-width: 100px;"><div class="col"><Progress value={flopRate[subCluster.name]} max={subCluster.flopRateSimd.value * subCluster.numberOfNodes}/></div></td>
<th scope="col"
>Flop Rate (Any) <Icon
name="info-circle"
class="p-1"
style="cursor: help;"
title="Flops[Any] = (Flops[Double] x 2) + Flops[Single]"
/></th
>
<td style="min-width: 100px;"
><div class="col">
<Progress
value={flopRate[subCluster.name]}
max={subCluster.flopRateSimd.value *
subCluster.numberOfNodes}
/>
</div></td
>
<td>
{scaleNumbers(flopRate[subCluster.name],
(subCluster.flopRateSimd.value * subCluster.numberOfNodes),
flopRateUnitPrefix[subCluster.name])
}{flopRateUnitBase[subCluster.name]} [Max]
{scaleNumbers(
flopRate[subCluster.name],
subCluster.flopRateSimd.value *
subCluster.numberOfNodes,
flopRateUnitPrefix[subCluster.name]
)}{flopRateUnitBase[subCluster.name]} [Max]
</td>
</tr>
<tr class="py-2">
<th scope="col">MemBw Rate</th>
<td style="min-width: 100px;"><div class="col"><Progress value={memBwRate[subCluster.name]} max={subCluster.memoryBandwidth.value * subCluster.numberOfNodes}/></div></td>
<td style="min-width: 100px;"
><div class="col">
<Progress
value={memBwRate[subCluster.name]}
max={subCluster.memoryBandwidth
.value *
subCluster.numberOfNodes}
/>
</div></td
>
<td>
{scaleNumbers(memBwRate[subCluster.name],
(subCluster.memoryBandwidth.value * subCluster.numberOfNodes),
memBwRateUnitPrefix[subCluster.name])
}{memBwRateUnitBase[subCluster.name]} [Max]
{scaleNumbers(
memBwRate[subCluster.name],
subCluster.memoryBandwidth.value *
subCluster.numberOfNodes,
memBwRateUnitPrefix[subCluster.name]
)}{memBwRateUnitBase[subCluster.name]} [Max]
</td>
</tr>
</Table>
@ -145,82 +427,196 @@
<div bind:clientWidth={plotWidths[i]}>
{#key $mainQuery.data.nodeMetrics}
<Roofline
width={plotWidths[i] - 10} height={300} colorDots={true} showTime={false} cluster={subCluster}
data={transformPerNodeData($mainQuery.data.nodeMetrics.filter(data => data.subCluster == subCluster.name))} />
allowSizeChange={true}
width={plotWidths[i] - 10}
height={300}
cluster={subCluster}
data={
transformPerNodeDataForRoofline(
$mainQuery.data.nodeMetrics.filter(
(data) => data.subCluster == subCluster.name
)
)
}
/>
{/key}
</div>
</Col>
</Row>
{/each}
<hr style="margin-top: -1em;">
<hr/>
<!-- Usage Stats as Histograms -->
<Row cols={4}>
<Row>
<Col class="p-2">
<div bind:clientWidth={colWidth1}>
<h4 class="text-center">Top Users</h4>
{#key $mainQuery.data}
<Pie
size={colWidth1}
sliceLabel='Jobs'
quantities={$mainQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.count)}
entities={$mainQuery.data.topUsers.sort((a, b) => b.count - a.count).map((tu) => tu.name)}
/>
<h4 class="text-center">
Top Users on {cluster.charAt(0).toUpperCase() +
cluster.slice(1)}
</h4>
{#key $topUserQuery.data}
{#if $topUserQuery.fetching}
<Spinner />
{:else if $topUserQuery.error}
<Card body color="danger"
>{$topUserQuery.error.message}</Card
>
{:else}
<Pie
size={colWidth1}
sliceLabel={topUserSelection.label}
quantities={$topUserQuery.data.topUser.map(
(tu) => tu[topUserSelection.key]
)}
entities={$topUserQuery.data.topUser.map(
(tu) => tu.id
)}
/>
{/if}
{/key}
</div>
</Col>
<Col class="px-4 py-2">
<Table>
<tr class="mb-2"><th>Legend</th><th>User Name</th><th>Number of Nodes</th></tr>
{#each $mainQuery.data.topUsers.sort((a, b) => b.count - a.count) as { name, count }, i}
<tr>
<td><Icon name="circle-fill" style="color: {colors[i]};"/></td>
<th scope="col"><a href="/monitoring/user/{name}?cluster={cluster}&state=running">{name}</a></th>
<td>{count}</td>
</tr>
{/each}
</Table>
{#key $topUserQuery.data}
{#if $topUserQuery.fetching}
<Spinner />
{:else if $topUserQuery.error}
<Card body color="danger"
>{$topUserQuery.error.message}</Card
>
{:else}
<Table>
<tr class="mb-2">
<th>Legend</th>
<th>User Name</th>
<th
>Number of
<select
class="p-0"
bind:value={topUserSelection}
>
{#each topOptions as option}
<option value={option}>
{option.label}
</option>
{/each}
</select>
</th>
</tr>
{#each $topUserQuery.data.topUser as tu, i}
<tr>
<td
><Icon
name="circle-fill"
style="color: {colors[i]};"
/></td
>
<th scope="col"
><a
href="/monitoring/user/{tu.id}?cluster={cluster}&state=running"
>{tu.id}</a
></th
>
<td>{tu[topUserSelection.key]}</td>
</tr>
{/each}
</Table>
{/if}
{/key}
</Col>
<Col class="p-2">
<h4 class="text-center">Top Projects</h4>
{#key $mainQuery.data}
<Pie
size={colWidth1}
sliceLabel='Jobs'
quantities={$mainQuery.data.topProjects.sort((a, b) => b.count - a.count).map((tp) => tp.count)}
entities={$mainQuery.data.topProjects.sort((a, b) => b.count - a.count).map((tp) => tp.name)}
/>
<h4 class="text-center">
Top Projects on {cluster.charAt(0).toUpperCase() +
cluster.slice(1)}
</h4>
{#key $topProjectQuery.data}
{#if $topProjectQuery.fetching}
<Spinner />
{:else if $topProjectQuery.error}
<Card body color="danger"
>{$topProjectQuery.error.message}</Card
>
{:else}
<Pie
size={colWidth1}
sliceLabel={topProjectSelection.label}
quantities={$topProjectQuery.data.topProjects.map(
(tp) => tp[topProjectSelection.key]
)}
entities={$topProjectQuery.data.topProjects.map(
(tp) => tp.id
)}
/>
{/if}
{/key}
</Col>
<Col class="px-4 py-2">
<Table>
<tr class="mb-2"><th>Legend</th><th>Project Code</th><th>Number of Nodes</th></tr>
{#each $mainQuery.data.topProjects.sort((a, b) => b.count - a.count) as { name, count }, i}
<tr>
<td><Icon name="circle-fill" style="color: {colors[i]};"/></td>
<th scope="col"><a href="/monitoring/jobs/?cluster={cluster}&state=running&project={name}&projectMatch=eq">{name}</a></th>
<td>{count}</td>
</tr>
{/each}
</Table>
{#key $topProjectQuery.data}
{#if $topProjectQuery.fetching}
<Spinner />
{:else if $topProjectQuery.error}
<Card body color="danger"
>{$topProjectQuery.error.message}</Card
>
{:else}
<Table>
<tr class="mb-2">
<th>Legend</th>
<th>Project Code</th>
<th
>Number of
<select
class="p-0"
bind:value={topProjectSelection}
>
{#each topOptions as option}
<option value={option}>
{option.label}
</option>
{/each}
</select>
</th>
</tr>
{#each $topProjectQuery.data.topProjects as tp, i}
<tr>
<td
><Icon
name="circle-fill"
style="color: {colors[i]};"
/></td
>
<th scope="col"
><a
href="/monitoring/jobs/?cluster={cluster}&state=running&project={tp.id}&projectMatch=eq"
>{tp.id}</a
></th
>
<td>{tp[topProjectSelection.key]}</td>
</tr>
{/each}
</Table>
{/if}
{/key}
</Col>
</Row>
<hr class="my-2"/>
<Row cols={2}>
<hr class="my-2" />
<Row>
<Col class="p-2">
<div bind:clientWidth={colWidth2}>
{#key $mainQuery.data.stats}
<Histogram
data={convert2uplot($mainQuery.data.stats[0].histDuration)}
data={convert2uplot(
$mainQuery.data.stats[0].histDuration
)}
width={colWidth2 - 25}
title="Duration Distribution"
xlabel="Current Runtimes"
xunit="Hours"
xunit="Hours"
ylabel="Number of Jobs"
yunit="Jobs"/>
yunit="Jobs"
/>
{/key}
</div>
</Col>
@ -231,10 +627,43 @@
width={colWidth2 - 25}
title="Number of Nodes Distribution"
xlabel="Allocated Nodes"
xunit="Nodes"
xunit="Nodes"
ylabel="Number of Jobs"
yunit="Jobs"/>
yunit="Jobs"
/>
{/key}
</Col>
</Row>
{/if}
<Row cols={2}>
<Col class="p-2">
<div bind:clientWidth={colWidth2}>
{#key $mainQuery.data.stats}
<Histogram
data={convert2uplot(
$mainQuery.data.stats[0].histNumCores
)}
width={colWidth2 - 25}
title="Number of Cores Distribution"
xlabel="Allocated Cores"
xunit="Cores"
ylabel="Number of Jobs"
yunit="Jobs"
/>
{/key}
</div>
</Col>
<Col class="p-2">
{#key $mainQuery.data.stats}
<Histogram
data={convert2uplot($mainQuery.data.stats[0].histNumAccs)}
width={colWidth2 - 25}
title="Number of Accelerators Distribution"
xlabel="Allocated Accs"
xunit="Accs"
ylabel="Number of Jobs"
yunit="Jobs"
/>
{/key}
</Col>
</Row>
{/if}
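The four histograms above all feed jobsStatistics results through
convert2uplot. A hedged sketch of what such a conversion has to do (the real
helper lives in utils.js; this reimplementation is an assumption about its
shape): uPlot wants column-oriented arrays, while GraphQL returns rows.

// rows as returned by e.g. histNumCores: [{ count: 10, value: 1 }, { count: 4, value: 2 }]
const convert2uplotSketch = (rows) => [
    rows.map((r) => r.value), // x values (e.g. allocated cores)
    rows.map((r) => r.count), // y values (number of jobs)
]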

View File

@ -145,7 +145,8 @@
metric={item.data.name}
cluster={clusters.find(c => c.name == cluster)}
subCluster={item.subCluster}
resources={[{hostname: item.host}]}/>
resources={[{hostname: item.host}]}
forNode={true}/>
{:else if item.disabled === true && item.data}
<Card style="margin-left: 2rem;margin-right: 2rem;" body color="info">Metric disabled for subcluster <code>{selectedMetric}:{item.subCluster}</code></Card>
{:else}

View File

@ -166,7 +166,7 @@
}
}
const plotSeries = [{label: 'Runtime', value: (u, ts, sidx, didx) => didx == null ? null : formatTime(ts)}]
const plotSeries = [{label: 'Runtime', value: (u, ts, sidx, didx) => didx == null ? null : formatTime(ts, forNode)}]
const plotData = [new Array(longestSeries)]
if (forNode === true) {
@ -227,7 +227,7 @@
scale: 'x',
space: 35,
incrs: timeIncrs(timestep, maxX, forNode),
values: (_, vals) => vals.map(v => formatTime(v))
values: (_, vals) => vals.map(v => formatTime(v, forNode))
},
{
scale: 'y',
@ -349,19 +349,21 @@
}
</script>
<script context="module">
export function formatTime(t) {
export function formatTime(t, forNode = false) {
if (t !== null) {
if (isNaN(t)) {
return t
} else {
let h = Math.floor(t / 3600)
let m = Math.floor((t % 3600) / 60)
const tAbs = Math.abs(t)
const h = Math.floor(tAbs / 3600)
const m = Math.floor((tAbs % 3600) / 60)
// Re-add the sign to the rendered tick string only, so the h/m branches above keep working on the absolute value
if (h == 0)
return `${m}m`
return `${forNode && m != 0 ? '-' : ''}${m}m`
else if (m == 0)
return `${h}h`
return `${forNode?'-':''}${h}h`
else
return `${h}:${m}h`
return `${forNode?'-':''}${h}:${m}h`
}
}
}
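// Illustrative input/output pairs for the rewritten helper:
//   formatTime(90)          -> "1m"     (h == 0)
//   formatTime(3600)        -> "1h"     (m == 0)
//   formatTime(5400)        -> "1:30h"
//   formatTime(-5400, true) -> "-1:30h" (node view: math on |t|, sign re-added as string)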

View File

@ -43,14 +43,14 @@
export let entities
export let displayLegend = false
const data = {
$: data = {
labels: entities,
datasets: [
{
label: sliceLabel,
data: quantities,
fill: 1,
backgroundColor: colors.slice(0, quantities.length),
backgroundColor: colors.slice(0, quantities.length)
}
]
}
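// Changing `const data` to `$: data` turns the chart input into a reactive
// statement: when the parent swaps `quantities`/`entities` (e.g. after the
// top-list category <select> changes), the datasets are rebuilt instead of
// keeping the values captured at component creation. Sketch of the difference
// (illustrative):
//   const data = build(quantities) // evaluated once, stale after prop updates
//   $: data = build(quantities)    // re-evaluated whenever `quantities` changes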

View File

@ -22,10 +22,10 @@
LineElement
);
export let size
export let metrics
export let cluster
export let jobMetrics
export let height = 365
const metricConfig = getContext('metrics')
@ -89,13 +89,19 @@
// No custom defined options but keep for clarity
const options = {
maintainAspectRatio: false,
animation: false
animation: false,
scales: { // fix scale
r: {
suggestedMin: 0.0,
suggestedMax: 1.0
}
}
}
</script>
<div class="chart-container">
<Radar {data} {options} width={size} height={size}/>
<Radar {data} {options} {height}/>
</div>
<style>

View File

@ -1,42 +1,54 @@
<div class="cc-plot">
<canvas bind:this={canvasElement} width="{prevWidth}" height="{prevHeight}"></canvas>
</div>
<script>
import uPlot from 'uplot'
import { formatNumber } from '../units.js'
import { onMount, onDestroy } from 'svelte'
import { Card } from 'sveltestrap'
<script context="module">
const axesColor = '#aaaaaa'
const tickFontSize = 10
const labelFontSize = 12
const fontFamily = 'system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"'
const paddingLeft = 40,
paddingRight = 10,
paddingTop = 10,
paddingBottom = 50
export let data = null
export let renderTime = false
export let allowSizeChange = false
export let cluster = null
export let width = 600
export let height = 350
let plotWrapper = null
let uplot = null
let timeoutId = null
const lineWidth = clusterCockpitConfig.plot_general_lineWidth
/* Data Format
* data = [null, [], []] // 0: null-axis required for scatter, 1: Array of XY-Array for Scatter, 2: Optional Time Info
* data[1][0] = [100, 200, 500, ...] // X Axis -> Intensity (Vals up to clusters' flopRateScalar value)
* data[1][1] = [1000, 2000, 1500, ...] // Y Axis -> Performance (Vals up to clusters' flopRateSimd value)
* data[2] = [0.1, 0.15, 0.2, ...] // Color Code -> Time Information (Floats from 0 to 1) (Optional)
*/
// Helpers
function getGradientR(x) {
if (x < 0.5) return 0
if (x > 0.75) return 255
x = (x - 0.5) * 4.0
return Math.floor(x * 255.0)
}
function getGradientG(x) {
if (x > 0.25 && x < 0.75) return 255
if (x < 0.25) x = x * 4.0
else x = 1.0 - (x - 0.75) * 4.0
return Math.floor(x * 255.0)
}
function getGradientB(x) {
if (x < 0.25) return 255
if (x > 0.5) return 0
x = 1.0 - (x - 0.25) * 4.0
return Math.floor(x * 255.0)
}
function getRGB(c) {
return `rgb(${getGradientR(c)}, ${getGradientG(c)}, ${getGradientB(c)})`
}
function nearestThousand (num) {
return Math.ceil(num/1000) * 1000
}
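// The three channel functions above piece together a blue -> green -> red
// gradient over the normalized time value c in [0, 1]:
//   getRGB(0)   -> "rgb(0, 0, 255)" (oldest samples: blue)
//   getRGB(0.5) -> "rgb(0, 255, 0)" (middle: green)
//   getRGB(1)   -> "rgb(255, 0, 0)" (newest: red)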
function lineIntersect(x1, y1, x2, y2, x3, y3, x4, y4) {
let l = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
let a = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / l
@ -45,314 +57,197 @@
y: y1 + a * (y2 - y1)
}
}
// End Helpers
function axisStepFactor(i, size) {
if (size && size < 500)
return 10
// Dot Renderers
const drawColorPoints = (u, seriesIdx, idx0, idx1) => {
const size = 5 * devicePixelRatio;
uPlot.orient(u, seriesIdx, (series, dataX, dataY, scaleX, scaleY, valToPosX, valToPosY, xOff, yOff, xDim, yDim, moveTo, lineTo, rect, arc) => {
let d = u.data[seriesIdx];
let deg360 = 2 * Math.PI;
for (let i = 0; i < d[0].length; i++) {
let p = new Path2D();
let xVal = d[0][i];
let yVal = d[1][i];
u.ctx.strokeStyle = getRGB(u.data[2][i])
u.ctx.fillStyle = getRGB(u.data[2][i])
if (xVal >= scaleX.min && xVal <= scaleX.max && yVal >= scaleY.min && yVal <= scaleY.max) {
let cx = valToPosX(xVal, scaleX, xDim, xOff);
let cy = valToPosY(yVal, scaleY, yDim, yOff);
if (i % 3 == 0)
return 2
else if (i % 3 == 1)
return 2.5
else
return 2
}
function render(ctx, data, cluster, width, height, colorDots, showTime, defaultMaxY) {
if (width <= 0)
return
const [minX, maxX, minY, maxY] = [0.01, 1000, 1., cluster?.flopRateSimd?.value || defaultMaxY]
const w = width - paddingLeft - paddingRight
const h = height - paddingTop - paddingBottom
// Helpers:
const [log10minX, log10maxX, log10minY, log10maxY] =
[Math.log10(minX), Math.log10(maxX), Math.log10(minY), Math.log10(maxY)]
/* Value -> Pixel-Coordinate */
const getCanvasX = (x) => {
x = Math.log10(x)
x -= log10minX; x /= (log10maxX - log10minX)
return Math.round((x * w) + paddingLeft)
}
const getCanvasY = (y) => {
y = Math.log10(y)
y -= log10minY
y /= (log10maxY - log10minY)
return Math.round((h - y * h) + paddingTop)
}
// Axes
ctx.fillStyle = 'black'
ctx.strokeStyle = axesColor
ctx.font = `${tickFontSize}px ${fontFamily}`
ctx.beginPath()
for (let x = minX, i = 0; x <= maxX; i++) {
let px = getCanvasX(x)
let text = formatNumber(x)
let textWidth = ctx.measureText(text).width
ctx.fillText(text,
Math.floor(px - (textWidth / 2)),
height - paddingBottom + tickFontSize + 5)
ctx.moveTo(px, paddingTop - 5)
ctx.lineTo(px, height - paddingBottom + 5)
x *= axisStepFactor(i, w)
}
if (data.xLabel) {
ctx.font = `${labelFontSize}px ${fontFamily}`
let textWidth = ctx.measureText(data.xLabel).width
ctx.fillText(data.xLabel, Math.floor((width / 2) - (textWidth / 2)), height - 20)
}
ctx.textAlign = 'center'
ctx.font = `${tickFontSize}px ${fontFamily}`
for (let y = minY, i = 0; y <= maxY; i++) {
let py = getCanvasY(y)
ctx.moveTo(paddingLeft - 5, py)
ctx.lineTo(width - paddingRight + 5, py)
ctx.save()
ctx.translate(paddingLeft - 10, py)
ctx.rotate(-Math.PI / 2)
ctx.fillText(formatNumber(y), 0, 0)
ctx.restore()
y *= axisStepFactor(i)
}
if (data.yLabel) {
ctx.font = `${labelFontSize}px ${fontFamily}`
ctx.save()
ctx.translate(15, Math.floor(height / 2))
ctx.rotate(-Math.PI / 2)
ctx.fillText(data.yLabel, 0, 0)
ctx.restore()
}
ctx.stroke()
// Draw Data
if (data.x && data.y) {
for (let i = 0; i < data.x.length; i++) {
let x = data.x[i], y = data.y[i], c = data.c[i]
if (x == null || y == null || Number.isNaN(x) || Number.isNaN(y))
continue
const s = 3
const px = getCanvasX(x)
const py = getCanvasY(y)
ctx.fillStyle = getRGB(c)
ctx.beginPath()
ctx.arc(px, py, s, 0, Math.PI * 2, false)
ctx.fill()
p.moveTo(cx + size/2, cy);
arc(p, cx, cy, size/2, 0, deg360);
}
u.ctx.fill(p);
}
} else if (data.tiles) {
const rows = data.tiles.length
const cols = data.tiles[0].length
});
return null;
};
const tileWidth = Math.ceil(w / cols)
const tileHeight = Math.ceil(h / rows)
let max = data.tiles.reduce((max, row) =>
Math.max(max, row.reduce((max, val) =>
Math.max(max, val)), 0), 0)
if (max == 0)
max = 1
const tileColor = val => `rgba(255, 0, 0, ${(val / max)})`
for (let i = 0; i < rows; i++) {
for (let j = 0; j < cols; j++) {
let px = paddingLeft + (j / cols) * w
let py = paddingTop + (h - (i / rows) * h) - tileHeight
ctx.fillStyle = tileColor(data.tiles[i][j])
ctx.fillRect(px, py, tileWidth, tileHeight)
const drawPoints = (u, seriesIdx, idx0, idx1) => {
const size = 5 * devicePixelRatio;
uPlot.orient(u, seriesIdx, (series, dataX, dataY, scaleX, scaleY, valToPosX, valToPosY, xOff, yOff, xDim, yDim, moveTo, lineTo, rect, arc) => {
let d = u.data[seriesIdx];
u.ctx.strokeStyle = getRGB(0);
u.ctx.fillStyle = getRGB(0);
let deg360 = 2 * Math.PI;
let p = new Path2D();
for (let i = 0; i < d[0].length; i++) {
let xVal = d[0][i];
let yVal = d[1][i];
if (xVal >= scaleX.min && xVal <= scaleX.max && yVal >= scaleY.min && yVal <= scaleY.max) {
let cx = valToPosX(xVal, scaleX, xDim, xOff);
let cy = valToPosY(yVal, scaleY, yDim, yOff);
p.moveTo(cx + size/2, cy);
arc(p, cx, cy, size/2, 0, deg360);
}
}
}
u.ctx.fill(p);
});
return null;
};
// Draw roofs
ctx.strokeStyle = 'black'
ctx.lineWidth = 2
ctx.beginPath()
if (cluster != null) {
const ycut = 0.01 * cluster.memoryBandwidth.value
const scalarKnee = (cluster.flopRateScalar.value - ycut) / cluster.memoryBandwidth.value
const simdKnee = (cluster.flopRateSimd.value - ycut) / cluster.memoryBandwidth.value
const scalarKneeX = getCanvasX(scalarKnee),
simdKneeX = getCanvasX(simdKnee),
flopRateScalarY = getCanvasY(cluster.flopRateScalar.value),
flopRateSimdY = getCanvasY(cluster.flopRateSimd.value)
// Main Function
function render(plotData) {
if (plotData) {
const opts = {
title: "",
mode: 2,
width: width,
height: height,
legend: {
show: false
},
cursor: { drag: { x: false, y: false } },
axes: [
{
label: 'Intensity [FLOPS/Byte]',
values: (u, vals) => vals.map(v => formatNumber(v))
},
{
label: 'Performance [GFLOPS]',
values: (u, vals) => vals.map(v => formatNumber(v))
}
],
scales: {
x: {
time: false,
range: [0.01, 1000],
distr: 3, // Render as log
log: 10, // log exp
},
y: {
range: [1.0, cluster?.flopRateSimd?.value ? nearestThousand(cluster.flopRateSimd.value) : 10000],
distr: 3, // Render as log
log: 10, // log exp
},
},
series: [
{},
{ paths: renderTime ? drawColorPoints : drawPoints }
],
hooks: {
drawClear: [
u => {
u.series.forEach((s, i) => {
if (i > 0)
s._paths = null;
});
},
],
draw: [
u => { // draw roofs when cluster set
// console.log(u)
if (cluster != null) {
const padding = u._padding // [top, right, bottom, left]
if (scalarKneeX < width - paddingRight) {
ctx.moveTo(scalarKneeX, flopRateScalarY)
ctx.lineTo(width - paddingRight, flopRateScalarY)
}
u.ctx.strokeStyle = 'black'
u.ctx.lineWidth = lineWidth
u.ctx.beginPath()
if (simdKneeX < width - paddingRight) {
ctx.moveTo(simdKneeX, flopRateSimdY)
ctx.lineTo(width - paddingRight, flopRateSimdY)
}
const ycut = 0.01 * cluster.memoryBandwidth.value
const scalarKnee = (cluster.flopRateScalar.value - ycut) / cluster.memoryBandwidth.value
const simdKnee = (cluster.flopRateSimd.value - ycut) / cluster.memoryBandwidth.value
const scalarKneeX = u.valToPos(scalarKnee, 'x', true), // Value, axis, toCanvasPixels
simdKneeX = u.valToPos(simdKnee, 'x', true),
flopRateScalarY = u.valToPos(cluster.flopRateScalar.value, 'y', true),
flopRateSimdY = u.valToPos(cluster.flopRateSimd.value, 'y', true)
let x1 = getCanvasX(0.01),
y1 = getCanvasY(ycut),
x2 = getCanvasX(simdKnee),
y2 = flopRateSimdY
// Debug get zoomLevel from browser
// console.log("Zoom", Math.round(window.devicePixelRatio * 100))
let xAxisIntersect = lineIntersect(
x1, y1, x2, y2,
0, height - paddingBottom, width, height - paddingBottom)
if (scalarKneeX < (width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio)) { // Top horizontal roofline
u.ctx.moveTo(scalarKneeX, flopRateScalarY)
u.ctx.lineTo((width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio), flopRateScalarY)
}
if (xAxisIntersect.x > x1) {
x1 = xAxisIntersect.x
y1 = xAxisIntersect.y
}
if (simdKneeX < (width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio)) { // Lower horizontal roofline
u.ctx.moveTo(simdKneeX, flopRateSimdY)
u.ctx.lineTo((width * window.devicePixelRatio) - (padding[1] * window.devicePixelRatio), flopRateSimdY)
}
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
}
ctx.stroke()
let x1 = u.valToPos(0.01, 'x', true),
y1 = u.valToPos(ycut, 'y', true)
let x2 = u.valToPos(simdKnee, 'x', true),
y2 = flopRateSimdY
if (colorDots && showTime && data.x && data.y) {
// The Color Scale For Time Information
ctx.fillStyle = 'black'
ctx.fillText('Time:', 17, height - 5)
const start = paddingLeft + 5
for (let x = start; x < width - paddingRight; x += 15) {
let c = (x - start) / (width - start - paddingRight)
ctx.fillStyle = getRGB(c)
ctx.beginPath()
ctx.arc(x, height - 10, 5, 0, Math.PI * 2, false)
ctx.fill()
}
}
}
let xAxisIntersect = lineIntersect(
x1, y1, x2, y2,
u.valToPos(0.01, 'x', true), u.valToPos(1.0, 'y', true), // X-Axis Start Coords
u.valToPos(1000, 'x', true), u.valToPos(1.0, 'y', true) // X-Axis End Coords
)
function transformData(flopsAny, memBw, colorDots) { // Uses Metric Object
const nodes = flopsAny.series.length
const timesteps = flopsAny.series[0].data.length
if (xAxisIntersect.x > x1) {
x1 = xAxisIntersect.x
y1 = xAxisIntersect.y
}
/* c will contain values from 0 to 1 representing the time */
const x = [], y = [], c = []
// Diagonal
u.ctx.moveTo(x1, y1)
u.ctx.lineTo(x2, y2)
if (flopsAny && memBw) {
for (let i = 0; i < nodes; i++) {
const flopsData = flopsAny.series[i].data
const memBwData = memBw.series[i].data
for (let j = 0; j < timesteps; j++) {
const f = flopsData[j], m = memBwData[j]
const intensity = f / m
if (Number.isNaN(intensity) || !Number.isFinite(intensity))
continue
x.push(intensity)
y.push(f)
c.push(colorDots ? j / timesteps : 0)
}
}
u.ctx.stroke()
// Reset grid lineWidth
u.ctx.lineWidth = 0.15
}
}
]
},
};
uplot = new uPlot(opts, plotData, plotWrapper);
} else {
console.warn("transformData: metrics for 'mem_bw' and/or 'flops_any' missing!")
}
return {
x, y, c,
xLabel: 'Intensity [FLOPS/byte]',
yLabel: 'Performance [GFLOPS]'
console.log('No data for roofline!')
}
}
// Return something to be plotted. The argument shall be the result of the
// `nodeMetrics` GraphQL query.
export function transformPerNodeData(nodes) {
const x = [], y = [], c = []
for (let node of nodes) {
let flopsAny = node.metrics.find(m => m.name == 'flops_any' && m.scope == 'node')?.metric
let memBw = node.metrics.find(m => m.name == 'mem_bw' && m.scope == 'node')?.metric
if (!flopsAny || !memBw) {
console.warn("transformPerNodeData: metrics for 'mem_bw' and/or 'flops_any' missing!")
continue
}
let flopsData = flopsAny.series[0].data, memBwData = memBw.series[0].data
const f = flopsData[flopsData.length - 1], m = memBwData[flopsData.length - 1]
const intensity = f / m
if (Number.isNaN(intensity) || !Number.isFinite(intensity))
continue
x.push(intensity)
y.push(f)
c.push(0)
}
return {
x, y, c,
xLabel: 'Intensity [FLOPS/byte]',
yLabel: 'Performance [GFLOPS]'
}
}
</script>
<script>
import { onMount, tick } from 'svelte'
import { formatNumber } from '../units.js'
export let flopsAny = null
export let memBw = null
export let cluster = null
export let maxY = null
export let width = 500
export let height = 300
export let tiles = null
export let colorDots = true
export let showTime = true
export let data = null
console.assert(data || tiles || (flopsAny && memBw), "you must provide flopsAny and memBw or tiles!")
let ctx, canvasElement, prevWidth = width, prevHeight = height
data = data != null ? data : (flopsAny && memBw
? transformData(flopsAny.metric, memBw.metric, colorDots) // Use Metric Object from Parent
: {
tiles: tiles,
xLabel: 'Intensity [FLOPS/byte]',
yLabel: 'Performance [GFLOPS]'
})
// Svelte and Sizechange
onMount(() => {
ctx = canvasElement.getContext('2d')
if (prevWidth != width || prevHeight != height) {
sizeChanged()
return
}
canvasElement.width = width
canvasElement.height = height
render(ctx, data, cluster, width, height, colorDots, showTime, maxY)
render(data)
})
let timeoutId = null
function sizeChanged() {
if (!ctx)
return
onDestroy(() => {
if (uplot)
uplot.destroy()
if (timeoutId != null)
clearTimeout(timeoutId)
})
function sizeChanged() {
if (timeoutId != null)
clearTimeout(timeoutId)
prevWidth = width
prevHeight = height
timeoutId = setTimeout(() => {
if (!canvasElement)
return
timeoutId = null
canvasElement.width = width
canvasElement.height = height
render(ctx, data, cluster, width, height, colorDots, showTime, maxY)
}, 250)
if (uplot)
uplot.destroy()
render(data)
}, 200)
}
$: sizeChanged(width, height)
$: if (allowSizeChange) sizeChanged(width, height)
</script>
{#if data != null}
<div bind:this={plotWrapper}/>
{:else}
<Card class="mx-4" body color="warning">Cannot render roofline: No data!</Card>
{/if}

View File

@ -0,0 +1,234 @@
<div class="cc-plot">
<canvas bind:this={canvasElement} width="{prevWidth}" height="{prevHeight}"></canvas>
</div>
<script context="module">
const axesColor = '#aaaaaa'
const tickFontSize = 10
const labelFontSize = 12
const fontFamily = 'system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"'
const paddingLeft = 40,
paddingRight = 10,
paddingTop = 10,
paddingBottom = 50
function lineIntersect(x1, y1, x2, y2, x3, y3, x4, y4) {
let l = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
let a = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / l
return {
x: x1 + a * (x2 - x1),
y: y1 + a * (y2 - y1)
}
}
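// axisStepFactor yields multiplicative tick steps of 2, 2.5, 2, i.e. ticks
// at 1, 2, 5, 10, ... per decade on the log axes; plots narrower than 500px
// jump a whole decade per tick instead.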
function axisStepFactor(i, size) {
if (size && size < 500)
return 10
if (i % 3 == 0)
return 2
else if (i % 3 == 1)
return 2.5
else
return 2
}
function render(ctx, data, cluster, width, height, defaultMaxY) {
if (width <= 0)
return
const [minX, maxX, minY, maxY] = [0.01, 1000, 1., cluster?.flopRateSimd?.value || defaultMaxY]
const w = width - paddingLeft - paddingRight
const h = height - paddingTop - paddingBottom
// Helpers:
const [log10minX, log10maxX, log10minY, log10maxY] =
[Math.log10(minX), Math.log10(maxX), Math.log10(minY), Math.log10(maxY)]
/* Value -> Pixel-Coordinate */
const getCanvasX = (x) => {
x = Math.log10(x)
x -= log10minX; x /= (log10maxX - log10minX)
return Math.round((x * w) + paddingLeft)
}
const getCanvasY = (y) => {
y = Math.log10(y)
y -= log10minY
y /= (log10maxY - log10minY)
return Math.round((h - y * h) + paddingTop)
}
// Axes
ctx.fillStyle = 'black'
ctx.strokeStyle = axesColor
ctx.font = `${tickFontSize}px ${fontFamily}`
ctx.beginPath()
for (let x = minX, i = 0; x <= maxX; i++) {
let px = getCanvasX(x)
let text = formatNumber(x)
let textWidth = ctx.measureText(text).width
ctx.fillText(text,
Math.floor(px - (textWidth / 2)),
height - paddingBottom + tickFontSize + 5)
ctx.moveTo(px, paddingTop - 5)
ctx.lineTo(px, height - paddingBottom + 5)
x *= axisStepFactor(i, w)
}
if (data.xLabel) {
ctx.font = `${labelFontSize}px ${fontFamily}`
let textWidth = ctx.measureText(data.xLabel).width
ctx.fillText(data.xLabel, Math.floor((width / 2) - (textWidth / 2)), height - 20)
}
ctx.textAlign = 'center'
ctx.font = `${tickFontSize}px ${fontFamily}`
for (let y = minY, i = 0; y <= maxY; i++) {
let py = getCanvasY(y)
ctx.moveTo(paddingLeft - 5, py)
ctx.lineTo(width - paddingRight + 5, py)
ctx.save()
ctx.translate(paddingLeft - 10, py)
ctx.rotate(-Math.PI / 2)
ctx.fillText(formatNumber(y), 0, 0)
ctx.restore()
y *= axisStepFactor(i)
}
if (data.yLabel) {
ctx.font = `${labelFontSize}px ${fontFamily}`
ctx.save()
ctx.translate(15, Math.floor(height / 2))
ctx.rotate(-Math.PI / 2)
ctx.fillText(data.yLabel, 0, 0)
ctx.restore()
}
ctx.stroke()
// Draw Data
if (data.tiles) {
const rows = data.tiles.length
const cols = data.tiles[0].length
const tileWidth = Math.ceil(w / cols)
const tileHeight = Math.ceil(h / rows)
let max = data.tiles.reduce((max, row) =>
Math.max(max, row.reduce((max, val) =>
Math.max(max, val), 0)), 0)
if (max == 0)
max = 1
const tileColor = val => `rgba(255, 0, 0, ${(val / max)})`
for (let i = 0; i < rows; i++) {
for (let j = 0; j < cols; j++) {
let px = paddingLeft + (j / cols) * w
let py = paddingTop + (h - (i / rows) * h) - tileHeight
ctx.fillStyle = tileColor(data.tiles[i][j])
ctx.fillRect(px, py, tileWidth, tileHeight)
}
}
}
// Draw roofs
ctx.strokeStyle = 'black'
ctx.lineWidth = 2
ctx.beginPath()
if (cluster != null) {
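// Roofs: ycut is the bandwidth roof's value at x = minX = 0.01; each knee is
// the intensity at which the diagonal bandwidth roof reaches the matching
// horizontal compute roof (scalar and SIMD peak FLOP rates).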
const ycut = 0.01 * cluster.memoryBandwidth.value
const scalarKnee = (cluster.flopRateScalar.value - ycut) / cluster.memoryBandwidth.value
const simdKnee = (cluster.flopRateSimd.value - ycut) / cluster.memoryBandwidth.value
const scalarKneeX = getCanvasX(scalarKnee),
simdKneeX = getCanvasX(simdKnee),
flopRateScalarY = getCanvasY(cluster.flopRateScalar.value),
flopRateSimdY = getCanvasY(cluster.flopRateSimd.value)
if (scalarKneeX < width - paddingRight) {
ctx.moveTo(scalarKneeX, flopRateScalarY)
ctx.lineTo(width - paddingRight, flopRateScalarY)
}
if (simdKneeX < width - paddingRight) {
ctx.moveTo(simdKneeX, flopRateSimdY)
ctx.lineTo(width - paddingRight, flopRateSimdY)
}
let x1 = getCanvasX(0.01),
y1 = getCanvasY(ycut),
x2 = getCanvasX(simdKnee),
y2 = flopRateSimdY
let xAxisIntersect = lineIntersect(
x1, y1, x2, y2,
0, height - paddingBottom, width, height - paddingBottom)
if (xAxisIntersect.x > x1) {
x1 = xAxisIntersect.x
y1 = xAxisIntersect.y
}
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
}
ctx.stroke()
}
</script>
<script>
import { onMount } from 'svelte'
import { formatNumber } from '../units.js'
export let cluster = null
export let tiles = null
export let maxY = null
export let width = 500
export let height = 300
console.assert(tiles, "you must provide tiles!")
let ctx, canvasElement, prevWidth = width, prevHeight = height
const data = {
tiles: tiles,
xLabel: 'Intensity [FLOPS/byte]',
yLabel: 'Performance [GFLOPS]'
}
onMount(() => {
ctx = canvasElement.getContext('2d')
if (prevWidth != width || prevHeight != height) {
sizeChanged()
return
}
canvasElement.width = width
canvasElement.height = height
render(ctx, data, cluster, width, height, maxY)
})
let timeoutId = null
function sizeChanged() {
if (!ctx)
return
if (timeoutId != null)
clearTimeout(timeoutId)
prevWidth = width
prevHeight = height
timeoutId = setTimeout(() => {
if (!canvasElement)
return
timeoutId = null
canvasElement.width = width
canvasElement.height = height
render(ctx, data, cluster, width, height, maxY)
}, 250)
}
$: sizeChanged(width, height)
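// Usage sketch (hypothetical parent markup; the import name is an assumption,
// props map to the exports above):
// <RooflineHeatmap cluster={subCluster} tiles={rooflineTiles} width={600} height={350}/>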
</script>

View File

@ -6,8 +6,8 @@ const power = [1, 1e3, 1e6, 1e9, 1e12, 1e15, 1e18, 1e21]
const prefix = ['', 'K', 'M', 'G', 'T', 'P', 'E']
export function formatNumber(x) {
if ( isNaN(x) ) {
return x // Return if String , used in Histograms
if ( isNaN(x) || x == null) {
return x // Return if String or Null
} else {
for (let i = 0; i < prefix.length; i++)
if (power[i] <= x && x < power[i+1])

View File

@ -6,7 +6,7 @@ import {
} from "@urql/svelte";
import { setContext, getContext, hasContext, onDestroy, tick } from "svelte";
import { readable } from "svelte/store";
import { formatNumber } from './units.js'
// import { formatNumber } from './units.js'
/*
* Call this function only at component initialization time!
@ -325,9 +325,12 @@ export function convert2uplot(canvasData) {
return uplotData
}
export function binsFromFootprint(weights, values, numBins) {
let min = 0, max = 0
export function binsFromFootprint(weights, scope, values, numBins) {
let min = 0, max = 0 //, median = 0
if (values.length != 0) {
// Extreme, wrong peak values: filter here or in the backend?
// median = median(values)
for (let x of values) {
min = Math.min(min, x)
max = Math.max(max, x)
@ -338,31 +341,100 @@ export function binsFromFootprint(weights, values, numBins) {
if (numBins == null || numBins < 3)
numBins = 3
const bins = new Array(numBins).fill(0)
for (let i = 0; i < values.length; i++)
bins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += weights ? weights[i] : 1
let scopeWeights
switch (scope) {
case 'core':
scopeWeights = weights.coreHours
break
case 'accelerator':
scopeWeights = weights.accHours
break
default: // every other scope: use 'node'
scopeWeights = weights.nodeHours
}
// return {
// label: idx => {
// let start = min + (idx / numBins) * (max - min)
// let stop = min + ((idx + 1) / numBins) * (max - min)
// return `${formatNumber(start)} - ${formatNumber(stop)}`
// },
// bins: bins.map((count, idx) => ({ value: idx, count: count })),
// min: min,
// max: max
// }
const rawBins = new Array(numBins).fill(0)
for (let i = 0; i < values.length; i++)
rawBins[Math.floor(((values[i] - min) / (max - min)) * numBins)] += scopeWeights ? scopeWeights[i] : 1
const bins = rawBins.map((count, idx) => ({
value: Math.floor(min + ((idx + 1) / numBins) * (max - min)),
count: count
}))
return {
bins: bins.map((count, idx) => ({
value: idx => { // Label: rounded-down midpoint of the bin's start/stop range
let start = min + (idx / numBins) * (max - min)
let stop = min + ((idx + 1) / numBins) * (max - min)
return `${formatNumber(Math.floor((start+stop)/2))}`
},
count: count
})),
min: min,
max: max
bins: bins
}
}
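// Usage sketch (hypothetical footprint data; `weights` carries one hour-array
// per scope, matching the switch above):
// const weights = { nodeHours: [...], coreHours: [...], accHours: [...] }
// const { bins } = binsFromFootprint(weights, 'core', footprintValues, 10)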
export function transformDataForRoofline(flopsAny, memBw) { // Uses Metric Objects: {series:[{},{},...], timestep:60, name:$NAME}
const nodes = flopsAny.series.length
const timesteps = flopsAny.series[0].data.length
/* c will contain values from 0 to 1 representing the time */
let data = null
const x = [], y = [], c = []
if (flopsAny && memBw) {
for (let i = 0; i < nodes; i++) {
const flopsData = flopsAny.series[i].data
const memBwData = memBw.series[i].data
for (let j = 0; j < timesteps; j++) {
const f = flopsData[j], m = memBwData[j]
const intensity = f / m
if (Number.isNaN(intensity) || !Number.isFinite(intensity))
continue
x.push(intensity)
y.push(f)
c.push(j / timesteps)
}
}
} else {
console.warn("transformData: metrics for 'mem_bw' and/or 'flops_any' missing!")
}
if (x.length > 0 && y.length > 0 && c.length > 0) {
data = [null, [x, y], c] // for dataformat see roofline.svelte
}
return data
}
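// Usage sketch (hypothetical metric objects, e.g. picked out of a jobMetrics
// query result):
// const plotData = transformDataForRoofline(
//     metrics.find(m => m.name == 'flops_any')?.metric,
//     metrics.find(m => m.name == 'mem_bw')?.metric)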
// Return something to be plotted. The argument shall be the result of the
// `nodeMetrics` GraphQL query.
export function transformPerNodeDataForRoofline(nodes) {
let data = null
const x = [], y = []
for (let node of nodes) {
let flopsAny = node.metrics.find(m => m.name == 'flops_any' && m.scope == 'node')?.metric
let memBw = node.metrics.find(m => m.name == 'mem_bw' && m.scope == 'node')?.metric
if (!flopsAny || !memBw) {
console.warn("transformPerNodeData: metrics for 'mem_bw' and/or 'flops_any' missing!")
continue
}
let flopsData = flopsAny.series[0].data, memBwData = memBw.series[0].data
const f = flopsData[flopsData.length - 1], m = memBwData[memBwData.length - 1] // last (most recent) sample of each series
const intensity = f / m
if (Number.isNaN(intensity) || !Number.isFinite(intensity))
continue
x.push(intensity)
y.push(f)
}
if (x.length > 0 && y.length > 0) {
data = [null, [x, y], []] // for dataformat see roofline.svelte
}
return data
}
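// Note: the empty third element ([]) means per-node points carry no time
// coloring; c is only populated by transformDataForRoofline above.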
// https://stackoverflow.com/questions/45309447/calculating-median-javascript
// function median(numbers) {
// const sorted = Array.from(numbers).sort((a, b) => a - b);
// const middle = Math.floor(sorted.length / 2);
// if (sorted.length % 2 === 0) {
// return (sorted[middle - 1] + sorted[middle]) / 2;
// }
// return sorted[middle];
// }